//===--- Randstruct.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for Clang's structure field layout
// randomization.
//
//===----------------------------------------------------------------------===//

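// An illustrative (hypothetical) use of the feature from C code:
//
//   struct record {
//     char *name;
//     long id;
//     unsigned flags;
//   } __attribute__((randomize_layout));
//
// Built with a randomization seed (e.g. -frandomize-layout-seed=<string>),
// the fields of `record` may be emitted in a seed-dependent order rather
// than in declaration order.
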
#include "clang/AST/Randstruct.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallVector.h"

#include <algorithm>
#include <random>
#include <set>
#include <sstream>
#include <string>

using clang::ASTContext;
using clang::FieldDecl;
using llvm::SmallVector;

namespace {

// FIXME: Replace this with some discovery once that mechanism exists.
enum { CACHE_LINE = 64 };

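// NB: 64 bytes matches the L1 data-cache line size of most contemporary
// x86-64 and many AArch64 cores.
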
// The Bucket class holds the struct fields we're trying to fill to a
// cache-line.
class Bucket {
  SmallVector<FieldDecl *, 64> Fields;
  int Size = 0;

public:
  virtual ~Bucket() = default;

  SmallVector<FieldDecl *, 64> &fields() { return Fields; }
  void addField(FieldDecl *Field, int FieldSize);
  virtual bool canFit(int FieldSize) const {
    return Size + FieldSize <= CACHE_LINE;
  }
  virtual bool isBitfieldRun() const { return false; }
  bool full() const { return Size >= CACHE_LINE; }
};

void Bucket::addField(FieldDecl *Field, int FieldSize) {
  Size += FieldSize;
  Fields.push_back(Field);
}

struct BitfieldRunBucket : public Bucket {
  bool canFit(int FieldSize) const override { return true; }
  bool isBitfieldRun() const override { return true; }
};

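// Illustrative note: adjacent bitfields, as in the hypothetical
//
//   struct flags {
//     unsigned a : 3;
//     unsigned b : 5;
//   };
//
// must stay together so they can keep sharing storage units. A
// BitfieldRunBucket therefore accepts any number of fields, and the final
// ordering below reorders it as a unit instead of shuffling its fields.
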
void randomizeStructureLayoutImpl(const ASTContext &Context,
                                  llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
                                  std::mt19937 &RNG) {
  // All of the Buckets produced by the best-effort cache-line algorithm.
  SmallVector<std::unique_ptr<Bucket>, 16> Buckets;

  // The current bucket of fields that we are trying to fill to a cache-line.
  std::unique_ptr<Bucket> CurrentBucket;

  // The current bucket containing the run of adjacent bitfields, kept
  // together so they remain adjacent.
  std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;

  // Tracks the number of fields that we failed to fit to the current bucket,
  // and thus still need to be added later.
  size_t Skipped = 0;

  while (!FieldsOut.empty()) {
    // If we've Skipped more fields than we have remaining to place, that
    // means that they can't fit in our current bucket, and we need to start
    // a new one.
    if (Skipped >= FieldsOut.size()) {
      Skipped = 0;
      Buckets.push_back(std::move(CurrentBucket));
    }

    // Take the first field that needs to be put in a bucket.
    auto FieldIter = FieldsOut.begin();
    FieldDecl *FD = *FieldIter;

    if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
      // Start a bitfield run if this is the first bitfield we have found.
      if (!CurrentBitfieldRun)
        CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();

      // We've placed the field, and can remove it from the vector of fields
      // awaiting a bucket (FieldsOut).
      CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
      FieldsOut.erase(FieldIter);
      continue;
    }

    // Else, the current field is not a bitfield. If we were previously in a
    // bitfield run, end it.
    if (CurrentBitfieldRun)
      Buckets.push_back(std::move(CurrentBitfieldRun));

    // If we don't have a bucket, make one.
    if (!CurrentBucket)
      CurrentBucket = std::make_unique<Bucket>();

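    // Note that TypeInfo::Width below is measured in bits, so the CACHE_LINE
    // comparisons here are effectively in bit units.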
    uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
    if (Width >= CACHE_LINE) {
      std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
      OverSized->addField(FD, Width);
      FieldsOut.erase(FieldIter);
      Buckets.push_back(std::move(OverSized));
      continue;
    }

    // If it fits, add it.
    if (CurrentBucket->canFit(Width)) {
      CurrentBucket->addField(FD, Width);
      FieldsOut.erase(FieldIter);

      // If it's now full, tie off the bucket.
      if (CurrentBucket->full()) {
        Skipped = 0;
        Buckets.push_back(std::move(CurrentBucket));
      }
    } else {
      // We can't fit it in our current bucket. Move it to the end for
      // processing later.
      ++Skipped; // Mark it skipped.
      FieldsOut.push_back(FD);
      FieldsOut.erase(FieldIter);
    }
  }

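  // Worked example (illustrative, in the units noted above): given field
  // widths 32, 24, and 16 with CACHE_LINE = 64, the loop packs 32 and 24
  // into one bucket (56 <= 64), skips 16 (72 > 64) by rotating it to the
  // back of FieldsOut, and once Skipped catches up with the remaining field
  // count, ties off that bucket and starts a new one that receives 16.
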
  // Done processing the fields awaiting a bucket.

  // If we were filling a bucket, tie it off.
  if (CurrentBucket)
    Buckets.push_back(std::move(CurrentBucket));

  // If we were processing a bitfield run bucket, tie it off.
  if (CurrentBitfieldRun)
    Buckets.push_back(std::move(CurrentBitfieldRun));

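  // Shuffling whole buckets, rather than the flat field list, preserves the
  // cache-line-sized groupings built above while still randomizing the
  // overall layout.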
  std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);

  // Produce the new ordering of the elements from the Buckets.
  SmallVector<FieldDecl *, 16> FinalOrder;
  for (const std::unique_ptr<Bucket> &B : Buckets) {
    llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
    if (!B->isBitfieldRun())
      std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);

    FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
  }

  FieldsOut = FinalOrder;
}

} // anonymous namespace

namespace clang {
namespace randstruct {

bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
                              SmallVectorImpl<Decl *> &FinalOrdering) {
  SmallVector<FieldDecl *, 64> RandomizedFields;
  SmallVector<Decl *, 8> PostRandomizedFields;

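  // Partition the record's decls: FieldDecls are candidates for
  // randomization; static_asserts and indirect fields must follow the fields
  // they reference; everything else keeps its position at the front of the
  // final ordering.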
  unsigned TotalNumFields = 0;
  for (Decl *D : RD->decls()) {
    ++TotalNumFields;
    if (auto *FD = dyn_cast<FieldDecl>(D))
      RandomizedFields.push_back(FD);
    else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
      PostRandomizedFields.push_back(D);
    else
      FinalOrdering.push_back(D);
  }

  if (RandomizedFields.empty())
    return false;

  // The struct might end with a flexible array member or with a trailing
  // array of size two or smaller (a common flexible-array idiom), in which
  // case we don't want to randomize it.
  FieldDecl *FlexibleArray =
      RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
  if (!FlexibleArray) {
    if (const auto *CA =
            dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
      if (CA->getSize().sle(2))
        FlexibleArray = RandomizedFields.pop_back_val();
  }

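  // For example, in the hypothetical
  //
  //   struct log { int level; char msg[]; };
  //
  // `msg` is a flexible array member and must remain the last field, so it
  // is excluded above and re-appended after randomization.
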
  std::string Seed =
      Context.getLangOpts().RandstructSeed + RD->getNameAsString();
  std::seed_seq SeedSeq(Seed.begin(), Seed.end());
  std::mt19937 RNG(SeedSeq);

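  // E.g., with -frandomize-layout-seed=abc and a record named `widget`, the
  // seed string is "abcwidget": a given seed/record-name pair always
  // produces the same layout.
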
  randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);

  // Insert the randomized decls into the final ordering.
  FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
                       RandomizedFields.end());

  // Add fields that belong towards the end of the RecordDecl.
  FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
                       PostRandomizedFields.end());

  // Add back the flexible array.
  if (FlexibleArray)
    FinalOrdering.push_back(FlexibleArray);

  assert(TotalNumFields == FinalOrdering.size() &&
         "Decl count has been altered after Randstruct randomization!");
  (void)TotalNumFields;
  return true;
}

} // end namespace randstruct
} // end namespace clang