//===--- Randstruct.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for Clang's structure field layout
// randomization.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/Randstruct.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallVector.h"

#include <algorithm>
#include <random>
#include <set>
#include <sstream>
#include <string>

using clang::ASTContext;
using clang::FieldDecl;
using llvm::SmallVector;

namespace {

// FIXME: Replace this with some discovery once that mechanism exists.
enum { CACHE_LINE = 64 };

// The Bucket class holds the struct fields we're trying to fill to a
// cache-line.
class Bucket {
  SmallVector<FieldDecl *, 64> Fields;
  int Size = 0;

public:
  virtual ~Bucket() = default;

  SmallVector<FieldDecl *, 64> &fields() { return Fields; }
  void addField(FieldDecl *Field, int FieldSize);
  virtual bool canFit(int FieldSize) const {
    return Size + FieldSize <= CACHE_LINE;
  }
  virtual bool isBitfieldRun() const { return false; }
  bool full() const { return Size >= CACHE_LINE; }
};

void Bucket::addField(FieldDecl *Field, int FieldSize) {
  Size += FieldSize;
  Fields.push_back(Field);
}

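// A bucket dedicated to a run of adjacent bitfields. It always reports that
// another field fits, so the entire run ends up in a single bucket, and the
// run's internal order is preserved when the buckets are later shuffled.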
struct BitfieldRunBucket : public Bucket {
  bool canFit(int FieldSize) const override { return true; }
  bool isBitfieldRun() const override { return true; }
};

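// Core of the randomization: group the fields into cache-line-sized buckets
// (runs of adjacent bitfields and over-sized fields each get a bucket of
// their own), shuffle the buckets, then shuffle the fields inside every
// bucket that is not a bitfield run, and finally write the new order back
// into FieldsOut.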
void randomizeStructureLayoutImpl(const ASTContext &Context,
                                  llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
                                  std::mt19937 &RNG) {
  // All of the Buckets produced by the best-effort cache-line algorithm.
  SmallVector<std::unique_ptr<Bucket>, 16> Buckets;

  // The current bucket of fields that we are trying to fill to a cache-line.
  std::unique_ptr<Bucket> CurrentBucket;

  // The current bucket containing the run of adjacent bitfields to ensure they
  // remain adjacent.
  std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;

  // Tracks the number of fields that we failed to fit into the current bucket,
  // and thus still need to be added later.
  size_t Skipped = 0;

  while (!FieldsOut.empty()) {
    // If we've Skipped more fields than we have remaining to place, that means
    // that they can't fit in our current bucket, and we need to start a new
    // one.
    if (Skipped >= FieldsOut.size()) {
      Skipped = 0;
      Buckets.push_back(std::move(CurrentBucket));
    }

    // Take the first field that needs to be put in a bucket.
    auto FieldIter = FieldsOut.begin();
    FieldDecl *FD = *FieldIter;

    if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
      // Start a bitfield run if this is the first bitfield we have found.
      if (!CurrentBitfieldRun)
        CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();

      // We've placed the field, and can remove it from the "awaiting Buckets"
      // vector called "FieldsOut."
      CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
      FieldsOut.erase(FieldIter);
      continue;
    }

    // Else, current field is not a bitfield. If we were previously in a
    // bitfield run, end it.
    if (CurrentBitfieldRun)
      Buckets.push_back(std::move(CurrentBitfieldRun));

    // If we don't have a bucket, make one.
    if (!CurrentBucket)
      CurrentBucket = std::make_unique<Bucket>();

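    // Compute the field's width. A field at least as wide as a cache line
    // gets a bucket of its own and is tied off immediately.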
    uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
    if (Width >= CACHE_LINE) {
      std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
      OverSized->addField(FD, Width);
      FieldsOut.erase(FieldIter);
      Buckets.push_back(std::move(OverSized));
      continue;
    }

    // If it fits, add it.
    if (CurrentBucket->canFit(Width)) {
      CurrentBucket->addField(FD, Width);
      FieldsOut.erase(FieldIter);

      // If it's now full, tie off the bucket.
      if (CurrentBucket->full()) {
        Skipped = 0;
        Buckets.push_back(std::move(CurrentBucket));
      }
    } else {
      // We can't fit it in our current bucket. Move it to the end for
      // processing later.
      ++Skipped; // Mark it skipped.
      FieldsOut.push_back(FD);
      FieldsOut.erase(FieldIter);
    }
  }

  // Done processing the fields awaiting a bucket.

  // If we were filling a bucket, tie it off.
  if (CurrentBucket)
    Buckets.push_back(std::move(CurrentBucket));

  // If we were processing a bitfield run bucket, tie it off.
  if (CurrentBitfieldRun)
    Buckets.push_back(std::move(CurrentBitfieldRun));

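  // Randomize the order of the cache-line buckets themselves.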
  std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);

  // Produce the new ordering of the elements from the Buckets.
  SmallVector<FieldDecl *, 16> FinalOrder;
  for (const std::unique_ptr<Bucket> &B : Buckets) {
    llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
    if (!B->isBitfieldRun())
      std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);

    FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
  }

  FieldsOut = FinalOrder;
}

} // anonymous namespace

namespace clang {
namespace randstruct {

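// Entry point for record layout randomization. Declarations that are not
// fields are emitted first in their original relative order,
// StaticAssertDecls and IndirectFieldDecls are appended after the shuffled
// fields, and a trailing flexible-array-like member stays at the end. Returns
// false if the record has no fields to randomize. (Randomization is normally
// requested via the randomize_layout attribute and seeded with
// -frandomize-layout-seed; that wiring lives outside this file.)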
bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
                              SmallVectorImpl<Decl *> &FinalOrdering) {
  SmallVector<FieldDecl *, 64> RandomizedFields;
  SmallVector<Decl *, 8> PostRandomizedFields;

  unsigned TotalNumFields = 0;
  for (Decl *D : RD->decls()) {
    ++TotalNumFields;
    if (auto *FD = dyn_cast<FieldDecl>(D))
      RandomizedFields.push_back(FD);
    else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
      PostRandomizedFields.push_back(D);
    else
      FinalOrdering.push_back(D);
  }

  if (RandomizedFields.empty())
    return false;

  // The struct might end with a flexible array member or with a small trailing
  // array (size 2 or smaller, often used as a flexible-array idiom), in which
  // case we don't want to randomize it away from the end.
  FieldDecl *FlexibleArray =
      RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
  if (!FlexibleArray) {
    if (const auto *CA =
            dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
      if (CA->getSize().sle(2))
        FlexibleArray = RandomizedFields.pop_back_val();
  }

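  // Seed the RNG with the user-provided Randstruct seed combined with the
  // record's name so that each record gets a stable, reproducible layout for
  // a given seed.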
  std::string Seed =
      Context.getLangOpts().RandstructSeed + RD->getNameAsString();
  std::seed_seq SeedSeq(Seed.begin(), Seed.end());
  std::mt19937 RNG(SeedSeq);

  randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);

  // Insert the randomized fields into the final ordering.
  FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
                       RandomizedFields.end());

  // Add fields that belong towards the end of the RecordDecl.
  FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
                       PostRandomizedFields.end());

  // Add back the flexible array.
  if (FlexibleArray)
    FinalOrdering.push_back(FlexibleArray);

  assert(TotalNumFields == FinalOrdering.size() &&
         "Decl count has been altered after Randstruct randomization!");
  (void)TotalNumFields;
  return true;
}

} // end namespace randstruct
} // end namespace clang