1//===--- OptimizedStructLayout.cpp - Optimal data layout algorithm ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the performOptimizedStructLayout interface.
10//
11//===----------------------------------------------------------------------===//
12
#include "llvm/Support/OptimizedStructLayout.h"

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <optional>
15
16using namespace llvm;
17
18using Field = OptimizedStructLayoutField;
19
20#ifndef NDEBUG
21static void checkValidLayout(ArrayRef<Field> Fields, uint64_t Size,
22 Align MaxAlign) {
23 uint64_t LastEnd = 0;
24 Align ComputedMaxAlign;
25 for (auto &Field : Fields) {
26 assert(Field.hasFixedOffset() &&
27 "didn't assign a fixed offset to field");
28 assert(isAligned(Field.Alignment, Field.Offset) &&
29 "didn't assign a correctly-aligned offset to field");
30 assert(Field.Offset >= LastEnd &&
31 "didn't assign offsets in ascending order");
32 LastEnd = Field.getEndOffset();
33 assert(Field.Alignment <= MaxAlign &&
34 "didn't compute MaxAlign correctly");
35 ComputedMaxAlign = std::max(Field.Alignment, MaxAlign);
36 }
37 assert(LastEnd == Size && "didn't compute LastEnd correctly");
38 assert(ComputedMaxAlign == MaxAlign && "didn't compute MaxAlign correctly");
39}
40#endif
41
42std::pair<uint64_t, Align>
43llvm::performOptimizedStructLayout(MutableArrayRef<Field> Fields) {
44#ifndef NDEBUG
45 // Do some simple precondition checks.
46 {
47 bool InFixedPrefix = true;
48 size_t LastEnd = 0;
49 for (auto &Field : Fields) {
50 assert(Field.Size > 0 && "field of zero size");
51 if (Field.hasFixedOffset()) {
52 assert(InFixedPrefix &&
53 "fixed-offset fields are not a strict prefix of array");
54 assert(LastEnd <= Field.Offset &&
55 "fixed-offset fields overlap or are not in order");
56 LastEnd = Field.getEndOffset();
57 assert(LastEnd > Field.Offset &&
58 "overflow in fixed-offset end offset");
59 } else {
60 InFixedPrefix = false;
61 }
62 }
63 }
64#endif
65
66 // Do an initial pass over the fields.
67 Align MaxAlign;
68
69 // Find the first flexible-offset field, tracking MaxAlign.
70 auto FirstFlexible = Fields.begin(), E = Fields.end();
71 while (FirstFlexible != E && FirstFlexible->hasFixedOffset()) {
72 MaxAlign = std::max(a: MaxAlign, b: FirstFlexible->Alignment);
73 ++FirstFlexible;
74 }
75
76 // If there are no flexible fields, we're done.
77 if (FirstFlexible == E) {
78 uint64_t Size = 0;
79 if (!Fields.empty())
80 Size = Fields.back().getEndOffset();
81
82#ifndef NDEBUG
83 checkValidLayout(Fields, Size, MaxAlign);
84#endif
85 return std::make_pair(x&: Size, y&: MaxAlign);
86 }
87
88 // Walk over the flexible-offset fields, tracking MaxAlign and
89 // assigning them a unique number in order of their appearance.
90 // We'll use this unique number in the comparison below so that
91 // we can use array_pod_sort, which isn't stable. We won't use it
92 // past that point.
93 {
94 uintptr_t UniqueNumber = 0;
95 for (auto I = FirstFlexible; I != E; ++I) {
96 I->Scratch = reinterpret_cast<void*>(UniqueNumber++);
97 MaxAlign = std::max(a: MaxAlign, b: I->Alignment);
98 }
99 }
100
101 // Sort the flexible elements in order of decreasing alignment,
102 // then decreasing size, and then the original order as recorded
103 // in Scratch. The decreasing-size aspect of this is only really
104 // important if we get into the gap-filling stage below, but it
105 // doesn't hurt here.
106 array_pod_sort(Start: FirstFlexible, End: E,
107 Compare: [](const Field *lhs, const Field *rhs) -> int {
108 // Decreasing alignment.
109 if (lhs->Alignment != rhs->Alignment)
110 return (lhs->Alignment < rhs->Alignment ? 1 : -1);
111
112 // Decreasing size.
113 if (lhs->Size != rhs->Size)
114 return (lhs->Size < rhs->Size ? 1 : -1);
115
116 // Original order.
117 auto lhsNumber = reinterpret_cast<uintptr_t>(lhs->Scratch);
118 auto rhsNumber = reinterpret_cast<uintptr_t>(rhs->Scratch);
119 if (lhsNumber != rhsNumber)
120 return (lhsNumber < rhsNumber ? -1 : 1);
121
122 return 0;
123 });
124
125 // Do a quick check for whether that sort alone has given us a perfect
126 // layout with no interior padding. This is very common: if the
127 // fixed-layout fields have no interior padding, and they end at a
128 // sufficiently-aligned offset for all the flexible-layout fields,
129 // and the flexible-layout fields all have sizes that are multiples
130 // of their alignment, then this will reliably trigger.
131 {
132 bool HasPadding = false;
133 uint64_t LastEnd = 0;
134
135 // Walk the fixed-offset fields.
136 for (auto I = Fields.begin(); I != FirstFlexible; ++I) {
137 assert(I->hasFixedOffset());
138 if (LastEnd != I->Offset) {
139 HasPadding = true;
140 break;
141 }
142 LastEnd = I->getEndOffset();
143 }
144
145 // Walk the flexible-offset fields, optimistically assigning fixed
146 // offsets. Note that we maintain a strict division between the
147 // fixed-offset and flexible-offset fields, so if we end up
148 // discovering padding later in this loop, we can just abandon this
149 // work and we'll ignore the offsets we already assigned.
150 if (!HasPadding) {
151 for (auto I = FirstFlexible; I != E; ++I) {
152 auto Offset = alignTo(Size: LastEnd, A: I->Alignment);
153 if (LastEnd != Offset) {
154 HasPadding = true;
155 break;
156 }
157 I->Offset = Offset;
158 LastEnd = I->getEndOffset();
159 }
160 }
161
162 // If we already have a perfect layout, we're done.
163 if (!HasPadding) {
164#ifndef NDEBUG
165 checkValidLayout(Fields, LastEnd, MaxAlign);
166#endif
167 return std::make_pair(x&: LastEnd, y&: MaxAlign);
168 }
169 }
170
171 // The algorithm sketch at this point is as follows.
172 //
173 // Consider the padding gaps between fixed-offset fields in ascending
174 // order. Let LastEnd be the offset of the first byte following the
175 // field before the gap, or 0 if the gap is at the beginning of the
176 // structure. Find the "best" flexible-offset field according to the
177 // criteria below. If no such field exists, proceed to the next gap.
178 // Otherwise, add the field at the first properly-aligned offset for
179 // that field that is >= LastEnd, then update LastEnd and repeat in
180 // order to fill any remaining gap following that field.
181 //
182 // Next, let LastEnd to be the offset of the first byte following the
183 // last fixed-offset field, or 0 if there are no fixed-offset fields.
184 // While there are flexible-offset fields remaining, find the "best"
185 // flexible-offset field according to the criteria below, add it at
186 // the first properly-aligned offset for that field that is >= LastEnd,
187 // and update LastEnd to the first byte following the field.
188 //
189 // The "best" field is chosen by the following criteria, considered
190 // strictly in order:
191 //
192 // - When filling a gap betweeen fields, the field must fit.
193 // - A field is preferred if it requires less padding following LastEnd.
194 // - A field is preferred if it is more aligned.
195 // - A field is preferred if it is larger.
196 // - A field is preferred if it appeared earlier in the initial order.
197 //
198 // Minimizing leading padding is a greedy attempt to avoid padding
199 // entirely. Preferring more-aligned fields is an attempt to eliminate
200 // stricter constraints earlier, with the idea that weaker alignment
201 // constraints may be resolvable with less padding elsewhere. These
202 // These two rules are sufficient to ensure that we get the optimal
203 // layout in the "C-style" case. Preferring larger fields tends to take
204 // better advantage of large gaps and may be more likely to have a size
205 // that's a multiple of a useful alignment. Preferring the initial
206 // order may help somewhat with locality but is mostly just a way of
207 // ensuring deterministic output.
208 //
209 // Note that this algorithm does not guarantee a minimal layout. Picking
210 // a larger object greedily may leave a gap that cannot be filled as
211 // efficiently. Unfortunately, solving this perfectly is an NP-complete
212 // problem (by reduction from bin-packing: let B_i be the bin sizes and
213 // O_j be the object sizes; add fixed-offset fields such that the gaps
214 // between them have size B_i, and add flexible-offset fields with
215 // alignment 1 and size O_j; if the layout size is equal to the end of
216 // the last fixed-layout field, the objects fit in the bins; note that
217 // this doesn't even require the complexity of alignment).
218
219 // The implementation below is essentially just an optimized version of
220 // scanning the list of remaining fields looking for the best, which
221 // would be O(n^2). In the worst case, it doesn't improve on that.
222 // However, in practice it'll just scan the array of alignment bins
223 // and consider the first few elements from one or two bins. The
224 // number of bins is bounded by a small constant: alignments are powers
225 // of two that are vanishingly unlikely to be over 64 and fairly unlikely
226 // to be over 8. And multiple elements only need to be considered when
227 // filling a gap between fixed-offset fields, which doesn't happen very
228 // often. We could use a data structure within bins that optimizes for
229 // finding the best-sized match, but it would require allocating memory
230 // and copying data, so it's unlikely to be worthwhile.
231
232
233 // Start by organizing the flexible-offset fields into bins according to
234 // their alignment. We expect a small enough number of bins that we
235 // don't care about the asymptotic costs of walking this.
236 struct AlignmentQueue {
237 /// The minimum size of anything currently in this queue.
238 uint64_t MinSize;
239
240 /// The head of the queue. A singly-linked list. The order here should
241 /// be consistent with the earlier sort, i.e. the elements should be
242 /// monotonically descending in size and otherwise in the original order.
243 ///
244 /// We remove the queue from the array as soon as this is empty.
245 OptimizedStructLayoutField *Head;
246
247 /// The alignment requirement of the queue.
248 Align Alignment;
249
250 static Field *getNext(Field *Cur) {
251 return static_cast<Field *>(Cur->Scratch);
252 }
253 };
254 SmallVector<AlignmentQueue, 8> FlexibleFieldsByAlignment;
255 for (auto I = FirstFlexible; I != E; ) {
256 auto Head = I;
257 auto Alignment = I->Alignment;
258
259 uint64_t MinSize = I->Size;
260 auto LastInQueue = I;
261 for (++I; I != E && I->Alignment == Alignment; ++I) {
262 LastInQueue->Scratch = I;
263 LastInQueue = I;
264 MinSize = std::min(a: MinSize, b: I->Size);
265 }
266 LastInQueue->Scratch = nullptr;
267
268 FlexibleFieldsByAlignment.push_back(Elt: {.MinSize: MinSize, .Head: Head, .Alignment: Alignment});
269 }
270
271#ifndef NDEBUG
272 // Verify that we set the queues up correctly.
273 auto checkQueues = [&]{
274 bool FirstQueue = true;
275 Align LastQueueAlignment;
276 for (auto &Queue : FlexibleFieldsByAlignment) {
277 assert((FirstQueue || Queue.Alignment < LastQueueAlignment) &&
278 "bins not in order of descending alignment");
279 LastQueueAlignment = Queue.Alignment;
280 FirstQueue = false;
281
282 assert(Queue.Head && "queue was empty");
283 uint64_t LastSize = ~(uint64_t)0;
284 for (auto I = Queue.Head; I; I = Queue.getNext(I)) {
285 assert(I->Alignment == Queue.Alignment && "bad field in queue");
286 assert(I->Size <= LastSize && "queue not in descending size order");
287 LastSize = I->Size;
288 }
289 }
290 };
291 checkQueues();
292#endif
293
294 /// Helper function to remove a field from a queue.
295 auto spliceFromQueue = [&](AlignmentQueue *Queue, Field *Last, Field *Cur) {
296 assert(Last ? Queue->getNext(Last) == Cur : Queue->Head == Cur);
297
298 // If we're removing Cur from a non-initial position, splice it out
299 // of the linked list.
300 if (Last) {
301 Last->Scratch = Cur->Scratch;
302
303 // If Cur was the last field in the list, we need to update MinSize.
304 // We can just use the last field's size because the list is in
305 // descending order of size.
306 if (!Cur->Scratch)
307 Queue->MinSize = Last->Size;
308
309 // Otherwise, replace the head.
310 } else {
311 if (auto NewHead = Queue->getNext(Cur))
312 Queue->Head = NewHead;
313
314 // If we just emptied the queue, destroy its bin.
315 else
316 FlexibleFieldsByAlignment.erase(CI: Queue);
317 }
318 };
319
320 // Do layout into a local array. Doing this in-place on Fields is
321 // not really feasible.
322 SmallVector<Field, 16> Layout;
323 Layout.reserve(N: Fields.size());
324
325 // The offset that we're currently looking to insert at (or after).
326 uint64_t LastEnd = 0;
327
328 // Helper function to splice Cur out of the given queue and add it
329 // to the layout at the given offset.
330 auto addToLayout = [&](AlignmentQueue *Queue, Field *Last, Field *Cur,
331 uint64_t Offset) -> bool {
332 assert(Offset == alignTo(LastEnd, Cur->Alignment));
333
334 // Splice out. This potentially invalidates Queue.
335 spliceFromQueue(Queue, Last, Cur);
336
337 // Add Cur to the layout.
338 Layout.push_back(Elt: *Cur);
339 Layout.back().Offset = Offset;
340 LastEnd = Layout.back().getEndOffset();
341
342 // Always return true so that we can be tail-called.
343 return true;
344 };
345
346 // Helper function to try to find a field in the given queue that'll
347 // fit starting at StartOffset but before EndOffset (if present).
348 // Note that this never fails if EndOffset is not provided.
349 auto tryAddFillerFromQueue = [&](AlignmentQueue *Queue, uint64_t StartOffset,
350 std::optional<uint64_t> EndOffset) -> bool {
351 assert(Queue->Head);
352 assert(StartOffset == alignTo(LastEnd, Queue->Alignment));
353 assert(!EndOffset || StartOffset < *EndOffset);
354
355 // Figure out the maximum size that a field can be, and ignore this
356 // queue if there's nothing in it that small.
357 auto MaxViableSize =
358 (EndOffset ? *EndOffset - StartOffset : ~(uint64_t)0);
359 if (Queue->MinSize > MaxViableSize)
360 return false;
361
362 // Find the matching field. Note that this should always find
363 // something because of the MinSize check above.
364 for (Field *Cur = Queue->Head, *Last = nullptr; true;
365 Last = Cur, Cur = Queue->getNext(Cur)) {
366 assert(Cur && "didn't find a match in queue despite its MinSize");
367 if (Cur->Size <= MaxViableSize)
368 return addToLayout(Queue, Last, Cur, StartOffset);
369 }
370
371 llvm_unreachable("didn't find a match in queue despite its MinSize");
372 };
373
374 // Helper function to find the "best" flexible-offset field according
375 // to the criteria described above.
376 auto tryAddBestField = [&](std::optional<uint64_t> BeforeOffset) -> bool {
377 assert(!BeforeOffset || LastEnd < *BeforeOffset);
378 auto QueueB = FlexibleFieldsByAlignment.begin();
379 auto QueueE = FlexibleFieldsByAlignment.end();
380
381 // Start by looking for the most-aligned queue that doesn't need any
382 // leading padding after LastEnd.
383 auto FirstQueueToSearch = QueueB;
384 for (; FirstQueueToSearch != QueueE; ++FirstQueueToSearch) {
385 if (isAligned(Lhs: FirstQueueToSearch->Alignment, SizeInBytes: LastEnd))
386 break;
387 }
388
389 uint64_t Offset = LastEnd;
390 while (true) {
391 // Invariant: all of the queues in [FirstQueueToSearch, QueueE)
392 // require the same initial padding offset.
393
394 // Search those queues in descending order of alignment for a
395 // satisfactory field.
396 for (auto Queue = FirstQueueToSearch; Queue != QueueE; ++Queue) {
397 if (tryAddFillerFromQueue(Queue, Offset, BeforeOffset))
398 return true;
399 }
400
401 // Okay, we don't need to scan those again.
402 QueueE = FirstQueueToSearch;
403
404 // If we started from the first queue, we're done.
405 if (FirstQueueToSearch == QueueB)
406 return false;
407
408 // Otherwise, scan backwards to find the most-aligned queue that
409 // still has minimal leading padding after LastEnd. If that
410 // minimal padding is already at or past the end point, we're done.
411 --FirstQueueToSearch;
412 Offset = alignTo(Size: LastEnd, A: FirstQueueToSearch->Alignment);
413 if (BeforeOffset && Offset >= *BeforeOffset)
414 return false;
415 while (FirstQueueToSearch != QueueB &&
416 Offset == alignTo(Size: LastEnd, A: FirstQueueToSearch[-1].Alignment))
417 --FirstQueueToSearch;
418 }
419 };
420
421 // Phase 1: fill the gaps between fixed-offset fields with the best
422 // flexible-offset field that fits.
423 for (auto I = Fields.begin(); I != FirstFlexible; ++I) {
424 assert(LastEnd <= I->Offset);
425 while (LastEnd != I->Offset) {
426 if (!tryAddBestField(I->Offset))
427 break;
428 }
429 Layout.push_back(Elt: *I);
430 LastEnd = I->getEndOffset();
431 }
432
433#ifndef NDEBUG
434 checkQueues();
435#endif
436
437 // Phase 2: repeatedly add the best flexible-offset field until
438 // they're all gone.
439 while (!FlexibleFieldsByAlignment.empty()) {
440 bool Success = tryAddBestField(std::nullopt);
441 assert(Success && "didn't find a field with no fixed limit?");
442 (void) Success;
443 }
444
445 // Copy the layout back into place.
446 assert(Layout.size() == Fields.size());
447 memcpy(dest: Fields.data(), src: Layout.data(),
448 n: Fields.size() * sizeof(OptimizedStructLayoutField));
449
450#ifndef NDEBUG
451 // Make a final check that the layout is valid.
452 checkValidLayout(Fields, LastEnd, MaxAlign);
453#endif
454
455 return std::make_pair(x&: LastEnd, y&: MaxAlign);
456}
457