//===--- OptimizedStructLayout.cpp - Optimal data layout algorithm --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the performOptimizedStructLayout interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/OptimizedStructLayout.h"

using namespace llvm;

using Field = OptimizedStructLayoutField;
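
// Illustrative usage sketch (not part of this file): callers describe each
// field with an id, size, alignment, and optionally a fixed offset, then let
// performOptimizedStructLayout assign the remaining offsets.  The identifiers
// and sizes below are hypothetical; see llvm/Support/OptimizedStructLayout.h
// for the exact Field interface.
//
//   SmallVector<Field, 4> Fields;
//   Fields.push_back(Field(&a, /*Size=*/4, Align(4), /*FixedOffset=*/0));
//   Fields.push_back(Field(&b, /*Size=*/1, Align(1)));
//   Fields.push_back(Field(&c, /*Size=*/8, Align(8)));
//   auto SizeAndAlign = performOptimizedStructLayout(Fields);
//   // Every field now has a fixed offset; here a, b, and c might land at
//   // offsets 0, 4, and 8, giving a total size of 16 with alignment 8.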

#ifndef NDEBUG
static void checkValidLayout(ArrayRef<Field> Fields, uint64_t Size,
                             Align MaxAlign) {
  uint64_t LastEnd = 0;
  Align ComputedMaxAlign;
  for (auto &Field : Fields) {
    assert(Field.hasFixedOffset() &&
           "didn't assign a fixed offset to field");
    assert(isAligned(Field.Alignment, Field.Offset) &&
           "didn't assign a correctly-aligned offset to field");
    assert(Field.Offset >= LastEnd &&
           "didn't assign offsets in ascending order");
    LastEnd = Field.getEndOffset();
    assert(Field.Alignment <= MaxAlign &&
           "didn't compute MaxAlign correctly");
    ComputedMaxAlign = std::max(Field.Alignment, ComputedMaxAlign);
  }
  assert(LastEnd == Size && "didn't compute LastEnd correctly");
  assert(ComputedMaxAlign == MaxAlign && "didn't compute MaxAlign correctly");
}
#endif

std::pair<uint64_t, Align>
llvm::performOptimizedStructLayout(MutableArrayRef<Field> Fields) {
#ifndef NDEBUG
  // Do some simple precondition checks.
  {
    bool InFixedPrefix = true;
    size_t LastEnd = 0;
    for (auto &Field : Fields) {
      assert(Field.Size > 0 && "field of zero size");
      if (Field.hasFixedOffset()) {
        assert(InFixedPrefix &&
               "fixed-offset fields are not a strict prefix of array");
        assert(LastEnd <= Field.Offset &&
               "fixed-offset fields overlap or are not in order");
        LastEnd = Field.getEndOffset();
        assert(LastEnd > Field.Offset &&
               "overflow in fixed-offset end offset");
      } else {
        InFixedPrefix = false;
      }
    }
  }
#endif

  // Do an initial pass over the fields.
  Align MaxAlign;

  // Find the first flexible-offset field, tracking MaxAlign.
  auto FirstFlexible = Fields.begin(), E = Fields.end();
  while (FirstFlexible != E && FirstFlexible->hasFixedOffset()) {
    MaxAlign = std::max(MaxAlign, FirstFlexible->Alignment);
    ++FirstFlexible;
  }

  // If there are no flexible fields, we're done.
  if (FirstFlexible == E) {
    uint64_t Size = 0;
    if (!Fields.empty())
      Size = Fields.back().getEndOffset();

#ifndef NDEBUG
    checkValidLayout(Fields, Size, MaxAlign);
#endif
    return std::make_pair(Size, MaxAlign);
  }

  // Walk over the flexible-offset fields, tracking MaxAlign and
  // assigning them a unique number in order of their appearance.
  // We'll use this unique number in the comparison below so that
  // we can use array_pod_sort, which isn't stable.  We won't use it
  // past that point.
  {
    uintptr_t UniqueNumber = 0;
    for (auto I = FirstFlexible; I != E; ++I) {
      I->Scratch = reinterpret_cast<void*>(UniqueNumber++);
      MaxAlign = std::max(MaxAlign, I->Alignment);
    }
  }

  // Sort the flexible elements in order of decreasing alignment,
  // then decreasing size, and then the original order as recorded
  // in Scratch.  The decreasing-size aspect of this is only really
  // important if we get into the gap-filling stage below, but it
  // doesn't hurt here.
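  //
  // For example (hypothetical fields, not from the source): fields with
  // (Alignment, Size) of (4, 4), (8, 16), (1, 2), and (8, 8), appearing in
  // that order, would sort to (8, 16), (8, 8), (4, 4), (1, 2); fields that
  // tie on both alignment and size keep their original relative order via
  // the unique numbers stored in Scratch.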
  array_pod_sort(FirstFlexible, E,
                 [](const Field *lhs, const Field *rhs) -> int {
    // Decreasing alignment.
    if (lhs->Alignment != rhs->Alignment)
      return (lhs->Alignment < rhs->Alignment ? 1 : -1);

    // Decreasing size.
    if (lhs->Size != rhs->Size)
      return (lhs->Size < rhs->Size ? 1 : -1);

    // Original order.
    auto lhsNumber = reinterpret_cast<uintptr_t>(lhs->Scratch);
    auto rhsNumber = reinterpret_cast<uintptr_t>(rhs->Scratch);
    if (lhsNumber != rhsNumber)
      return (lhsNumber < rhsNumber ? -1 : 1);

    return 0;
  });

  // Do a quick check for whether that sort alone has given us a perfect
  // layout with no interior padding.  This is very common: if the
  // fixed-layout fields have no interior padding, and they end at a
  // sufficiently-aligned offset for all the flexible-layout fields,
  // and the flexible-layout fields all have sizes that are multiples
  // of their alignment, then this will reliably trigger.
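  //
  // As a hypothetical example: fixed fields ending at offset 16, followed by
  // flexible fields sorted as (Alignment 8, Size 8), (4, 4), (2, 2), would be
  // assigned offsets 16, 24, and 28 with no padding, and we could return a
  // total size of 30 immediately.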
  {
    bool HasPadding = false;
    uint64_t LastEnd = 0;

    // Walk the fixed-offset fields.
    for (auto I = Fields.begin(); I != FirstFlexible; ++I) {
      assert(I->hasFixedOffset());
      if (LastEnd != I->Offset) {
        HasPadding = true;
        break;
      }
      LastEnd = I->getEndOffset();
    }

    // Walk the flexible-offset fields, optimistically assigning fixed
    // offsets.  Note that we maintain a strict division between the
    // fixed-offset and flexible-offset fields, so if we end up
    // discovering padding later in this loop, we can just abandon this
    // work and we'll ignore the offsets we already assigned.
    if (!HasPadding) {
      for (auto I = FirstFlexible; I != E; ++I) {
        auto Offset = alignTo(LastEnd, I->Alignment);
        if (LastEnd != Offset) {
          HasPadding = true;
          break;
        }
        I->Offset = Offset;
        LastEnd = I->getEndOffset();
      }
    }

    // If we already have a perfect layout, we're done.
    if (!HasPadding) {
#ifndef NDEBUG
      checkValidLayout(Fields, LastEnd, MaxAlign);
#endif
      return std::make_pair(LastEnd, MaxAlign);
    }
  }

  // The algorithm sketch at this point is as follows.
  //
  // Consider the padding gaps between fixed-offset fields in ascending
  // order.  Let LastEnd be the offset of the first byte following the
  // field before the gap, or 0 if the gap is at the beginning of the
  // structure.  Find the "best" flexible-offset field according to the
  // criteria below.  If no such field exists, proceed to the next gap.
  // Otherwise, add the field at the first properly-aligned offset for
  // that field that is >= LastEnd, then update LastEnd and repeat in
  // order to fill any remaining gap following that field.
  //
  // Next, let LastEnd be the offset of the first byte following the
  // last fixed-offset field, or 0 if there are no fixed-offset fields.
  // While there are flexible-offset fields remaining, find the "best"
  // flexible-offset field according to the criteria below, add it at
  // the first properly-aligned offset for that field that is >= LastEnd,
  // and update LastEnd to the first byte following the field.
  //
  // The "best" field is chosen by the following criteria, considered
  // strictly in order:
  //
  // - When filling a gap between fields, the field must fit.
  // - A field is preferred if it requires less padding following LastEnd.
  // - A field is preferred if it is more aligned.
  // - A field is preferred if it is larger.
  // - A field is preferred if it appeared earlier in the initial order.
  //
  // Minimizing leading padding is a greedy attempt to avoid padding
  // entirely.  Preferring more-aligned fields is an attempt to eliminate
  // stricter constraints earlier, with the idea that weaker alignment
  // constraints may be resolvable with less padding elsewhere.  These
  // two rules are sufficient to ensure that we get the optimal
  // layout in the "C-style" case.  Preferring larger fields tends to take
  // better advantage of large gaps and may be more likely to have a size
  // that's a multiple of a useful alignment.  Preferring the initial
  // order may help somewhat with locality but is mostly just a way of
  // ensuring deterministic output.
  //
  // Note that this algorithm does not guarantee a minimal layout.  Picking
  // a larger object greedily may leave a gap that cannot be filled as
  // efficiently.  Unfortunately, solving this perfectly is an NP-complete
  // problem (by reduction from bin-packing: let B_i be the bin sizes and
  // O_j be the object sizes; add fixed-offset fields such that the gaps
  // between them have size B_i, and add flexible-offset fields with
  // alignment 1 and size O_j; if the layout size is equal to the end of
  // the last fixed-layout field, the objects fit in the bins; note that
  // this doesn't even require the complexity of alignment).
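  //
  // As a small hypothetical example of that non-optimality: with a 6-byte
  // gap between fixed-offset fields and flexible fields of sizes 4, 3, and 3
  // (all with alignment 1), the greedy choice places the 4-byte field in the
  // gap, leaving 2 bytes of padding and pushing both 3-byte fields past the
  // last fixed field; placing the two 3-byte fields in the gap instead would
  // have filled it exactly and produced a smaller total size.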

  // The implementation below is essentially just an optimized version of
  // scanning the list of remaining fields looking for the best, which
  // would be O(n^2).  In the worst case, it doesn't improve on that.
  // However, in practice it'll just scan the array of alignment bins
  // and consider the first few elements from one or two bins.  The
  // number of bins is bounded by a small constant: alignments are powers
  // of two that are vanishingly unlikely to be over 64 and fairly unlikely
  // to be over 8.  And multiple elements only need to be considered when
  // filling a gap between fixed-offset fields, which doesn't happen very
  // often.  We could use a data structure within bins that optimizes for
  // finding the best-sized match, but it would require allocating memory
  // and copying data, so it's unlikely to be worthwhile.


  // Start by organizing the flexible-offset fields into bins according to
  // their alignment.  We expect a small enough number of bins that we
  // don't care about the asymptotic costs of walking this.
  struct AlignmentQueue {
    /// The minimum size of anything currently in this queue.
    uint64_t MinSize;

    /// The head of the queue.  A singly-linked list.  The order here should
    /// be consistent with the earlier sort, i.e. the elements should be
    /// monotonically descending in size and otherwise in the original order.
    ///
    /// We remove the queue from the array as soon as this is empty.
    OptimizedStructLayoutField *Head;

    /// The alignment requirement of the queue.
    Align Alignment;

    static Field *getNext(Field *Cur) {
      return static_cast<Field *>(Cur->Scratch);
    }
  };
  SmallVector<AlignmentQueue, 8> FlexibleFieldsByAlignment;
  for (auto I = FirstFlexible; I != E; ) {
    auto Head = I;
    auto Alignment = I->Alignment;

    uint64_t MinSize = I->Size;
    auto LastInQueue = I;
    for (++I; I != E && I->Alignment == Alignment; ++I) {
      LastInQueue->Scratch = I;
      LastInQueue = I;
      MinSize = std::min(MinSize, I->Size);
    }
    LastInQueue->Scratch = nullptr;

    FlexibleFieldsByAlignment.push_back({MinSize, Head, Alignment});
  }
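
  // Continuing the hypothetical sorted order above, fields of (Alignment,
  // Size) (8, 16), (8, 8), (4, 4), (1, 2) would form three queues: an
  // alignment-8 queue holding the 16- and 8-byte fields (MinSize 8), an
  // alignment-4 queue holding the 4-byte field, and an alignment-1 queue
  // holding the 2-byte field.  Each field's Scratch pointer now links it to
  // the next field in its queue (or is null at the tail).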

#ifndef NDEBUG
  // Verify that we set the queues up correctly.
  auto checkQueues = [&]{
    bool FirstQueue = true;
    Align LastQueueAlignment;
    for (auto &Queue : FlexibleFieldsByAlignment) {
      assert((FirstQueue || Queue.Alignment < LastQueueAlignment) &&
             "bins not in order of descending alignment");
      LastQueueAlignment = Queue.Alignment;
      FirstQueue = false;

      assert(Queue.Head && "queue was empty");
      uint64_t LastSize = ~(uint64_t)0;
      for (auto I = Queue.Head; I; I = Queue.getNext(I)) {
        assert(I->Alignment == Queue.Alignment && "bad field in queue");
        assert(I->Size <= LastSize && "queue not in descending size order");
        LastSize = I->Size;
      }
    }
  };
  checkQueues();
#endif

  /// Helper function to remove a field from a queue.
  auto spliceFromQueue = [&](AlignmentQueue *Queue, Field *Last, Field *Cur) {
    assert(Last ? Queue->getNext(Last) == Cur : Queue->Head == Cur);

    // If we're removing Cur from a non-initial position, splice it out
    // of the linked list.
    if (Last) {
      Last->Scratch = Cur->Scratch;

      // If Cur was the last field in the list, we need to update MinSize.
      // We can just use the last field's size because the list is in
      // descending order of size.
      if (!Cur->Scratch)
        Queue->MinSize = Last->Size;

    // Otherwise, replace the head.
    } else {
      if (auto NewHead = Queue->getNext(Cur))
        Queue->Head = NewHead;

      // If we just emptied the queue, destroy its bin.
      else
        FlexibleFieldsByAlignment.erase(Queue);
    }
  };

  // Do layout into a local array.  Doing this in-place on Fields is
  // not really feasible.
  SmallVector<Field, 16> Layout;
  Layout.reserve(Fields.size());

  // The offset that we're currently looking to insert at (or after).
  uint64_t LastEnd = 0;

  // Helper function to splice Cur out of the given queue and add it
  // to the layout at the given offset.
  auto addToLayout = [&](AlignmentQueue *Queue, Field *Last, Field *Cur,
                         uint64_t Offset) -> bool {
    assert(Offset == alignTo(LastEnd, Cur->Alignment));

    // Splice out.  This potentially invalidates Queue.
    spliceFromQueue(Queue, Last, Cur);

    // Add Cur to the layout.
    Layout.push_back(*Cur);
    Layout.back().Offset = Offset;
    LastEnd = Layout.back().getEndOffset();

    // Always return true so that we can be tail-called.
    return true;
  };

  // Helper function to try to find a field in the given queue that'll
  // fit starting at StartOffset but before EndOffset (if present).
  // Note that this never fails if EndOffset is not provided.
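  //
  // For instance (hypothetical numbers), with StartOffset 10 and EndOffset 16
  // the largest field that can fit is 6 bytes, so a queue whose MinSize is 8
  // is skipped without scanning its contents.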
  auto tryAddFillerFromQueue = [&](AlignmentQueue *Queue,
                                   uint64_t StartOffset,
                                   Optional<uint64_t> EndOffset) -> bool {
    assert(Queue->Head);
    assert(StartOffset == alignTo(LastEnd, Queue->Alignment));
    assert(!EndOffset || StartOffset < *EndOffset);

    // Figure out the maximum size that a field can be, and ignore this
    // queue if there's nothing in it that small.
    auto MaxViableSize =
      (EndOffset ? *EndOffset - StartOffset : ~(uint64_t)0);
    if (Queue->MinSize > MaxViableSize) return false;

    // Find the matching field.  Note that this should always find
    // something because of the MinSize check above.
    for (Field *Cur = Queue->Head, *Last = nullptr; true;
           Last = Cur, Cur = Queue->getNext(Cur)) {
      assert(Cur && "didn't find a match in queue despite its MinSize");
      if (Cur->Size <= MaxViableSize)
        return addToLayout(Queue, Last, Cur, StartOffset);
    }

    llvm_unreachable("didn't find a match in queue despite its MinSize");
  };

  // Helper function to find the "best" flexible-offset field according
  // to the criteria described above.
  auto tryAddBestField = [&](Optional<uint64_t> BeforeOffset) -> bool {
    assert(!BeforeOffset || LastEnd < *BeforeOffset);
    auto QueueB = FlexibleFieldsByAlignment.begin();
    auto QueueE = FlexibleFieldsByAlignment.end();

    // Start by looking for the most-aligned queue that doesn't need any
    // leading padding after LastEnd.
    auto FirstQueueToSearch = QueueB;
    for (; FirstQueueToSearch != QueueE; ++FirstQueueToSearch) {
      if (isAligned(FirstQueueToSearch->Alignment, LastEnd))
        break;
    }

    uint64_t Offset = LastEnd;
    while (true) {
      // Invariant: all of the queues in [FirstQueueToSearch, QueueE)
      // require the same initial padding offset.

      // Search those queues in descending order of alignment for a
      // satisfactory field.
      for (auto Queue = FirstQueueToSearch; Queue != QueueE; ++Queue) {
        if (tryAddFillerFromQueue(Queue, Offset, BeforeOffset))
          return true;
      }

      // Okay, we don't need to scan those again.
      QueueE = FirstQueueToSearch;

      // If we started from the first queue, we're done.
      if (FirstQueueToSearch == QueueB)
        return false;

      // Otherwise, scan backwards to find the most-aligned queue that
      // still has minimal leading padding after LastEnd.  If that
      // minimal padding is already at or past the end point, we're done.
      --FirstQueueToSearch;
      Offset = alignTo(LastEnd, FirstQueueToSearch->Alignment);
      if (BeforeOffset && Offset >= *BeforeOffset)
        return false;
      while (FirstQueueToSearch != QueueB &&
             Offset == alignTo(LastEnd, FirstQueueToSearch[-1].Alignment))
        --FirstQueueToSearch;
    }
  };
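
  // A sketch of that search order with hypothetical queues of alignment 8, 4,
  // and 2 and LastEnd == 10: offset 10 is only 2-aligned, so the alignment-2
  // queue is tried first at offset 10; if nothing there fits, the alignment-4
  // queue is tried at offset 12, and finally the alignment-8 queue at offset
  // 16, giving up early if a candidate offset reaches BeforeOffset.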

  // Phase 1: fill the gaps between fixed-offset fields with the best
  // flexible-offset field that fits.
  for (auto I = Fields.begin(); I != FirstFlexible; ++I) {
    assert(LastEnd <= I->Offset);
    while (LastEnd != I->Offset) {
      if (!tryAddBestField(I->Offset))
        break;
    }
    Layout.push_back(*I);
    LastEnd = I->getEndOffset();
  }

#ifndef NDEBUG
  checkQueues();
#endif

  // Phase 2: repeatedly add the best flexible-offset field until
  // they're all gone.
  while (!FlexibleFieldsByAlignment.empty()) {
    bool Success = tryAddBestField(None);
    assert(Success && "didn't find a field with no fixed limit?");
    (void) Success;
  }

  // Copy the layout back into place.
  assert(Layout.size() == Fields.size());
  memcpy(Fields.data(), Layout.data(),
         Fields.size() * sizeof(OptimizedStructLayoutField));

#ifndef NDEBUG
  // Make a final check that the layout is valid.
  checkValidLayout(Fields, LastEnd, MaxAlign);
#endif

  return std::make_pair(LastEnd, MaxAlign);
}