xref: /freebsd/contrib/llvm-project/lld/MachO/UnwindInfoSection.cpp (revision 19261079b74319502c6ffa1249920079f0f69a72)
1 //===- UnwindInfoSection.cpp ----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "UnwindInfoSection.h"
10 #include "Config.h"
11 #include "InputSection.h"
12 #include "MergedOutputSection.h"
13 #include "OutputSection.h"
14 #include "OutputSegment.h"
15 #include "Symbols.h"
16 #include "SyntheticSections.h"
17 #include "Target.h"
18 
19 #include "lld/Common/ErrorHandler.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/BinaryFormat/MachO.h"
22 
23 using namespace llvm;
24 using namespace llvm::MachO;
25 using namespace lld;
26 using namespace lld::macho;
27 
// Format limits of __TEXT,__unwind_info (see the struct definitions in
// llvm/BinaryFormat/MachO.h and libunwind's compact_unwind_encoding.h):
// common-encoding indexes occupy 0..126; 127..255 are per-page local.
#define COMMON_ENCODINGS_MAX 127
#define COMPACT_ENCODINGS_MAX 256

// Each second-level page is a fixed 4 KiB; capacity in entries depends on
// which page header (regular vs. compressed) precedes the entry array.
#define SECOND_LEVEL_PAGE_BYTES 4096
#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
#define REGULAR_SECOND_LEVEL_ENTRIES_MAX                                       \
  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
    sizeof(unwind_info_regular_second_level_page_header)) /                    \
   sizeof(unwind_info_regular_second_level_entry))
#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX                                    \
  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
    sizeof(unwind_info_compressed_second_level_page_header)) /                 \
   sizeof(uint32_t))

// A compressed entry packs an 8-bit encoding index with a 24-bit offset of
// the function from the page's base function address.
#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK                                      \
  UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
45 
46 // Compact Unwind format is a Mach-O evolution of DWARF Unwind that
47 // optimizes space and exception-time lookup.  Most DWARF unwind
48 // entries can be replaced with Compact Unwind entries, but the ones
49 // that cannot are retained in DWARF form.
50 //
51 // This comment will address macro-level organization of the pre-link
52 // and post-link compact unwind tables. For micro-level organization
53 // pertaining to the bitfield layout of the 32-bit compact unwind
54 // entries, see libunwind/include/mach-o/compact_unwind_encoding.h
55 //
56 // Important clarifying factoids:
57 //
58 // * __LD,__compact_unwind is the compact unwind format for compiler
59 // output and linker input. It is never a final output. It could be
60 // an intermediate output with the `-r` option which retains relocs.
61 //
62 // * __TEXT,__unwind_info is the compact unwind format for final
63 // linker output. It is never an input.
64 //
65 // * __TEXT,__eh_frame is the DWARF format for both linker input and output.
66 //
67 // * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
68 // level) by ascending address, and the pages are referenced by an
69 // index (1st level) in the section header.
70 //
71 // * Following the headers in __TEXT,__unwind_info, the bulk of the
72 // section contains a vector of compact unwind entries
73 // `{functionOffset, encoding}` sorted by ascending `functionOffset`.
74 // Adjacent entries with the same encoding can be folded to great
75 // advantage, achieving a 3-order-of-magnitude reduction in the
76 // number of entries.
77 //
78 // * The __TEXT,__unwind_info format can accommodate up to 127 unique
79 // encodings for the space-efficient compressed format. In practice,
80 // fewer than a dozen unique encodings are used by C++ programs of
81 // all sizes. Therefore, we don't even bother implementing the regular
82 // non-compressed format. Time will tell if anyone in the field ever
83 // overflows the 127-encodings limit.
84 
85 // TODO(gkm): prune __eh_frame entries superseded by __unwind_info
86 // TODO(gkm): how do we align the 2nd-level pages?
87 
// Construct the synthetic __TEXT,__unwind_info output section. Its contents
// are produced later, in finalize(), from the input __LD,__compact_unwind
// entries.
UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
  align = WordSize; // TODO(gkm): make this 4 KiB ?
}
92 
93 bool UnwindInfoSection::isNeeded() const {
94   return (compactUnwindSection != nullptr);
95 }
96 
97 // Scan the __LD,__compact_unwind entries and compute the space needs of
98 // __TEXT,__unwind_info and __TEXT,__eh_frame
99 
// Lay out __TEXT,__unwind_info: relocate the input compact-unwind entries,
// sort & fold them, build the common-encodings table, partition the folded
// entries into 4 KiB second-level pages, and compute the section size.
// Precondition: __TEXT,__text addresses have already been assigned.
void UnwindInfoSection::finalize() {
  if (compactUnwindSection == nullptr)
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical
  // encoding+personality+lsda. Folding is necessary because it reduces
  // the number of CU entries by as much as 3 orders of magnitude!
  compactUnwindSection->finalize();
  assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry64) == 0);
  size_t cuCount =
      compactUnwindSection->getSize() / sizeof(CompactUnwindEntry64);
  cuVector.resize(cuCount);
  // Relocate all __LD,__compact_unwind entries directly into cuVector's
  // storage, reinterpreted as raw bytes.
  compactUnwindSection->writeTo(reinterpret_cast<uint8_t *>(cuVector.data()));

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of pointers to entries and sort & fold that instead.
  cuPtrVector.reserve(cuCount);
  for (const CompactUnwindEntry64 &cuEntry : cuVector)
    cuPtrVector.emplace_back(&cuEntry);
  std::sort(cuPtrVector.begin(), cuPtrVector.end(),
            [](const CompactUnwindEntry64 *a, const CompactUnwindEntry64 *b) {
              return a->functionAddress < b->functionAddress;
            });

  // Fold adjacent entries with matching encoding+personality+lsda.
  // We use three iterators on the same cuPtrVector to fold in-situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  // The semi-open interval [ foldBegin .. foldEnd ) contains a range of
  // entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuPtrVector.begin();
  for (auto foldBegin = cuPtrVector.begin(); foldBegin < cuPtrVector.end();) {
    auto foldEnd = foldBegin;
    while (++foldEnd < cuPtrVector.end() &&
           (*foldBegin)->encoding == (*foldEnd)->encoding &&
           (*foldBegin)->personality == (*foldEnd)->personality &&
           (*foldBegin)->lsda == (*foldEnd)->lsda)
      ;
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuPtrVector.erase(foldWrite, cuPtrVector.end());

  // Count frequencies of the folded encodings
  EncodingMap encodingFrequencies;
  for (auto cuPtrEntry : cuPtrVector)
    encodingFrequencies[cuPtrEntry->encoding]++;

  // Make a vector of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  std::sort(commonEncodings.begin(), commonEncodings.end(),
            [](const std::pair<compact_unwind_encoding_t, size_t> &a,
               const std::pair<compact_unwind_encoding_t, size_t> &b) {
              if (a.second == b.second)
                // When frequencies match, secondarily sort on encoding
                // to maintain parity with validate-unwind-info.py
                return a.first > b.first;
              return a.second > b.second;
            });

  // Truncate the vector to 127 elements.
  // Common encoding indexes are limited to 0..126, while encoding
  // indexes 127..255 are local to each second-level page
  if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
    commonEncodings.resize(COMMON_ENCODINGS_MAX);

  // Create a map from encoding to common-encoding-table index
  for (size_t i = 0; i < commonEncodings.size(); i++)
    commonEncodingIndexes[commonEncodings[i].first] = i;

  // Split folded encodings into pages, where each page is limited by ...
  // (a) 4 KiB capacity
  // (b) 24-bit difference between first & final function address
  // (c) 8-bit compact-encoding-table index,
  //     for which 0..126 references the global common-encodings table,
  //     and 127..255 references a local per-second-level-page table.
  // First we try the compact format and determine how many entries fit.
  // If more entries fit in the regular format, we use that.
  for (size_t i = 0; i < cuPtrVector.size();) {
    secondLevelPages.emplace_back();
    auto &page = secondLevelPages.back();
    page.entryIndex = i;
    // Limit (b): a compressed entry stores only a 24-bit offset from the
    // page's first function address.
    uintptr_t functionAddressMax =
        cuPtrVector[i]->functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    // `n` is the next local-encoding index; it starts past the common table
    // so that indexes 127..255 refer to this page's local table.
    size_t n = commonEncodings.size();
    size_t wordsRemaining =
        SECOND_LEVEL_PAGE_WORDS -
        sizeof(unwind_info_compressed_second_level_page_header) /
            sizeof(uint32_t);
    while (wordsRemaining >= 1 && i < cuPtrVector.size()) {
      const auto *cuPtr = cuPtrVector[i];
      if (cuPtr->functionAddress >= functionAddressMax) {
        break;
      } else if (commonEncodingIndexes.count(cuPtr->encoding) ||
                 page.localEncodingIndexes.count(cuPtr->encoding)) {
        // Known encoding: the entry costs one word.
        i++;
        wordsRemaining--;
      } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
        // New local encoding: one word for the entry plus one word for the
        // local-encodings table slot.
        page.localEncodings.emplace_back(cuPtr->encoding);
        page.localEncodingIndexes[cuPtr->encoding] = n++;
        i++;
        wordsRemaining -= 2;
      } else {
        break;
      }
    }
    page.entryCount = i - page.entryIndex;

    // If this is not the final page, see if it's possible to fit more
    // entries by using the regular format. This can happen when there
    // are many unique encodings, and we saturated the local
    // encoding table early.
    if (i < cuPtrVector.size() &&
        page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
      page.kind = UNWIND_SECOND_LEVEL_REGULAR;
      page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
                                 cuPtrVector.size() - page.entryIndex);
      i = page.entryIndex + page.entryCount;
    } else {
      page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    }
  }

  // compute size of __TEXT,__unwind_info section
  level2PagesOffset =
      sizeof(unwind_info_section_header) +
      commonEncodings.size() * sizeof(uint32_t) +
      personalities.size() * sizeof(uint32_t) +
      // The extra second-level-page entry is for the sentinel
      (secondLevelPages.size() + 1) *
          sizeof(unwind_info_section_header_index_entry) +
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
}
242 
243 // All inputs are relocated and output addresses are known, so write!
244 
245 void UnwindInfoSection::writeTo(uint8_t *buf) const {
246   // section header
247   auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
248   uip->version = 1;
249   uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
250   uip->commonEncodingsArrayCount = commonEncodings.size();
251   uip->personalityArraySectionOffset =
252       uip->commonEncodingsArraySectionOffset +
253       (uip->commonEncodingsArrayCount * sizeof(uint32_t));
254   uip->personalityArrayCount = personalities.size();
255   uip->indexSectionOffset = uip->personalityArraySectionOffset +
256                             (uip->personalityArrayCount * sizeof(uint32_t));
257   uip->indexCount = secondLevelPages.size() + 1;
258 
259   // Common encodings
260   auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
261   for (const auto &encoding : commonEncodings)
262     *i32p++ = encoding.first;
263 
264   // Personalities
265   for (const uint32_t &personality : personalities)
266     *i32p++ = personality;
267 
268   // Level-1 index
269   uint32_t lsdaOffset =
270       uip->indexSectionOffset +
271       uip->indexCount * sizeof(unwind_info_section_header_index_entry);
272   uint64_t l2PagesOffset = level2PagesOffset;
273   auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
274   for (const SecondLevelPage &page : secondLevelPages) {
275     iep->functionOffset = cuPtrVector[page.entryIndex]->functionAddress;
276     iep->secondLevelPagesSectionOffset = l2PagesOffset;
277     iep->lsdaIndexArraySectionOffset = lsdaOffset;
278     iep++;
279     l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
280   }
281   // Level-1 sentinel
282   const CompactUnwindEntry64 &cuEnd = cuVector.back();
283   iep->functionOffset = cuEnd.functionAddress + cuEnd.functionLength;
284   iep->secondLevelPagesSectionOffset = 0;
285   iep->lsdaIndexArraySectionOffset = lsdaOffset;
286   iep++;
287 
288   // LSDAs
289   auto *lep =
290       reinterpret_cast<unwind_info_section_header_lsda_index_entry *>(iep);
291   for (const unwind_info_section_header_lsda_index_entry &lsda : lsdaEntries) {
292     lep->functionOffset = lsda.functionOffset;
293     lep->lsdaOffset = lsda.lsdaOffset;
294   }
295 
296   // Level-2 pages
297   auto *pp = reinterpret_cast<uint32_t *>(lep);
298   for (const SecondLevelPage &page : secondLevelPages) {
299     if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
300       uintptr_t functionAddressBase =
301           cuPtrVector[page.entryIndex]->functionAddress;
302       auto *p2p =
303           reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
304               pp);
305       p2p->kind = page.kind;
306       p2p->entryPageOffset =
307           sizeof(unwind_info_compressed_second_level_page_header);
308       p2p->entryCount = page.entryCount;
309       p2p->encodingsPageOffset =
310           p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
311       p2p->encodingsCount = page.localEncodings.size();
312       auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
313       for (size_t i = 0; i < page.entryCount; i++) {
314         const CompactUnwindEntry64 *cuep = cuPtrVector[page.entryIndex + i];
315         auto it = commonEncodingIndexes.find(cuep->encoding);
316         if (it == commonEncodingIndexes.end())
317           it = page.localEncodingIndexes.find(cuep->encoding);
318         *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
319                 (cuep->functionAddress - functionAddressBase);
320       }
321       if (page.localEncodings.size() != 0)
322         memcpy(ep, page.localEncodings.data(),
323                page.localEncodings.size() * sizeof(uint32_t));
324     } else {
325       auto *p2p =
326           reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
327       p2p->kind = page.kind;
328       p2p->entryPageOffset =
329           sizeof(unwind_info_regular_second_level_page_header);
330       p2p->entryCount = page.entryCount;
331       auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
332       for (size_t i = 0; i < page.entryCount; i++) {
333         const CompactUnwindEntry64 *cuep = cuPtrVector[page.entryIndex + i];
334         *ep++ = cuep->functionAddress;
335         *ep++ = cuep->encoding;
336       }
337     }
338     pp += SECOND_LEVEL_PAGE_WORDS;
339   }
340 }
341