//===- UnwindInfoSection.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "UnwindInfoSection.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "InputSection.h"
#include "OutputSection.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Parallel.h"

#include <numeric>

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;

#define COMMON_ENCODINGS_MAX 127
#define COMPACT_ENCODINGS_MAX 256

#define SECOND_LEVEL_PAGE_BYTES 4096
#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
#define REGULAR_SECOND_LEVEL_ENTRIES_MAX                                       \
  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
    sizeof(unwind_info_regular_second_level_page_header)) /                    \
   sizeof(unwind_info_regular_second_level_entry))
#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX                                    \
  ((SECOND_LEVEL_PAGE_BYTES -                                                  \
    sizeof(unwind_info_compressed_second_level_page_header)) /                 \
   sizeof(uint32_t))

#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK                                      \
  UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
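
// A back-of-the-envelope reading of the macros above, assuming the header
// layouts in llvm/BinaryFormat/MachO.h (regular page header: 8 bytes;
// compressed page header: 12 bytes; regular entry: 8 bytes):
//   REGULAR_SECOND_LEVEL_ENTRIES_MAX    = (4096 - 8) / 8  = 511 entries
//   COMPRESSED_SECOND_LEVEL_ENTRIES_MAX = (4096 - 12) / 4 = 1021 words
// Each compressed entry packs an 8-bit encoding index and a 24-bit function
// offset into one uint32_t, e.g. encoding index 5 at page offset 0x1234:
//   (5 << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) | 0x1234 == 0x05001234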

// Compact Unwind format is a Mach-O evolution of DWARF Unwind that
// optimizes space and exception-time lookup.  Most DWARF unwind
// entries can be replaced with Compact Unwind entries, but the ones
// that cannot are retained in DWARF form.
//
// This comment will address macro-level organization of the pre-link
// and post-link compact unwind tables. For micro-level organization
// pertaining to the bitfield layout of the 32-bit compact unwind
// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
//
// Important clarifying factoids:
//
// * __LD,__compact_unwind is the compact unwind format for compiler
// output and linker input. It is never a final output. It could be
// an intermediate output with the `-r` option which retains relocs.
//
// * __TEXT,__unwind_info is the compact unwind format for final
// linker output. It is never an input.
//
// * __TEXT,__eh_frame is the DWARF format for both linker input and output.
//
// * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
// level) by ascending address, and the pages are referenced by an
// index (1st level) in the section header.
//
// * Following the headers in __TEXT,__unwind_info, the bulk of the
// section contains a vector of compact unwind entries
// `{functionOffset, encoding}` sorted by ascending `functionOffset`.
// Adjacent entries with the same encoding can be folded to great
// advantage, achieving a 3-order-of-magnitude reduction in the
// number of entries.
//
// * The __TEXT,__unwind_info format can accommodate up to 127 unique
// encodings for the space-efficient compressed format. In practice,
// fewer than a dozen unique encodings are used by C++ programs of
// all sizes. The regular (non-compressed) format exists as a fallback
// for pages where compression does worse, e.g. when many unique
// encodings saturate the local encoding table (see finalize() below).
// Time will tell if anyone in the field ever overflows the
// 127-encodings limit.
//
// Refer to the definition of unwind_info_section_header in
// compact_unwind_encoding.h for an overview of the format we are encoding
// here.

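// An illustrative sketch of the folding win (addresses hypothetical): if
// four adjacent functions share one encoding/personality/LSDA triple (call
// it E1) and the next function uses E2,
//
//   {0x1000, E1} {0x1040, E1} {0x10a0, E1} {0x1100, E1} {0x1200, E2}
//
// only the first entry of each run survives folding:
//
//   {0x1000, E1} {0x1200, E2}
//
// This works because the unwinder looks up a PC by binary search, treating
// each entry as covering addresses up to the next entry's functionOffset.
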
// TODO(gkm): prune __eh_frame entries superseded by __unwind_info, PR50410
// TODO(gkm): how do we align the 2nd-level pages?

template <class Ptr> struct CompactUnwindEntry {
  Ptr functionAddress;
  uint32_t functionLength;
  compact_unwind_encoding_t encoding;
  Ptr personality;
  Ptr lsda;
};
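
// A quick size sketch (these asserts are illustrative, not in the original):
// with 8-byte pointers the pre-link entry is 8 + 4 + 4 + 8 + 8 = 32 bytes,
// matching the "32-byte entries" mentioned in finalize() below; with 4-byte
// pointers it is 5 * 4 = 20 bytes.
//   static_assert(sizeof(CompactUnwindEntry<uint64_t>) == 32, "");
//   static_assert(sizeof(CompactUnwindEntry<uint32_t>) == 20, "");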

using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;

struct SecondLevelPage {
  uint32_t kind;
  size_t entryIndex;
  size_t entryCount;
  size_t byteCount;
  std::vector<compact_unwind_encoding_t> localEncodings;
  EncodingMap localEncodingIndexes;
};

template <class Ptr>
class UnwindInfoSectionImpl final : public UnwindInfoSection {
public:
  void prepareRelocations(ConcatInputSection *) override;
  void relocateCompactUnwind(std::vector<CompactUnwindEntry<Ptr>> &);
  Reloc *findLsdaReloc(ConcatInputSection *) const;
  void encodePersonalities();
  void finalize() override;
  void writeTo(uint8_t *buf) const override;

private:
  std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
  EncodingMap commonEncodingIndexes;
  // The entries here will be in the same order as their originating symbols
  // in symbolsVec.
  std::vector<CompactUnwindEntry<Ptr>> cuEntries;
  // Indices into the cuEntries vector.
  std::vector<size_t> cuIndices;
  // Indices of personality functions within the GOT.
  std::vector<Ptr> personalities;
  SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
      personalityTable;
  // Indices into cuEntries for CUEs with a non-null LSDA.
  std::vector<size_t> entriesWithLsda;
  // Map of cuEntries index to an index within the LSDA array.
  DenseMap<size_t, uint32_t> lsdaIndex;
  std::vector<SecondLevelPage> secondLevelPages;
  uint64_t level2PagesOffset = 0;
};

UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
  align = 4;
}

void UnwindInfoSection::prepareRelocations() {
  // This iteration needs to be deterministic, since prepareRelocations may add
  // entries to the GOT. Hence the use of a MapVector for
  // UnwindInfoSection::symbols.
  for (const Defined *d : make_second_range(symbols))
    if (d->unwindEntry)
      prepareRelocations(d->unwindEntry);
}

// Record function symbols that may need entries emitted in __unwind_info, which
// stores unwind data for address ranges.
//
// Note that if several adjacent functions have the same unwind encoding, LSDA,
// and personality function, they share one unwind entry. For this to work,
// functions without unwind info need explicit "no unwind info" unwind entries
// -- else the unwinder would think they have the unwind info of the closest
// function with unwind info right before in the image. Thus, we add function
// symbols for each unique address regardless of whether they have associated
// unwind info (see the sketch after this function).
void UnwindInfoSection::addSymbol(const Defined *d) {
  if (d->unwindEntry)
    allEntriesAreOmitted = false;
  // We don't yet know the final output address of this symbol, but we know
  // that it is uniquely determined by the combination of the isec and value,
  // so we use that as the key here.
  auto p = symbols.insert({{d->isec, d->value}, d});
  // If we have multiple symbols at the same address, only one of them can have
  // an associated CUE.
  if (!p.second && d->unwindEntry) {
    assert(!p.first->second->unwindEntry);
    p.first->second = d;
  }
}
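
// To illustrate the comment above (addresses hypothetical): suppose `f` at
// 0x1000 has unwind info and `g` at 0x2000 has none. Without an explicit
// entry for `g`, a range-based lookup for a PC inside `g` would land on
// `f`'s entry and wrongly apply `f`'s encoding:
//
//   {0x1000, E_f}              // appears to cover g as well -- wrong
//
// With the extra symbol, `g` gets an empty "no unwind info" encoding:
//
//   {0x1000, E_f} {0x2000, 0}  // lookups inside g now correctly find none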

// Compact unwind relocations have different semantics, so we handle them in a
// separate code path from regular relocations. First, we do not wish to add
// rebase opcodes for __LD,__compact_unwind, because that section doesn't
// actually end up in the final binary. Second, personality pointers always
// reside in the GOT and must be treated specially.
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::prepareRelocations(ConcatInputSection *isec) {
  assert(!isec->shouldOmitFromOutput() &&
         "__compact_unwind section should not be omitted");

  // FIXME: Make this skip relocations for CompactUnwindEntries that
  // point to dead-stripped functions. That might save some amount of
  // work. But since there are usually just a few personality functions
  // that are referenced from many places, and at least some of them are
  // likely live, it wouldn't reduce the number of GOT entries.
  for (size_t i = 0; i < isec->relocs.size(); ++i) {
    Reloc &r = isec->relocs[i];
    assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));

    // Functions and LSDA entries always reside in the same object file as the
    // compact unwind entries that reference them, and thus appear as section
    // relocs. There is no need to prepare them. We only prepare relocs for
    // personality functions.
    if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
        offsetof(CompactUnwindEntry<Ptr>, personality))
      continue;

    if (auto *s = r.referent.dyn_cast<Symbol *>()) {
      // Personality functions are nearly always system-defined (e.g.,
      // ___gxx_personality_v0 for C++) and relocated as dylib symbols.  When an
      // application provides its own personality function, it might be
      // referenced by an extern Defined symbol reloc, or a local section reloc.
      if (auto *defined = dyn_cast<Defined>(s)) {
        // XXX(vyng) This is a special case for handling duplicate personality
        // symbols. Note that LD64's behavior is a bit different and is
        // inconsistent with how symbol resolution usually works.
        //
        // So we've decided not to follow it. Instead, simply pick the symbol
        // with the same name from the symbol table to replace the local one.
        //
        // (See discussions/alternatives already considered on D107533)
        if (!defined->isExternal())
          if (Symbol *sym = symtab->find(defined->getName()))
            if (!sym->isLazy())
              r.referent = s = sym;
      }
      if (auto *undefined = dyn_cast<Undefined>(s)) {
        treatUndefinedSymbol(*undefined);
        // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
        if (isa<Undefined>(s))
          continue;
      }

      if (auto *defined = dyn_cast<Defined>(s)) {
        // Check if we have created a synthetic symbol at the same address.
        Symbol *&personality =
            personalityTable[{defined->isec, defined->value}];
        if (personality == nullptr) {
          personality = defined;
          in.got->addEntry(defined);
        } else if (personality != defined) {
          r.referent = personality;
        }
        continue;
      }
      assert(isa<DylibSymbol>(s));
      in.got->addEntry(s);
      continue;
    }

    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
      assert(!isCoalescedWeak(referentIsec));
      // Personality functions can be referenced via section relocations
      // if they live in the same object file. Create placeholder synthetic
      // symbols for them in the GOT.
      Symbol *&s = personalityTable[{referentIsec, r.addend}];
      if (s == nullptr) {
        // This runs after dead stripping, so the noDeadStrip argument does not
        // matter.
        s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
                          r.addend, /*size=*/0, /*isWeakDef=*/false,
                          /*isExternal=*/false, /*isPrivateExtern=*/false,
                          /*isThumb=*/false, /*isReferencedDynamically=*/false,
                          /*noDeadStrip=*/false);
        in.got->addEntry(s);
      }
      r.referent = s;
      r.addend = 0;
    }
  }
}

// Unwind info is finalized only after the address space for __TEXT,__text has
// been assigned, but before the remaining segments are laid out. Moreover, the
// finalization of unwind info depends on the exact addresses that it
// references. So it is safe for compact unwind to reference addresses in
// __TEXT, but not addresses in any other segment.
static ConcatInputSection *checkTextSegment(InputSection *isec) {
  if (isec->getSegName() != segment_names::text)
    error("compact unwind references address in " + toString(isec) +
          " which is not in segment __TEXT");
  // __text should always be a ConcatInputSection.
  return cast<ConcatInputSection>(isec);
}

// We need to apply the relocations to the pre-link compact unwind section
// before converting it to post-link form. There should only be absolute
// relocations here: since we are not emitting the pre-link CU section, there
// is no source address to make a relative location meaningful.
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::relocateCompactUnwind(
    std::vector<CompactUnwindEntry<Ptr>> &cuEntries) {
  parallelForEachN(0, symbolsVec.size(), [&](size_t i) {
    uint8_t *buf = reinterpret_cast<uint8_t *>(cuEntries.data()) +
                   i * sizeof(CompactUnwindEntry<Ptr>);
    const Defined *d = symbolsVec[i].second;
    // Write the functionAddress.
    writeAddress(buf, d->getVA(), sizeof(Ptr) == 8 ? 3 : 2);
    if (!d->unwindEntry)
      return;

    // Write the rest of the CUE.
    memcpy(buf + sizeof(Ptr), d->unwindEntry->data.data(),
           d->unwindEntry->data.size());
    for (const Reloc &r : d->unwindEntry->relocs) {
      uint64_t referentVA = 0;
      if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
        if (!isa<Undefined>(referentSym)) {
          if (auto *defined = dyn_cast<Defined>(referentSym))
            checkTextSegment(defined->isec);
          // At this point in the link, we may not yet know the final address of
          // the GOT, so we just encode the index. We make it a 1-based index so
          // that we can distinguish the null pointer case.
          referentVA = referentSym->gotIndex + 1;
        }
      } else {
        auto *referentIsec = r.referent.get<InputSection *>();
        checkTextSegment(referentIsec);
        referentVA = referentIsec->getVA(r.addend);
      }
      writeAddress(buf + r.offset, referentVA, r.length);
    }
  });
}
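
// A sketch of the placeholder scheme above (values hypothetical): if
// ___gxx_personality_v0 occupies GOT slot 2, relocateCompactUnwind() stores
// 3 (= gotIndex + 1) in the entry's personality field, so that 0 still means
// "no personality". writeTo() undoes the bias when emitting the personality
// array:
//   in.got->addr + (3 - 1) * target->wordSize - in.header->addr
// i.e. the image-relative address of GOT slot 2.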

// There should only be a handful of unique personality pointers, so we can
// encode them as 2-bit indices into a small array.
template <class Ptr> void UnwindInfoSectionImpl<Ptr>::encodePersonalities() {
  for (size_t idx : cuIndices) {
    CompactUnwindEntry<Ptr> &cu = cuEntries[idx];
    if (cu.personality == 0)
      continue;
    // Linear search is fast enough for a small array.
    auto it = find(personalities, cu.personality);
    uint32_t personalityIndex; // 1-based index
    if (it != personalities.end()) {
      personalityIndex = std::distance(personalities.begin(), it) + 1;
    } else {
      personalities.push_back(cu.personality);
      personalityIndex = personalities.size();
    }
    cu.encoding |=
        personalityIndex << countTrailingZeros(
            static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
  }
  if (personalities.size() > 3)
    error("too many personalities (" + std::to_string(personalities.size()) +
          ") for compact unwind to encode");
}
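
// A worked example of the shift above, assuming libunwind's
// UNWIND_PERSONALITY_MASK == 0x30000000: countTrailingZeros(0x30000000) is
// 28, so the 1-based personalityIndex (1..3) lands in bits 28-29 of the
// encoding; e.g. index 2 ORs in 2 << 28 == 0x20000000. A 2-bit field is also
// why more than 3 personalities triggers the error above.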

static bool canFoldEncoding(compact_unwind_encoding_t encoding) {
  // From compact_unwind_encoding.h:
  //  UNWIND_X86_64_MODE_STACK_IND:
  //  A "frameless" (RBP not used as frame pointer) function with a large
  //  constant stack size.  This case is like the previous, except the stack
  //  size is too large to encode in the compact unwind encoding.  Instead it
  //  requires that the function contains "subq $nnnnnnnn,RSP" in its prolog.
  //  The compact encoding contains the offset to the nnnnnnnn value in the
  //  function in UNWIND_X86_64_FRAMELESS_STACK_SIZE.
  // Since the unwinder has to look at the `subq` in the function pointed to
  // by the unwind entry's function address, two functions that have identical
  // unwind info can't be folded under this encoding: each entry needs a
  // unique address.
  static_assert(UNWIND_X86_64_MODE_MASK == UNWIND_X86_MODE_MASK, "");
  static_assert(UNWIND_X86_64_MODE_STACK_IND == UNWIND_X86_MODE_STACK_IND, "");
  if ((target->cpuType == CPU_TYPE_X86_64 || target->cpuType == CPU_TYPE_X86) &&
      (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) {
    // FIXME: Consider passing in the two function addresses and getting
    // their two stack sizes off the `subq` and only returning false if they're
    // actually different.
    return false;
  }
  return true;
}

template <class Ptr>
Reloc *
UnwindInfoSectionImpl<Ptr>::findLsdaReloc(ConcatInputSection *isec) const {
  if (isec == nullptr)
    return nullptr;
  auto it = llvm::find_if(isec->relocs, [](const Reloc &r) {
    return r.offset % sizeof(CompactUnwindEntry<Ptr>) ==
           offsetof(CompactUnwindEntry<Ptr>, lsda);
  });
  if (it == isec->relocs.end())
    return nullptr;
  return &*it;
}

// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame.
template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
  if (symbols.empty())
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical
  // encoding+personality+lsda. Folding is necessary because it reduces
  // the number of CU entries by as much as 3 orders of magnitude!
  cuEntries.resize(symbols.size());
  // The "map" part of the symbols MapVector was only needed for deduplication
  // in addSymbol(). Now that we are done adding, move the contents to a plain
  // std::vector for indexed access.
  symbolsVec = symbols.takeVector();
  relocateCompactUnwind(cuEntries);

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of indices to entries and sort & fold that instead.
  cuIndices.resize(cuEntries.size());
  std::iota(cuIndices.begin(), cuIndices.end(), 0);
  llvm::sort(cuIndices, [&](size_t a, size_t b) {
    return cuEntries[a].functionAddress < cuEntries[b].functionAddress;
  });

  // Fold adjacent entries with matching encoding+personality+lsda.
  // We use three iterators on the same cuIndices to fold in situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries.
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  // The semi-open interval [ foldBegin .. foldEnd ) contains a range of
  // entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuIndices.begin();
  for (auto foldBegin = cuIndices.begin(); foldBegin < cuIndices.end();) {
    auto foldEnd = foldBegin;
    while (++foldEnd < cuIndices.end() &&
           cuEntries[*foldBegin].encoding == cuEntries[*foldEnd].encoding &&
           cuEntries[*foldBegin].personality ==
               cuEntries[*foldEnd].personality &&
           canFoldEncoding(cuEntries[*foldEnd].encoding)) {
      // In most cases, we can just compare the values of cuEntries[*].lsda.
      // However, it is possible for -rename_section to cause the LSDA section
      // (__gcc_except_tab) to be finalized after the unwind info section. In
      // that case, we don't yet have unique addresses for the LSDA entries.
      // So we check their relocations instead.
      // FIXME: should we account for an LSDA at an absolute address? ld64 seems
      // to support it, but it seems unlikely to be used in practice.
      Reloc *lsda1 = findLsdaReloc(symbolsVec[*foldBegin].second->unwindEntry);
      Reloc *lsda2 = findLsdaReloc(symbolsVec[*foldEnd].second->unwindEntry);
      if (lsda1 == nullptr && lsda2 == nullptr)
        continue;
      if (lsda1 == nullptr || lsda2 == nullptr)
        break;
      if (lsda1->referent != lsda2->referent)
        break;
      if (lsda1->addend != lsda2->addend)
        break;
    }
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuIndices.erase(foldWrite, cuIndices.end());

  encodePersonalities();

  // Count frequencies of the folded encodings
  EncodingMap encodingFrequencies;
  for (size_t idx : cuIndices)
    encodingFrequencies[cuEntries[idx].encoding]++;

  // Make a vector of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  llvm::sort(commonEncodings,
             [](const std::pair<compact_unwind_encoding_t, size_t> &a,
                const std::pair<compact_unwind_encoding_t, size_t> &b) {
               if (a.second == b.second)
                 // When frequencies match, secondarily sort on encoding
                 // to maintain parity with validate-unwind-info.py
                 return a.first > b.first;
               return a.second > b.second;
             });

  // Truncate the vector to 127 elements.
  // Common encoding indexes are limited to 0..126, while encoding
  // indexes 127..255 are local to each second-level page
  if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
    commonEncodings.resize(COMMON_ENCODINGS_MAX);

  // Create a map from encoding to common-encoding-table index
  for (size_t i = 0; i < commonEncodings.size(); i++)
    commonEncodingIndexes[commonEncodings[i].first] = i;

  // Split folded encodings into pages, where each page is limited by ...
  // (a) 4 KiB capacity
  // (b) 24-bit difference between first & final function address
  // (c) 8-bit compact-encoding-table index,
  //     for which 0..126 references the global common-encodings table,
  //     and 127..255 references a local per-second-level-page table.
  // First we try the compact format and determine how many entries fit.
  // If more entries fit in the regular format, we use that.
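  // For example (counts hypothetical): if commonEncodings holds 100 entries,
  // an encoding on this page that is not among them gets the next local
  // index n = 100, 101, ... (up to COMPACT_ENCODINGS_MAX - 1 = 255). The
  // 8-bit index in each compressed entry thus refers to the common table
  // when it is < commonEncodingsArrayCount, and to this page's local table
  // otherwise.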
  for (size_t i = 0; i < cuIndices.size();) {
    size_t idx = cuIndices[i];
    secondLevelPages.emplace_back();
    SecondLevelPage &page = secondLevelPages.back();
    page.entryIndex = i;
    uintptr_t functionAddressMax =
        cuEntries[idx].functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    size_t n = commonEncodings.size();
    size_t wordsRemaining =
        SECOND_LEVEL_PAGE_WORDS -
        sizeof(unwind_info_compressed_second_level_page_header) /
            sizeof(uint32_t);
    while (wordsRemaining >= 1 && i < cuIndices.size()) {
      idx = cuIndices[i];
      const CompactUnwindEntry<Ptr> *cuPtr = &cuEntries[idx];
      if (cuPtr->functionAddress >= functionAddressMax) {
        break;
      } else if (commonEncodingIndexes.count(cuPtr->encoding) ||
                 page.localEncodingIndexes.count(cuPtr->encoding)) {
        i++;
        wordsRemaining--;
      } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
        page.localEncodings.emplace_back(cuPtr->encoding);
        page.localEncodingIndexes[cuPtr->encoding] = n++;
        i++;
        wordsRemaining -= 2;
      } else {
        break;
      }
    }
    page.entryCount = i - page.entryIndex;

    // If this is not the final page, see if it's possible to fit more
    // entries by using the regular format. This can happen when there
    // are many unique encodings, and we saturated the local encoding
    // table early.
    if (i < cuIndices.size() &&
        page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
      page.kind = UNWIND_SECOND_LEVEL_REGULAR;
      page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
                                 cuIndices.size() - page.entryIndex);
      i = page.entryIndex + page.entryCount;
    } else {
      page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    }
  }

  for (size_t idx : cuIndices) {
    lsdaIndex[idx] = entriesWithLsda.size();
    const Defined *d = symbolsVec[idx].second;
    if (findLsdaReloc(d->unwindEntry))
      entriesWithLsda.push_back(idx);
  }

  // Compute the size of the __TEXT,__unwind_info section.
  level2PagesOffset = sizeof(unwind_info_section_header) +
                      commonEncodings.size() * sizeof(uint32_t) +
                      personalities.size() * sizeof(uint32_t) +
                      // The extra second-level-page entry is for the sentinel
                      (secondLevelPages.size() + 1) *
                          sizeof(unwind_info_section_header_index_entry) +
                      entriesWithLsda.size() *
                          sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
}

// All inputs are relocated and output addresses are known, so write!

template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
  assert(!cuIndices.empty() && "call only if there is unwind info");

  // Section header
  auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
  uip->version = 1;
  uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
  uip->commonEncodingsArrayCount = commonEncodings.size();
  uip->personalityArraySectionOffset =
      uip->commonEncodingsArraySectionOffset +
      (uip->commonEncodingsArrayCount * sizeof(uint32_t));
  uip->personalityArrayCount = personalities.size();
  uip->indexSectionOffset = uip->personalityArraySectionOffset +
                            (uip->personalityArrayCount * sizeof(uint32_t));
  uip->indexCount = secondLevelPages.size() + 1;

  // Common encodings
  auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
  for (const auto &encoding : commonEncodings)
    *i32p++ = encoding.first;

  // Personalities
  for (Ptr personality : personalities)
    *i32p++ =
        in.got->addr + (personality - 1) * target->wordSize - in.header->addr;

  // Level-1 index
  uint32_t lsdaOffset =
      uip->indexSectionOffset +
      uip->indexCount * sizeof(unwind_info_section_header_index_entry);
  uint64_t l2PagesOffset = level2PagesOffset;
  auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
  for (const SecondLevelPage &page : secondLevelPages) {
    size_t idx = cuIndices[page.entryIndex];
    iep->functionOffset = cuEntries[idx].functionAddress - in.header->addr;
    iep->secondLevelPagesSectionOffset = l2PagesOffset;
    iep->lsdaIndexArraySectionOffset =
        lsdaOffset + lsdaIndex.lookup(idx) *
                         sizeof(unwind_info_section_header_lsda_index_entry);
    iep++;
    l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
  }
  // Level-1 sentinel
  const CompactUnwindEntry<Ptr> &cuEnd = cuEntries[cuIndices.back()];
  iep->functionOffset =
      cuEnd.functionAddress - in.header->addr + cuEnd.functionLength;
  iep->secondLevelPagesSectionOffset = 0;
  iep->lsdaIndexArraySectionOffset =
      lsdaOffset + entriesWithLsda.size() *
                       sizeof(unwind_info_section_header_lsda_index_entry);
  iep++;
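
  // Sketch of the finished level-1 index (offsets hypothetical): with two
  // pages whose first functions are at image offsets 0x0 and 0x8000, and the
  // last function ending at 0x9f00, the three entries written above are
  //   {0x0000, page0, lsda0} {0x8000, page1, lsdaN} {0x9f00, 0, lsdaEnd}
  // The sentinel's functionOffset marks the end of the address range covered
  // by the last page; its zero secondLevelPagesSectionOffset marks "no page".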

  // LSDAs
  auto *lep =
      reinterpret_cast<unwind_info_section_header_lsda_index_entry *>(iep);
  for (size_t idx : entriesWithLsda) {
    const CompactUnwindEntry<Ptr> &cu = cuEntries[idx];
    const Defined *d = symbolsVec[idx].second;
    if (Reloc *r = findLsdaReloc(d->unwindEntry)) {
      uint64_t va;
      if (auto *isec = r->referent.dyn_cast<InputSection *>()) {
        va = isec->getVA(r->addend);
      } else {
        auto *sym = r->referent.get<Symbol *>();
        va = sym->getVA() + r->addend;
      }
      lep->lsdaOffset = va - in.header->addr;
    }
    lep->functionOffset = cu.functionAddress - in.header->addr;
    lep++;
  }

  // Level-2 pages
  auto *pp = reinterpret_cast<uint32_t *>(lep);
  for (const SecondLevelPage &page : secondLevelPages) {
    if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
      uintptr_t functionAddressBase =
          cuEntries[cuIndices[page.entryIndex]].functionAddress;
      auto *p2p =
          reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
              pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_compressed_second_level_page_header);
      p2p->entryCount = page.entryCount;
      p2p->encodingsPageOffset =
          p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
      p2p->encodingsCount = page.localEncodings.size();
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry<Ptr> &cue =
            cuEntries[cuIndices[page.entryIndex + i]];
        auto it = commonEncodingIndexes.find(cue.encoding);
        if (it == commonEncodingIndexes.end())
          it = page.localEncodingIndexes.find(cue.encoding);
        *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
                (cue.functionAddress - functionAddressBase);
      }
      if (!page.localEncodings.empty())
        memcpy(ep, page.localEncodings.data(),
               page.localEncodings.size() * sizeof(uint32_t));
    } else {
      auto *p2p =
          reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_regular_second_level_page_header);
      p2p->entryCount = page.entryCount;
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry<Ptr> &cue =
            cuEntries[cuIndices[page.entryIndex + i]];
        // The functionOffset field is image-relative, like the level-1 index
        // entries above, so subtract the image base.
        *ep++ = cue.functionAddress - in.header->addr;
        *ep++ = cue.encoding;
      }
    }
    pp += SECOND_LEVEL_PAGE_WORDS;
  }
}

UnwindInfoSection *macho::makeUnwindInfoSection() {
  if (target->wordSize == 8)
    return make<UnwindInfoSectionImpl<uint64_t>>();
  else
    return make<UnwindInfoSectionImpl<uint32_t>>();
}