xref: /freebsd/contrib/llvm-project/lld/ELF/OutputSections.cpp (revision d39bd2c1388b520fcba9abed1932acacead60fba)
1 //===- OutputSections.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "OutputSections.h"
10 #include "Config.h"
11 #include "InputFiles.h"
12 #include "LinkerScript.h"
13 #include "Symbols.h"
14 #include "SyntheticSections.h"
15 #include "Target.h"
16 #include "lld/Common/Arrays.h"
17 #include "lld/Common/Memory.h"
18 #include "llvm/BinaryFormat/Dwarf.h"
19 #include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
20 #include "llvm/Support/Compression.h"
21 #include "llvm/Support/Parallel.h"
22 #include "llvm/Support/Path.h"
23 #include "llvm/Support/TimeProfiler.h"
24 #if LLVM_ENABLE_ZLIB
25 #include <zlib.h>
26 #endif
27 #if LLVM_ENABLE_ZSTD
28 #include <zstd.h>
29 #endif
30 
31 using namespace llvm;
32 using namespace llvm::dwarf;
33 using namespace llvm::object;
34 using namespace llvm::support::endian;
35 using namespace llvm::ELF;
36 using namespace lld;
37 using namespace lld::elf;
38 
39 uint8_t *Out::bufferStart;
40 PhdrEntry *Out::tlsPhdr;
41 OutputSection *Out::elfHeader;
42 OutputSection *Out::programHeaders;
43 OutputSection *Out::preinitArray;
44 OutputSection *Out::initArray;
45 OutputSection *Out::finiArray;
46 
47 SmallVector<OutputSection *, 0> elf::outputSections;
48 
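// Returns the PF_* flags of the program header that will map this section.
// Every section is readable except, on EM_ARM, SHF_ARM_PURECODE sections,
// which are execute-only.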
49 uint32_t OutputSection::getPhdrFlags() const {
50   uint32_t ret = 0;
51   if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
52     ret |= PF_R;
53   if (flags & SHF_WRITE)
54     ret |= PF_W;
55   if (flags & SHF_EXECINSTR)
56     ret |= PF_X;
57   return ret;
58 }
59 
60 template <class ELFT>
61 void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
62   shdr->sh_entsize = entsize;
63   shdr->sh_addralign = addralign;
64   shdr->sh_type = type;
65   shdr->sh_offset = offset;
66   shdr->sh_flags = flags;
67   shdr->sh_info = info;
68   shdr->sh_link = link;
69   shdr->sh_addr = addr;
70   shdr->sh_size = size;
71   shdr->sh_name = shName;
72 }
73 
74 OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
75     : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
76                   /*Info*/ 0, /*Link*/ 0) {}
77 
78 // We allow sections of the types listed below to be merged into a
79 // single progbits section. This is typically done by linker
80 // scripts. Merging nobits and progbits will force disk space
81 // to be allocated for nobits sections. The other types don't require
82 // any special treatment on top of progbits, so there seems to be
83 // no harm in merging them.
84 //
85 // NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
86 // them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
87 static bool canMergeToProgbits(unsigned type) {
88   return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
89          type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
90          type == SHT_NOTE ||
91          (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
92 }
93 
94 // Record that isec will be placed in the OutputSection. isec does not become
95 // permanent until finalizeInputSections() is called. This function must not
96 // be used after finalizeInputSections() has been called. If you need to add
97 // an InputSection after finalizeInputSections(), you must do the following
98 // (see the sketch after this function):
99 // 1. Find or create an InputSectionDescription to hold the InputSection.
100 // 2. Add the InputSection to InputSectionDescription::sections.
101 // 3. Call commitSection(isec).
102 void OutputSection::recordSection(InputSectionBase *isec) {
103   partition = isec->partition;
104   isec->parent = this;
105   if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
106     commands.push_back(make<InputSectionDescription>(""));
107   auto *isd = cast<InputSectionDescription>(commands.back());
108   isd->sectionBases.push_back(isec);
109 }
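
// A minimal sketch (not used by the linker) of the three-step recipe described
// above for adding an InputSection after finalizeInputSections() has run. The
// helper name addSectionPostFinalize is illustrative only.
[[maybe_unused]] static void addSectionPostFinalize(OutputSection &osec,
                                                    InputSection *isec) {
  // 1. Find or create an InputSectionDescription to hold the InputSection.
  InputSectionDescription *isd = nullptr;
  if (!osec.commands.empty())
    isd = dyn_cast<InputSectionDescription>(osec.commands.back());
  if (!isd) {
    isd = make<InputSectionDescription>("");
    osec.commands.push_back(isd);
  }
  // 2. Add the InputSection to InputSectionDescription::sections.
  isd->sections.push_back(isec);
  // 3. Let commitSection() update the output section's type, flags, alignment
  //    and entsize to account for the new member.
  osec.commitSection(isec);
}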
110 
111 // Update fields (type, flags, alignment, etc) according to the InputSection
112 // isec. Also check whether the InputSection flags and type are consistent with
113 // other InputSections.
114 void OutputSection::commitSection(InputSection *isec) {
115   if (LLVM_UNLIKELY(type != isec->type)) {
116     if (hasInputSections || typeIsSet) {
117       if (typeIsSet || !canMergeToProgbits(type) ||
118           !canMergeToProgbits(isec->type)) {
119         // A linker script (NOLOAD) directive changes the section type to
120         // SHT_NOBITS; the intention is that the contents at that address are
121         // provided by some other means. Some projects (e.g.
122         // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on this
123         // behavior. Other types get an error.
124         if (type != SHT_NOBITS) {
125           errorOrWarn("section type mismatch for " + isec->name + "\n>>> " +
126                       toString(isec) + ": " +
127                       getELFSectionTypeName(config->emachine, isec->type) +
128                       "\n>>> output section " + name + ": " +
129                       getELFSectionTypeName(config->emachine, type));
130         }
131       }
132       if (!typeIsSet)
133         type = SHT_PROGBITS;
134     } else {
135       type = isec->type;
136     }
137   }
138   if (!hasInputSections) {
139     // If isec is the first section to be added to this output section,
140     // initialize entsize and flags from it.
141     hasInputSections = true;
142     entsize = isec->entsize;
143     flags = isec->flags;
144   } else {
145     // Otherwise, check that the new flags are compatible with the existing ones.
146     if ((flags ^ isec->flags) & SHF_TLS)
147       error("incompatible section flags for " + name + "\n>>> " +
148             toString(isec) + ": 0x" + utohexstr(isec->flags) +
149             "\n>>> output section " + name + ": 0x" + utohexstr(flags));
150   }
151 
152   isec->parent = this;
153   uint64_t andMask =
154       config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
155   uint64_t orMask = ~andMask;
156   uint64_t andFlags = (flags & isec->flags) & andMask;
157   uint64_t orFlags = (flags | isec->flags) & orMask;
158   flags = andFlags | orFlags;
159   if (nonAlloc)
160     flags &= ~(uint64_t)SHF_ALLOC;
161 
162   addralign = std::max(addralign, isec->addralign);
163 
164   // If this section contains a table of fixed-size entries, sh_entsize
165   // holds the element size. If it contains elements of different sizes, we
166   // set sh_entsize to 0.
167   if (entsize != isec->entsize)
168     entsize = 0;
169 }
170 
171 static MergeSyntheticSection *createMergeSynthetic(StringRef name,
172                                                    uint32_t type,
173                                                    uint64_t flags,
174                                                    uint32_t addralign) {
175   if ((flags & SHF_STRINGS) && config->optimize >= 2)
176     return make<MergeTailSection>(name, type, flags, addralign);
177   return make<MergeNoTailSection>(name, type, flags, addralign);
178 }
179 
180 // This function scans over the InputSectionBase list sectionBases to create
181 // InputSectionDescription::sections.
182 //
183 // It removes MergeInputSections from the input section array and adds
184 // new synthetic sections at the location of the first input section
185 // that it replaces. It then finalizes each synthetic section in order
186 // to compute an output offset for each piece of each input section.
187 void OutputSection::finalizeInputSections() {
188   std::vector<MergeSyntheticSection *> mergeSections;
189   for (SectionCommand *cmd : commands) {
190     auto *isd = dyn_cast<InputSectionDescription>(cmd);
191     if (!isd)
192       continue;
193     isd->sections.reserve(isd->sectionBases.size());
194     for (InputSectionBase *s : isd->sectionBases) {
195       MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
196       if (!ms) {
197         isd->sections.push_back(cast<InputSection>(s));
198         continue;
199       }
200 
201       // We do not want to handle sections that are not alive, so just remove
202       // them instead of trying to merge.
203       if (!ms->isLive())
204         continue;
205 
206       auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
207         // While we could create a single synthetic section for two different
208         // values of Entsize, it is better to take Entsize into consideration.
209         //
210         // With a single synthetic section no two pieces with different Entsize
211         // could be equal, so we may as well have two sections.
212         //
213         // Using Entsize in here also allows us to propagate it to the synthetic
214         // section.
215         //
216         // SHF_STRINGS sections with different alignments should not be merged.
217         return sec->flags == ms->flags && sec->entsize == ms->entsize &&
218                (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
219       });
220       if (i == mergeSections.end()) {
221         MergeSyntheticSection *syn =
222             createMergeSynthetic(s->name, ms->type, ms->flags, ms->addralign);
223         mergeSections.push_back(syn);
224         i = std::prev(mergeSections.end());
225         syn->entsize = ms->entsize;
226         isd->sections.push_back(syn);
227       }
228       (*i)->addSection(ms);
229     }
230 
231     // sectionBases should not be used from this point onwards. Clear it to
232     // catch misuses.
233     isd->sectionBases.clear();
234 
235     // Some input sections may be removed from the list after ICF.
236     for (InputSection *s : isd->sections)
237       commitSection(s);
238   }
239   for (auto *ms : mergeSections)
240     ms->finalizeContents();
241 }
242 
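// Stable-sorts the input sections by the integer key returned by order(),
// preserving the relative order of sections with equal keys.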
243 static void sortByOrder(MutableArrayRef<InputSection *> in,
244                         llvm::function_ref<int(InputSectionBase *s)> order) {
245   std::vector<std::pair<int, InputSection *>> v;
246   for (InputSection *s : in)
247     v.emplace_back(order(s), s);
248   llvm::stable_sort(v, less_first());
249 
250   for (size_t i = 0; i < v.size(); ++i)
251     in[i] = v[i].second;
252 }
253 
254 uint64_t elf::getHeaderSize() {
255   if (config->oFormatBinary)
256     return 0;
257   return Out::elfHeader->size + Out::programHeaders->size;
258 }
259 
260 void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
261   assert(isLive());
262   for (SectionCommand *b : commands)
263     if (auto *isd = dyn_cast<InputSectionDescription>(b))
264       sortByOrder(isd->sections, order);
265 }
266 
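// Fill [buf, buf + size) with no-op instructions. target->nopInstrs holds one
// encoding per length (entry N-1 is exactly N bytes long); we tile the largest
// encoding and finish with the entry whose length matches the remainder.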
267 static void nopInstrFill(uint8_t *buf, size_t size) {
268   if (size == 0)
269     return;
270   unsigned i = 0;
271   if (size == 0)
272     return;
273   std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
274   unsigned num = size / nopFiller.back().size();
275   for (unsigned c = 0; c < num; ++c) {
276     memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
277     i += nopFiller.back().size();
278   }
279   unsigned remaining = size - i;
280   if (!remaining)
281     return;
282   assert(nopFiller[remaining - 1].size() == remaining);
283   memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
284 }
285 
286 // Fill [buf, buf + size) with filler.
287 // This is used for the linker script "=fillexp" command.
288 static void fill(uint8_t *buf, size_t size,
289                  const std::array<uint8_t, 4> &filler) {
290   size_t i = 0;
291   for (; i + 4 < size; i += 4)
292     memcpy(buf + i, filler.data(), 4);
293   memcpy(buf + i, filler.data(), size - i);
294 }
295 
296 #if LLVM_ENABLE_ZLIB
297 static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
298                                             int flush) {
299   // windowBits=15 and memLevel=8 are the defaults; windowBits is negated
300   // (-15) to produce raw deflate data with no zlib header or trailer.
301   z_stream s = {};
302   deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
303   s.next_in = const_cast<uint8_t *>(in.data());
304   s.avail_in = in.size();
305 
306   // Allocate a buffer of half of the input size, and grow it by 1.5x if
307   // insufficient.
308   SmallVector<uint8_t, 0> out;
309   size_t pos = 0;
310   out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
311   do {
312     if (pos == out.size())
313       out.resize_for_overwrite(out.size() * 3 / 2);
314     s.next_out = out.data() + pos;
315     s.avail_out = out.size() - pos;
316     (void)deflate(&s, flush);
317     pos = s.next_out - out.data();
318   } while (s.avail_out == 0);
319   assert(s.avail_in == 0);
320 
321   out.truncate(pos);
322   deflateEnd(&s);
323   return out;
324 }
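
// A minimal sketch, for illustration only, of how writeTo() later frames the
// raw deflate shards produced above into one standard zlib stream: a two-byte
// header (CMF 0x78, FLG 0x01), the concatenated shards, and a big-endian
// Adler-32 trailer combined from the per-shard checksums. The helper name is
// an assumption; nothing calls it.
[[maybe_unused]] static SmallVector<uint8_t, 0>
assembleZlibStream(ArrayRef<SmallVector<uint8_t, 0>> shards, uint32_t adler) {
  SmallVector<uint8_t, 0> out = {0x78, 0x01}; // zlib header: CMF, FLG
  for (const SmallVector<uint8_t, 0> &shard : shards)
    out.append(shard.begin(), shard.end()); // raw deflate data, back to back
  uint8_t trailer[4];
  write32be(trailer, adler); // Adler-32 of the uncompressed data
  out.append(trailer, trailer + 4);
  return out;
}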
325 #endif
326 
327 // Compress section contents if this section contains debug info.
328 template <class ELFT> void OutputSection::maybeCompress() {
329   using Elf_Chdr = typename ELFT::Chdr;
330   (void)sizeof(Elf_Chdr);
331 
332   // Compress only DWARF debug sections.
333   if (config->compressDebugSections == DebugCompressionType::None ||
334       (flags & SHF_ALLOC) || !name.starts_with(".debug_") || size == 0)
335     return;
336 
337   llvm::TimeTraceScope timeScope("Compress debug sections");
338   compressed.uncompressedSize = size;
339   auto buf = std::make_unique<uint8_t[]>(size);
340   // Write uncompressed data to a temporary zero-initialized buffer.
341   {
342     parallel::TaskGroup tg;
343     writeTo<ELFT>(buf.get(), tg);
344   }
345 
346 #if LLVM_ENABLE_ZSTD
347   // Use ZSTD's streaming compression API, which permits parallel workers to
348   // work on the stream. See http://facebook.github.io/zstd/zstd_manual.html
349   // "Streaming compression - HowTo".
350   if (config->compressDebugSections == DebugCompressionType::Zstd) {
351     // Allocate a buffer of half of the input size, and grow it by 1.5x if
352     // insufficient.
353     compressed.shards = std::make_unique<SmallVector<uint8_t, 0>[]>(1);
354     SmallVector<uint8_t, 0> &out = compressed.shards[0];
355     out.resize_for_overwrite(std::max<size_t>(size / 2, 32));
356     size_t pos = 0;
357 
358     ZSTD_CCtx *cctx = ZSTD_createCCtx();
359     // Ignore error if zstd was not built with ZSTD_MULTITHREAD.
360     (void)ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers,
361                                  parallel::strategy.compute_thread_count());
362     ZSTD_outBuffer zob = {out.data(), out.size(), 0};
363     ZSTD_EndDirective directive = ZSTD_e_continue;
364     const size_t blockSize = ZSTD_CStreamInSize();
365     do {
366       const size_t n = std::min(static_cast<size_t>(size - pos), blockSize);
367       if (n == size - pos)
368         directive = ZSTD_e_end;
369       ZSTD_inBuffer zib = {buf.get() + pos, n, 0};
370       size_t bytesRemaining = 0;
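      // ZSTD_compressStream2() returns the number of bytes still to be flushed
      // from internal buffers (0 when fully flushed) or an error code. Keep
      // looping, growing the output buffer whenever it fills up, until this
      // block is consumed and, for the final block, everything is flushed.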
371       while (zib.pos != zib.size ||
372              (directive == ZSTD_e_end && bytesRemaining != 0)) {
373         if (zob.pos == zob.size) {
374           out.resize_for_overwrite(out.size() * 3 / 2);
375           zob.dst = out.data();
376           zob.size = out.size();
377         }
378         bytesRemaining = ZSTD_compressStream2(cctx, &zob, &zib, directive);
379         assert(!ZSTD_isError(bytesRemaining));
380       }
381       pos += n;
382     } while (directive != ZSTD_e_end);
383     out.resize(zob.pos);
384     ZSTD_freeCCtx(cctx);
385 
386     size = sizeof(Elf_Chdr) + out.size();
387     flags |= SHF_COMPRESSED;
388     return;
389   }
390 #endif
391 
392 #if LLVM_ENABLE_ZLIB
393   // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
394   // the fastest. If -O2 is given, we use level 6 to compress debug info by a
395   // further ~15%. We found that levels 7 to 9 don't make much difference (~1%
396   // more compression) while taking a significant amount of time (~2x), so
397   // level 6 seems enough.
398   const int level = config->optimize >= 2 ? 6 : Z_BEST_SPEED;
399 
400   // Split input into 1-MiB shards.
401   constexpr size_t shardSize = 1 << 20;
402   auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
403   const size_t numShards = shardsIn.size();
404 
405   // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
406   // shards but the last to flush the output to a byte boundary to be
407   // concatenated with the next shard.
408   auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);
409   auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
410   parallelFor(0, numShards, [&](size_t i) {
411     shardsOut[i] = deflateShard(shardsIn[i], level,
412                                 i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
413     shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
414   });
415 
416   // Update section size and combine Adler-32 checksums.
417   uint32_t checksum = 1;       // Initial Adler-32 value
418   size = sizeof(Elf_Chdr) + 2; // Elf_Chdr and 2-byte zlib header
419   for (size_t i = 0; i != numShards; ++i) {
420     size += shardsOut[i].size();
421     checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
422   }
423   size += 4; // checksum
424 
425   compressed.shards = std::move(shardsOut);
426   compressed.numShards = numShards;
427   compressed.checksum = checksum;
428   flags |= SHF_COMPRESSED;
429 #endif
430 }
431 
432 static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
433   if (size == 1)
434     *buf = data;
435   else if (size == 2)
436     write16(buf, data);
437   else if (size == 4)
438     write32(buf, data);
439   else if (size == 8)
440     write64(buf, data);
441   else
442     llvm_unreachable("unsupported Size argument");
443 }
444 
445 template <class ELFT>
446 void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
447   llvm::TimeTraceScope timeScope("Write sections", name);
448   if (type == SHT_NOBITS)
449     return;
450 
451   // If --compress-debug-sections is specified and this is a debug section,
452   // we've already compressed the section contents. If that's the case,
453   // just write them out.
454   if (compressed.shards) {
455     auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
456     chdr->ch_size = compressed.uncompressedSize;
457     chdr->ch_addralign = addralign;
458     buf += sizeof(*chdr);
459     if (config->compressDebugSections == DebugCompressionType::Zstd) {
460       chdr->ch_type = ELFCOMPRESS_ZSTD;
461       memcpy(buf, compressed.shards[0].data(), compressed.shards[0].size());
462       return;
463     }
464     chdr->ch_type = ELFCOMPRESS_ZLIB;
465 
466     // Compute shard offsets.
467     auto offsets = std::make_unique<size_t[]>(compressed.numShards);
468     offsets[0] = 2; // zlib header
469     for (size_t i = 1; i != compressed.numShards; ++i)
470       offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
471 
472     buf[0] = 0x78; // CMF
473     buf[1] = 0x01; // FLG: best speed
474     parallelFor(0, compressed.numShards, [&](size_t i) {
475       memcpy(buf + offsets[i], compressed.shards[i].data(),
476              compressed.shards[i].size());
477     });
478 
479     write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
480     return;
481   }
482 
483   // Write leading padding.
484   ArrayRef<InputSection *> sections = getInputSections(*this, storage);
485   std::array<uint8_t, 4> filler = getFiller();
486   bool nonZeroFiller = read32(filler.data()) != 0;
487   if (nonZeroFiller)
488     fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);
489 
490   auto fn = [=](size_t begin, size_t end) {
491     size_t numSections = sections.size();
492     for (size_t i = begin; i != end; ++i) {
493       InputSection *isec = sections[i];
494       if (auto *s = dyn_cast<SyntheticSection>(isec))
495         s->writeTo(buf + isec->outSecOff);
496       else
497         isec->writeTo<ELFT>(buf + isec->outSecOff);
498 
499       // When in Arm BE8 mode, the linker has to convert the big-endian
500       // instructions to little-endian, leaving the data big-endian.
501       if (config->emachine == EM_ARM && !config->isLE && config->armBe8 &&
502           (flags & SHF_EXECINSTR))
503         convertArmInstructionstoBE8(isec, buf + isec->outSecOff);
504 
505       // Fill gaps between sections.
506       if (nonZeroFiller) {
507         uint8_t *start = buf + isec->outSecOff + isec->getSize();
508         uint8_t *end;
509         if (i + 1 == numSections)
510           end = buf + size;
511         else
512           end = buf + sections[i + 1]->outSecOff;
513         if (isec->nopFiller) {
514           assert(target->nopInstrs);
515           nopInstrFill(start, end - start);
516         } else
517           fill(start, end - start, filler);
518       }
519     }
520   };
521 
522   // If there is any BYTE()-family command (rare), write the section content
523   // first, then process BYTE to overwrite the filler content. The write is
524   // serial due to a limitation of llvm/Support/Parallel.h.
525   bool written = false;
526   size_t numSections = sections.size();
527   for (SectionCommand *cmd : commands)
528     if (auto *data = dyn_cast<ByteCommand>(cmd)) {
529       if (!std::exchange(written, true))
530         fn(0, numSections);
531       writeInt(buf + data->offset, data->expression().getValue(), data->size);
532     }
533   if (written || !numSections)
534     return;
535 
536   // There is no data command. Write content asynchronously to overlap the write
537   // time with other output sections. Note that if a linker script specifies
538   // overlapping output sections (needs --noinhibit-exec or --no-check-sections
539   // to suppress the error), the output may be non-deterministic.
540   const size_t taskSizeLimit = 4 << 20;
541   for (size_t begin = 0, i = 0, taskSize = 0;;) {
542     taskSize += sections[i]->getSize();
543     bool done = ++i == numSections;
544     if (done || taskSize >= taskSizeLimit) {
545       tg.spawn([=] { fn(begin, i); });
546       if (done)
547         break;
548       begin = i;
549       taskSize = 0;
550     }
551   }
552 }
553 
554 static void finalizeShtGroup(OutputSection *os, InputSection *section) {
555   // sh_link field for SHT_GROUP sections should contain the section index of
556   // the symbol table.
557   os->link = in.symTab->getParent()->sectionIndex;
558 
559   if (!section)
560     return;
561 
562   // sh_info then contains the index of the entry in the symbol table section
563   // that provides the signature of the section group.
564   ArrayRef<Symbol *> symbols = section->file->getSymbols();
565   os->info = in.symTab->getSymbolIndex(symbols[section->info]);
566 
567   // Some group members may be combined or discarded, so we need to compute the
568   // new size. The content will be rewritten in InputSection::copyShtGroup.
569   DenseSet<uint32_t> seen;
570   ArrayRef<InputSectionBase *> sections = section->file->getSections();
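  // The first 32-bit word of a SHT_GROUP section is the GRP_COMDAT flag word;
  // the remaining words are section indices in the input file, hence slice(1).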
571   for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
572     if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
573       seen.insert(osec->sectionIndex);
574   os->size = (1 + seen.size()) * sizeof(uint32_t);
575 }
576 
577 void OutputSection::finalize() {
578   InputSection *first = getFirstInputSection(this);
579 
580   if (flags & SHF_LINK_ORDER) {
581     // We must preserve the link order dependency of sections with the
582     // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
583     // need to translate the InputSection sh_link to the OutputSection sh_link;
584     // all InputSections in the OutputSection have the same dependency.
585     if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
586       link = ex->getLinkOrderDep()->getParent()->sectionIndex;
587     else if (first->flags & SHF_LINK_ORDER)
588       if (auto *d = first->getLinkOrderDep())
589         link = d->getParent()->sectionIndex;
590   }
591 
592   if (type == SHT_GROUP) {
593     finalizeShtGroup(this, first);
594     return;
595   }
596 
597   if (!config->copyRelocs || (type != SHT_RELA && type != SHT_REL))
598     return;
599 
600   // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
601   // Normally 'type' was changed by 'first' so 'first' should be non-null.
602   // However, if the output section is .rela.dyn, 'type' can be set by the empty
603   // synthetic .rela.plt and first can be null.
604   if (!first || isa<SyntheticSection>(first))
605     return;
606 
607   link = in.symTab->getParent()->sectionIndex;
608   // sh_info for SHT_REL[A] sections should contain the section header index of
609   // the section to which the relocation applies.
610   InputSectionBase *s = first->getRelocatedSection();
611   info = s->getOutputSection()->sectionIndex;
612   flags |= SHF_INFO_LINK;
613 }
614 
615 // Returns true if s names a crtbegin/crtend file in one of the many forms in
616 // which the compiler driver may pass it.
617 //
618 // GCC uses any of crtbegin[<empty>|S|T].o.
619 // Clang uses GCC's names plus clang_rt.crtbegin[-<arch>|<empty>].o.
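// For example, crtbegin.o, crtbeginS.o, crtbeginT.o, clang_rt.crtbegin.o and
// clang_rt.crtbegin-x86_64.o all match "crtbegin": after dropping ".o" (and an
// optional "clang_rt." prefix), the GCC spelling allows at most one trailing
// character after the "crtbegin"/"crtend" stem.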
620 
621 static bool isCrt(StringRef s, StringRef beginEnd) {
622   s = sys::path::filename(s);
623   if (!s.consume_back(".o"))
624     return false;
625   if (s.consume_front("clang_rt."))
626     return s.consume_front(beginEnd);
627   return s.consume_front(beginEnd) && s.size() <= 1;
628 }
629 
630 // .ctors and .dtors are sorted by this order:
631 //
632 // 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
633 // 2. The section is named ".ctors" or ".dtors" (priority: 65536).
634 // 3. The section has an optional priority value in the form of ".ctors.N" or
635 //    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
636 // 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
637 //
638 // For 2 and 3, the sections are sorted by priority from high to low, e.g.
639 // .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
640 // internal linker scripts, the sorting is by string comparison, which can
641 // achieve the same goal given that the optional priority values are of the
642 // same length.
643 //
644 // In an ideal world, we wouldn't need this function because .init_array and
645 // .ctors are duplicate features (and .init_array is newer). However, there
646 // are too many real-world uses of .ctors, so we had no choice but to
647 // support them with these rather ad hoc semantics.
648 static bool compCtors(const InputSection *a, const InputSection *b) {
649   bool beginA = isCrt(a->file->getName(), "crtbegin");
650   bool beginB = isCrt(b->file->getName(), "crtbegin");
651   if (beginA != beginB)
652     return beginA;
653   bool endA = isCrt(a->file->getName(), "crtend");
654   bool endB = isCrt(b->file->getName(), "crtend");
655   if (endA != endB)
656     return endB;
657   return getPriority(a->name) > getPriority(b->name);
658 }
659 
660 // Sorts input sections by the special rules for .ctors and .dtors.
661 // Unfortunately, the rules are different from those for .{init,fini}_array.
662 // Read the comment above.
663 void OutputSection::sortCtorsDtors() {
664   assert(commands.size() == 1);
665   auto *isd = cast<InputSectionDescription>(commands[0]);
666   llvm::stable_sort(isd->sections, compCtors);
667 }
668 
669 // If an input string is of the form "foo.N" where N is a number, returns N
670 // (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
671 // greater than the lowest priority.
672 int elf::getPriority(StringRef s) {
673   size_t pos = s.rfind('.');
674   if (pos == StringRef::npos)
675     return 65536;
676   int v = 65536;
677   if (to_integer(s.substr(pos + 1), v, 10) &&
678       (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
679     v = 65535 - v;
680   return v;
681 }
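// For example, getPriority(".init_array.00100") returns 100,
// getPriority(".ctors.00100") returns 65535 - 100 = 65435, and both
// getPriority(".ctors") and getPriority(".text") return 65536.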
682 
683 InputSection *elf::getFirstInputSection(const OutputSection *os) {
684   for (SectionCommand *cmd : os->commands)
685     if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
686       if (!isd->sections.empty())
687         return isd->sections[0];
688   return nullptr;
689 }
690 
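// Returns all input sections of os, concatenated in command order across its
// InputSectionDescriptions. The returned view may point either into a single
// description's section list or into storage, so it remains valid only as long
// as both os and storage do.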
691 ArrayRef<InputSection *>
692 elf::getInputSections(const OutputSection &os,
693                       SmallVector<InputSection *, 0> &storage) {
694   ArrayRef<InputSection *> ret;
695   storage.clear();
696   for (SectionCommand *cmd : os.commands) {
697     auto *isd = dyn_cast<InputSectionDescription>(cmd);
698     if (!isd)
699       continue;
700     if (ret.empty()) {
701       ret = isd->sections;
702     } else {
703       if (storage.empty())
704         storage.assign(ret.begin(), ret.end());
705       storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
706     }
707   }
708   return storage.empty() ? ret : ArrayRef(storage);
709 }
710 
711 // Sorts input sections by section name suffixes, so that .foo.N comes
712 // before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
713 // We want to keep the original order if the priorities are the same
714 // because the compiler keeps the original initialization order in a
715 // translation unit and we need to respect that.
716 // For more detail, read the section of the GCC manual about init_priority.
717 void OutputSection::sortInitFini() {
718   // Sort sections by priority.
719   sort([](InputSectionBase *s) { return getPriority(s->name); });
720 }
721 
722 std::array<uint8_t, 4> OutputSection::getFiller() {
723   if (filler)
724     return *filler;
725   if (flags & SHF_EXECINSTR)
726     return target->trapInstr;
727   return {0, 0, 0, 0};
728 }
729 
730 void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
731   assert(config->writeAddends && config->checkDynamicRelocs);
732   assert(type == SHT_REL || type == SHT_RELA);
733   SmallVector<InputSection *, 0> storage;
734   ArrayRef<InputSection *> sections = getInputSections(*this, storage);
735   parallelFor(0, sections.size(), [&](size_t i) {
736     // When linking with -r or --emit-relocs we might also call this function
737     // for input .rel[a].<sec> sections which we simply pass through to the
738     // output. We skip over those and only look at the synthetic relocation
739     // sections created during linking.
740     const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
741     if (!sec)
742       return;
743     for (const DynamicReloc &rel : sec->relocs) {
744       int64_t addend = rel.addend;
745       const OutputSection *relOsec = rel.inputSec->getOutputSection();
746       assert(relOsec != nullptr && "missing output section for relocation");
747       const uint8_t *relocTarget =
748           bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
749       // For SHT_NOBITS the written addend is always zero.
750       int64_t writtenAddend =
751           relOsec->type == SHT_NOBITS
752               ? 0
753               : target->getImplicitAddend(relocTarget, rel.type);
754       if (addend != writtenAddend)
755         internalLinkerError(
756             getErrorLocation(relocTarget),
757             "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
758                 " instead of 0x" + utohexstr(addend) +
759                 " for dynamic relocation " + toString(rel.type) +
760                 " at offset 0x" + utohexstr(rel.getOffset()) +
761                 (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
762     }
763   });
764 }
765 
766 template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
767 template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
768 template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
769 template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);
770 
771 template void OutputSection::writeTo<ELF32LE>(uint8_t *,
772                                               llvm::parallel::TaskGroup &);
773 template void OutputSection::writeTo<ELF32BE>(uint8_t *,
774                                               llvm::parallel::TaskGroup &);
775 template void OutputSection::writeTo<ELF64LE>(uint8_t *,
776                                               llvm::parallel::TaskGroup &);
777 template void OutputSection::writeTo<ELF64BE>(uint8_t *,
778                                               llvm::parallel::TaskGroup &);
779 
780 template void OutputSection::maybeCompress<ELF32LE>();
781 template void OutputSection::maybeCompress<ELF32BE>();
782 template void OutputSection::maybeCompress<ELF64LE>();
783 template void OutputSection::maybeCompress<ELF64BE>();
784