xref: /freebsd/contrib/llvm-project/lld/ELF/OutputSections.cpp (revision 52418fc2be8efa5172b90a3a9e617017173612c4)
1  //===- OutputSections.cpp -------------------------------------------------===//
2  //
3  // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4  // See https://llvm.org/LICENSE.txt for license information.
5  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6  //
7  //===----------------------------------------------------------------------===//
8  
9  #include "OutputSections.h"
10  #include "Config.h"
11  #include "InputFiles.h"
12  #include "LinkerScript.h"
13  #include "Symbols.h"
14  #include "SyntheticSections.h"
15  #include "Target.h"
16  #include "lld/Common/Arrays.h"
17  #include "lld/Common/Memory.h"
18  #include "llvm/BinaryFormat/Dwarf.h"
19  #include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB
20  #include "llvm/Support/Compression.h"
21  #include "llvm/Support/LEB128.h"
22  #include "llvm/Support/Parallel.h"
23  #include "llvm/Support/Path.h"
24  #include "llvm/Support/TimeProfiler.h"
25  #if LLVM_ENABLE_ZLIB
26  // Avoid introducing max as a macro from Windows headers.
27  #define NOMINMAX
28  #include <zlib.h>
29  #endif
30  #if LLVM_ENABLE_ZSTD
31  #include <zstd.h>
32  #endif
33  
34  using namespace llvm;
35  using namespace llvm::dwarf;
36  using namespace llvm::object;
37  using namespace llvm::support::endian;
38  using namespace llvm::ELF;
39  using namespace lld;
40  using namespace lld::elf;
41  
// Out-of-line definitions for the Out singleton's static members (declared in
// OutputSections.h). They describe special pieces of the output file and are
// populated by the Writer during layout.
uint8_t *Out::bufferStart;
PhdrEntry *Out::tlsPhdr;
OutputSection *Out::elfHeader;
OutputSection *Out::programHeaders;
OutputSection *Out::preinitArray;
OutputSection *Out::initArray;
OutputSection *Out::finiArray;

// All output sections of the link, in layout order.
SmallVector<OutputSection *, 0> elf::outputSections;
51  
getPhdrFlags() const52  uint32_t OutputSection::getPhdrFlags() const {
53    uint32_t ret = 0;
54    if (config->emachine != EM_ARM || !(flags & SHF_ARM_PURECODE))
55      ret |= PF_R;
56    if (flags & SHF_WRITE)
57      ret |= PF_W;
58    if (flags & SHF_EXECINSTR)
59      ret |= PF_X;
60    return ret;
61  }
62  
63  template <class ELFT>
writeHeaderTo(typename ELFT::Shdr * shdr)64  void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
65    shdr->sh_entsize = entsize;
66    shdr->sh_addralign = addralign;
67    shdr->sh_type = type;
68    shdr->sh_offset = offset;
69    shdr->sh_flags = flags;
70    shdr->sh_info = info;
71    shdr->sh_link = link;
72    shdr->sh_addr = addr;
73    shdr->sh_size = size;
74    shdr->sh_name = shName;
75  }
76  
// An OutputSection starts with no entry size, byte (1) alignment and empty
// sh_info/sh_link; these fields are refined later as input sections are
// committed (see commitSection).
OutputSection::OutputSection(StringRef name, uint32_t type, uint64_t flags)
    : SectionBase(Output, name, flags, /*Entsize*/ 0, /*Alignment*/ 1, type,
                  /*Info*/ 0, /*Link*/ 0) {}
80  
81  // We allow sections of types listed below to merged into a
82  // single progbits section. This is typically done by linker
83  // scripts. Merging nobits and progbits will force disk space
84  // to be allocated for nobits sections. Other ones don't require
85  // any special treatment on top of progbits, so there doesn't
86  // seem to be a harm in merging them.
87  //
88  // NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
89  // them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
canMergeToProgbits(unsigned type)90  static bool canMergeToProgbits(unsigned type) {
91    return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
92           type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
93           type == SHT_NOTE ||
94           (type == SHT_X86_64_UNWIND && config->emachine == EM_X86_64);
95  }
96  
97  // Record that isec will be placed in the OutputSection. isec does not become
98  // permanent until finalizeInputSections() is called. The function should not be
99  // used after finalizeInputSections() is called. If you need to add an
100  // InputSection post finalizeInputSections(), then you must do the following:
101  //
102  // 1. Find or create an InputSectionDescription to hold InputSection.
103  // 2. Add the InputSection to the InputSectionDescription::sections.
104  // 3. Call commitSection(isec).
recordSection(InputSectionBase * isec)105  void OutputSection::recordSection(InputSectionBase *isec) {
106    partition = isec->partition;
107    isec->parent = this;
108    if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
109      commands.push_back(make<InputSectionDescription>(""));
110    auto *isd = cast<InputSectionDescription>(commands.back());
111    isd->sectionBases.push_back(isec);
112  }
113  
114  // Update fields (type, flags, alignment, etc) according to the InputSection
115  // isec. Also check whether the InputSection flags and type are consistent with
116  // other InputSections.
commitSection(InputSection * isec)117  void OutputSection::commitSection(InputSection *isec) {
118    if (LLVM_UNLIKELY(type != isec->type)) {
119      if (!hasInputSections && !typeIsSet) {
120        type = isec->type;
121      } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
122                 (type == SHT_CREL) != (isec->type == SHT_CREL)) {
123        // Combine mixed SHT_REL[A] and SHT_CREL to SHT_CREL.
124        type = SHT_CREL;
125        if (type == SHT_REL) {
126          if (name.consume_front(".rel"))
127            name = saver().save(".crel" + name);
128        } else if (name.consume_front(".rela")) {
129          name = saver().save(".crel" + name);
130        }
131      } else {
132        if (typeIsSet || !canMergeToProgbits(type) ||
133            !canMergeToProgbits(isec->type)) {
134          // The (NOLOAD) changes the section type to SHT_NOBITS, the intention is
135          // that the contents at that address is provided by some other means.
136          // Some projects (e.g.
137          // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on the
138          // behavior. Other types get an error.
139          if (type != SHT_NOBITS) {
140            errorOrWarn("section type mismatch for " + isec->name + "\n>>> " +
141                        toString(isec) + ": " +
142                        getELFSectionTypeName(config->emachine, isec->type) +
143                        "\n>>> output section " + name + ": " +
144                        getELFSectionTypeName(config->emachine, type));
145          }
146        }
147        if (!typeIsSet)
148          type = SHT_PROGBITS;
149      }
150    }
151    if (!hasInputSections) {
152      // If IS is the first section to be added to this section,
153      // initialize type, entsize and flags from isec.
154      hasInputSections = true;
155      entsize = isec->entsize;
156      flags = isec->flags;
157    } else {
158      // Otherwise, check if new type or flags are compatible with existing ones.
159      if ((flags ^ isec->flags) & SHF_TLS)
160        error("incompatible section flags for " + name + "\n>>> " +
161              toString(isec) + ": 0x" + utohexstr(isec->flags) +
162              "\n>>> output section " + name + ": 0x" + utohexstr(flags));
163    }
164  
165    isec->parent = this;
166    uint64_t andMask =
167        config->emachine == EM_ARM ? (uint64_t)SHF_ARM_PURECODE : 0;
168    uint64_t orMask = ~andMask;
169    uint64_t andFlags = (flags & isec->flags) & andMask;
170    uint64_t orFlags = (flags | isec->flags) & orMask;
171    flags = andFlags | orFlags;
172    if (nonAlloc)
173      flags &= ~(uint64_t)SHF_ALLOC;
174  
175    addralign = std::max(addralign, isec->addralign);
176  
177    // If this section contains a table of fixed-size entries, sh_entsize
178    // holds the element size. If it contains elements of different size we
179    // set sh_entsize to 0.
180    if (entsize != isec->entsize)
181      entsize = 0;
182  }
183  
createMergeSynthetic(StringRef name,uint32_t type,uint64_t flags,uint32_t addralign)184  static MergeSyntheticSection *createMergeSynthetic(StringRef name,
185                                                     uint32_t type,
186                                                     uint64_t flags,
187                                                     uint32_t addralign) {
188    if ((flags & SHF_STRINGS) && config->optimize >= 2)
189      return make<MergeTailSection>(name, type, flags, addralign);
190    return make<MergeNoTailSection>(name, type, flags, addralign);
191  }
192  
// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections(LinkerScript *script) {
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        // Ordinary input sections are carried over unchanged.
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize in here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS section with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        // No compatible synthetic section yet: create one at the position of
        // the first merge section it replaces.
        MergeSyntheticSection *syn =
            createMergeSynthetic(s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations of
        // its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  // Assign output offsets to every merged piece.
  for (auto *ms : mergeSections)
    ms->finalizeContents();
}
260  
sortByOrder(MutableArrayRef<InputSection * > in,llvm::function_ref<int (InputSectionBase * s)> order)261  static void sortByOrder(MutableArrayRef<InputSection *> in,
262                          llvm::function_ref<int(InputSectionBase *s)> order) {
263    std::vector<std::pair<int, InputSection *>> v;
264    for (InputSection *s : in)
265      v.emplace_back(order(s), s);
266    llvm::stable_sort(v, less_first());
267  
268    for (size_t i = 0; i < v.size(); ++i)
269      in[i] = v[i].second;
270  }
271  
getHeaderSize()272  uint64_t elf::getHeaderSize() {
273    if (config->oFormatBinary)
274      return 0;
275    return Out::elfHeader->size + Out::programHeaders->size;
276  }
277  
sort(llvm::function_ref<int (InputSectionBase * s)> order)278  void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
279    assert(isLive());
280    for (SectionCommand *b : commands)
281      if (auto *isd = dyn_cast<InputSectionDescription>(b))
282        sortByOrder(isd->sections, order);
283  }
284  
nopInstrFill(uint8_t * buf,size_t size)285  static void nopInstrFill(uint8_t *buf, size_t size) {
286    if (size == 0)
287      return;
288    unsigned i = 0;
289    if (size == 0)
290      return;
291    std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
292    unsigned num = size / nopFiller.back().size();
293    for (unsigned c = 0; c < num; ++c) {
294      memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
295      i += nopFiller.back().size();
296    }
297    unsigned remaining = size - i;
298    if (!remaining)
299      return;
300    assert(nopFiller[remaining - 1].size() == remaining);
301    memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
302  }
303  
304  // Fill [Buf, Buf + Size) with Filler.
305  // This is used for linker script "=fillexp" command.
fill(uint8_t * buf,size_t size,const std::array<uint8_t,4> & filler)306  static void fill(uint8_t *buf, size_t size,
307                   const std::array<uint8_t, 4> &filler) {
308    size_t i = 0;
309    for (; i + 4 < size; i += 4)
310      memcpy(buf + i, filler.data(), 4);
311    memcpy(buf + i, filler.data(), size - i);
312  }
313  
#if LLVM_ENABLE_ZLIB
// Compress one shard with raw DEFLATE (no zlib header or trailer) at the
// given level. `flush` is Z_SYNC_FLUSH for every shard but the last so that
// the compressed shards can be concatenated, and Z_FINISH for the last one;
// the caller supplies the 2-byte zlib header and combined Adler-32 checksum.
// Returns an empty vector if deflateInit2 fails.
static SmallVector<uint8_t, 0> deflateShard(ArrayRef<uint8_t> in, int level,
                                            int flush) {
  // 15 and 8 are default. windowBits=-15 is negative to generate raw deflate
  // data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    errorOrWarn("--compress-sections: deflateInit2 returned " + Twine(res));
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    // The return value is ignored: the loop simply retries with more output
    // room whenever the buffer was exhausted (avail_out == 0).
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  // Once output space was sufficient, all input must have been consumed.
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif
348  
// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
template <class ELFT> void OutputSection::maybeCompress() {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  // Decide the compression type and level. --compress-sections globs are
  // applied last, so a matching glob overrides --compress-debug-sections.
  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && config->compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *config->compressDebugSections;
  for (auto &[glob, t, l] : config->compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    errorOrWarn("--compress-sections: section '" + name +
                "' with the SHF_ALLOC flag cannot be compressed");
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been wildly accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      // Last ZSTD_compressStream2 result; 0 means the frame is complete.
      // (Shadows the member `size` within this lambda.)
      size_t size;
      do {
        // Allocate a buffer of half of the input size, and grow it by 1.5x if
        // insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1;       // Initial Adler-32 value
    compressedSize += 2;         // zlib header (CMF and FLG bytes)
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  // Keep the uncompressed form if compression would not shrink the section.
  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}
465  
writeInt(uint8_t * buf,uint64_t data,uint64_t size)466  static void writeInt(uint8_t *buf, uint64_t data, uint64_t size) {
467    if (size == 1)
468      *buf = data;
469    else if (size == 2)
470      write16(buf, data);
471    else if (size == 4)
472      write32(buf, data);
473    else if (size == 8)
474      write64(buf, data);
475    else
476      llvm_unreachable("unsupported Size argument");
477  }
478  
479  template <class ELFT>
writeTo(uint8_t * buf,parallel::TaskGroup & tg)480  void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
481    llvm::TimeTraceScope timeScope("Write sections", name);
482    if (type == SHT_NOBITS)
483      return;
484    if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
485      buf += encodeULEB128(crelHeader, buf);
486      memcpy(buf, crelBody.data(), crelBody.size());
487      return;
488    }
489  
490    // If the section is compressed due to
491    // --compress-debug-section/--compress-sections, the content is already known.
492    if (compressed.shards) {
493      auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
494      chdr->ch_type = compressed.type;
495      chdr->ch_size = compressed.uncompressedSize;
496      chdr->ch_addralign = addralign;
497      buf += sizeof(*chdr);
498  
499      auto offsets = std::make_unique<size_t[]>(compressed.numShards);
500      if (compressed.type == ELFCOMPRESS_ZLIB) {
501        buf[0] = 0x78;  // CMF
502        buf[1] = 0x01;  // FLG: best speed
503        offsets[0] = 2; // zlib header
504        write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
505      }
506  
507      // Compute shard offsets.
508      for (size_t i = 1; i != compressed.numShards; ++i)
509        offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
510      parallelFor(0, compressed.numShards, [&](size_t i) {
511        memcpy(buf + offsets[i], compressed.shards[i].data(),
512               compressed.shards[i].size());
513      });
514      return;
515    }
516  
517    // Write leading padding.
518    ArrayRef<InputSection *> sections = getInputSections(*this, storage);
519    std::array<uint8_t, 4> filler = getFiller();
520    bool nonZeroFiller = read32(filler.data()) != 0;
521    if (nonZeroFiller)
522      fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);
523  
524    if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
525      buf += encodeULEB128(crelHeader, buf);
526      memcpy(buf, crelBody.data(), crelBody.size());
527      return;
528    }
529  
530    auto fn = [=](size_t begin, size_t end) {
531      size_t numSections = sections.size();
532      for (size_t i = begin; i != end; ++i) {
533        InputSection *isec = sections[i];
534        if (auto *s = dyn_cast<SyntheticSection>(isec))
535          s->writeTo(buf + isec->outSecOff);
536        else
537          isec->writeTo<ELFT>(buf + isec->outSecOff);
538  
539        // When in Arm BE8 mode, the linker has to convert the big-endian
540        // instructions to little-endian, leaving the data big-endian.
541        if (config->emachine == EM_ARM && !config->isLE && config->armBe8 &&
542            (flags & SHF_EXECINSTR))
543          convertArmInstructionstoBE8(isec, buf + isec->outSecOff);
544  
545        // Fill gaps between sections.
546        if (nonZeroFiller) {
547          uint8_t *start = buf + isec->outSecOff + isec->getSize();
548          uint8_t *end;
549          if (i + 1 == numSections)
550            end = buf + size;
551          else
552            end = buf + sections[i + 1]->outSecOff;
553          if (isec->nopFiller) {
554            assert(target->nopInstrs);
555            nopInstrFill(start, end - start);
556          } else
557            fill(start, end - start, filler);
558        }
559      }
560    };
561  
562    // If there is any BYTE()-family command (rare), write the section content
563    // first then process BYTE to overwrite the filler content. The write is
564    // serial due to the limitation of llvm/Support/Parallel.h.
565    bool written = false;
566    size_t numSections = sections.size();
567    for (SectionCommand *cmd : commands)
568      if (auto *data = dyn_cast<ByteCommand>(cmd)) {
569        if (!std::exchange(written, true))
570          fn(0, numSections);
571        writeInt(buf + data->offset, data->expression().getValue(), data->size);
572      }
573    if (written || !numSections)
574      return;
575  
576    // There is no data command. Write content asynchronously to overlap the write
577    // time with other output sections. Note, if a linker script specifies
578    // overlapping output sections (needs --noinhibit-exec or --no-check-sections
579    // to supress the error), the output may be non-deterministic.
580    const size_t taskSizeLimit = 4 << 20;
581    for (size_t begin = 0, i = 0, taskSize = 0;;) {
582      taskSize += sections[i]->getSize();
583      bool done = ++i == numSections;
584      if (done || taskSize >= taskSizeLimit) {
585        tg.spawn([=] { fn(begin, i); });
586        if (done)
587          break;
588        begin = i;
589        taskSize = 0;
590      }
591    }
592  }
593  
// Finalize sh_link/sh_info/size for a SHT_GROUP output section. `section` is
// the first (representative) input group section, or null.
static void finalizeShtGroup(OutputSection *os, InputSection *section) {
  // sh_link field for SHT_GROUP sections should contain the section index of
  // the symbol table.
  os->link = in.symTab->getParent()->sectionIndex;

  // Without a representative input section there is nothing else to compute.
  if (!section)
    return;

  // sh_info then contain index of an entry in symbol table section which
  // provides signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  // slice(1) skips the leading flag word; "1 +" below accounts for it.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(&idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}
616  
// Encode a single relocation in the CREL delta format and append it to `os`.
// `out` carries the running state (previous offset/symidx/type/addend) against
// which the deltas are computed; it is updated in place.
template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(raw_svector_ostream &os, Elf_Crel<sizeof(uint) == 8> &out,
              uint offset, const Symbol &sym, uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      // Rebase section-symbol addends to be relative to the output section.
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel. The leading byte packs the offset delta
  // (<< 3) with presence bits: 1 = symidx changed, 2 = type changed,
  // 4 = addend changed; bit 0x80 continues the offset delta in a ULEB128.
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  // Each changed field is emitted as a signed delta from the previous value.
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}
659  
// Convert the relocations of one SHT_RELA input section to CREL, appending the
// encoded entries to `os`. Returns the number of relocations converted.
// SHT_REL input is rejected with an error (conversion is unsupported).
template <class ELFT>
static size_t relToCrel(raw_svector_ostream &os, Elf_Crel<ELFT::Is64Bits> &out,
                        InputSection *relSec, InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and unsupported yet.
    errorOrWarn(toString(relSec) + ": REL cannot be converted to CREL");
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    // Offsets are translated into the output section's address space.
    encodeOneCrel<typename ELFT::uint>(
        os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(config->isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}
677  
// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded while REL[A] need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel() {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      // Already CREL: decode and re-encode against the output addresses.
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += config->isLE ? relToCrel<ELF64LE>(os, out, relSec, sec)
                                 : relToCrel<ELF64BE>(os, out, relSec, sec);
    } else {
      totalCount += config->isLE ? relToCrel<ELF32LE>(os, out, relSec, sec)
                                 : relToCrel<ELF32BE>(os, out, relSec, sec);
    }
  }

  // CREL header word: relocation count (<< 3) combined with flag bits (see
  // llvm::ELF::encodeCrel for the layout).
  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}
713  
// Fill in the sh_link/sh_info-dependent fields of this output section once
// section and symbol-table indices are known.
void OutputSection::finalize() {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
    // need to translate the InputSection sh_link to the OutputSection sh_link,
    // all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(this, first);
    return;
  }

  // The remaining fixups only apply to copied relocation sections.
  if (!config->copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the empty
  // synthetic .rela.plt and first can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (config->is64)
      finalizeNonAllocCrel<true>();
    else
      finalizeNonAllocCrel<false>();
  }
}
758  
759  // Returns true if S is in one of the many forms the compiler driver may pass
760  // crtbegin files.
761  //
762  // Gcc uses any of crtbegin[<empty>|S|T].o.
763  // Clang uses Gcc's plus clang_rt.crtbegin[-<arch>|<empty>].o.
764  
isCrt(StringRef s,StringRef beginEnd)765  static bool isCrt(StringRef s, StringRef beginEnd) {
766    s = sys::path::filename(s);
767    if (!s.consume_back(".o"))
768      return false;
769    if (s.consume_front("clang_rt."))
770      return s.consume_front(beginEnd);
771    return s.consume_front(beginEnd) && s.size() <= 1;
772  }
773  
774  // .ctors and .dtors are sorted by this order:
775  //
776  // 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
777  // 2. The section is named ".ctors" or ".dtors" (priority: 65536).
778  // 3. The section has an optional priority value in the form of ".ctors.N" or
779  //    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
780  // 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
781  //
782  // For 2 and 3, the sections are sorted by priority from high to low, e.g.
783  // .ctors (65536), .ctors.00100 (65436), .ctors.00200 (65336).  In GNU ld's
784  // internal linker scripts, the sorting is by string comparison which can
785  // achieve the same goal given the optional priority values are of the same
786  // length.
787  //
788  // In an ideal world, we don't need this function because .init_array and
789  // .ctors are duplicate features (and .init_array is newer.) However, there
790  // are too many real-world use cases of .ctors, so we had no choice to
791  // support that with this rather ad-hoc semantics.
compCtors(const InputSection * a,const InputSection * b)792  static bool compCtors(const InputSection *a, const InputSection *b) {
793    bool beginA = isCrt(a->file->getName(), "crtbegin");
794    bool beginB = isCrt(b->file->getName(), "crtbegin");
795    if (beginA != beginB)
796      return beginA;
797    bool endA = isCrt(a->file->getName(), "crtend");
798    bool endB = isCrt(b->file->getName(), "crtend");
799    if (endA != endB)
800      return endB;
801    return getPriority(a->name) > getPriority(b->name);
802  }
803  
804  // Sorts input sections by the special rules for .ctors and .dtors.
805  // Unfortunately, the rules are different from the one for .{init,fini}_array.
806  // Read the comment above.
sortCtorsDtors()807  void OutputSection::sortCtorsDtors() {
808    assert(commands.size() == 1);
809    auto *isd = cast<InputSectionDescription>(commands[0]);
810    llvm::stable_sort(isd->sections, compCtors);
811  }
812  
813  // If an input string is in the form of "foo.N" where N is a number, return N
814  // (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
815  // greater than the lowest priority.
getPriority(StringRef s)816  int elf::getPriority(StringRef s) {
817    size_t pos = s.rfind('.');
818    if (pos == StringRef::npos)
819      return 65536;
820    int v = 65536;
821    if (to_integer(s.substr(pos + 1), v, 10) &&
822        (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
823      v = 65535 - v;
824    return v;
825  }
826  
getFirstInputSection(const OutputSection * os)827  InputSection *elf::getFirstInputSection(const OutputSection *os) {
828    for (SectionCommand *cmd : os->commands)
829      if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
830        if (!isd->sections.empty())
831          return isd->sections[0];
832    return nullptr;
833  }
834  
835  ArrayRef<InputSection *>
getInputSections(const OutputSection & os,SmallVector<InputSection *,0> & storage)836  elf::getInputSections(const OutputSection &os,
837                        SmallVector<InputSection *, 0> &storage) {
838    ArrayRef<InputSection *> ret;
839    storage.clear();
840    for (SectionCommand *cmd : os.commands) {
841      auto *isd = dyn_cast<InputSectionDescription>(cmd);
842      if (!isd)
843        continue;
844      if (ret.empty()) {
845        ret = isd->sections;
846      } else {
847        if (storage.empty())
848          storage.assign(ret.begin(), ret.end());
849        storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
850      }
851    }
852    return storage.empty() ? ret : ArrayRef(storage);
853  }
854  
855  // Sorts input sections by section name suffixes, so that .foo.N comes
856  // before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
857  // We want to keep the original order if the priorities are the same
858  // because the compiler keeps the original initialization order in a
859  // translation unit and we need to respect that.
860  // For more detail, read the section of the GCC's manual about init_priority.
sortInitFini()861  void OutputSection::sortInitFini() {
862    // Sort sections by priority.
863    sort([](InputSectionBase *s) { return getPriority(s->name); });
864  }
865  
getFiller()866  std::array<uint8_t, 4> OutputSection::getFiller() {
867    if (filler)
868      return *filler;
869    if (flags & SHF_EXECINSTR)
870      return target->trapInstr;
871    return {0, 0, 0, 0};
872  }
873  
// Sanity check for --check-dynamic-relocations: verify that, for every
// dynamic relocation in this (static relocation type) section, the implicit
// addend already written into the output buffer matches the addend we
// computed. Mismatches are reported as internal linker errors.
void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
  assert(config->writeAddends && config->checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  // Sections are independent, so they can be checked in parallel.
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, config->emachine) &&
          (rel.inputSec == in.ppc64LongBranchTarget.get() ||
           rel.inputSec == in.igotPlt.get()))
        continue;
      // Location in the output buffer where the relocated value was written.
      const uint8_t *relocTarget =
          bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        internalLinkerError(
            getErrorLocation(relocTarget),
            "wrote incorrect addend value 0x" + utohexstr(writtenAddend) +
                " instead of 0x" + utohexstr(addend) +
                " for dynamic relocation " + toString(rel.type) +
                " at offset 0x" + utohexstr(rel.getOffset()) +
                (rel.sym ? " against symbol " + toString(*rel.sym) : ""));
    }
  });
}
915  
// Explicit template instantiations for each of the four supported ELF
// layouts (32/64-bit, little/big endian).
template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>();
template void OutputSection::maybeCompress<ELF32BE>();
template void OutputSection::maybeCompress<ELF64LE>();
template void OutputSection::maybeCompress<ELF64BE>();
934