xref: /freebsd/contrib/llvm-project/lld/ELF/InputSection.cpp (revision 770cf0a5f02dc8983a89c6568d741fbc25baa999)
1 //===- InputSection.cpp ---------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "InputSection.h"
10 #include "Config.h"
11 #include "InputFiles.h"
12 #include "OutputSections.h"
13 #include "Relocations.h"
14 #include "SymbolTable.h"
15 #include "Symbols.h"
16 #include "SyntheticSections.h"
17 #include "Target.h"
18 #include "lld/Common/DWARF.h"
19 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/Compression.h"
21 #include "llvm/Support/Endian.h"
22 #include "llvm/Support/xxhash.h"
23 #include <algorithm>
24 #include <optional>
25 #include <vector>
26 
27 using namespace llvm;
28 using namespace llvm::ELF;
29 using namespace llvm::object;
30 using namespace llvm::support;
31 using namespace llvm::support::endian;
32 using namespace llvm::sys;
33 using namespace lld;
34 using namespace lld::elf;
35 
36 // Returns a string to construct an error message.
37 std::string elf::toStr(Ctx &ctx, const InputSectionBase *sec) {
38   return (toStr(ctx, sec->file) + ":(" + sec->name + ")").str();
39 }
40 
41 const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
42                                      const InputSectionBase *sec) {
43   return s << toStr(s.ctx, sec);
44 }
45 
46 template <class ELFT>
47 static ArrayRef<uint8_t> getSectionContents(ObjFile<ELFT> &file,
48                                             const typename ELFT::Shdr &hdr) {
49   if (hdr.sh_type == SHT_NOBITS)
50     return ArrayRef<uint8_t>(nullptr, hdr.sh_size);
51   return check(file.getObj().getSectionContents(hdr));
52 }
53 
54 InputSectionBase::InputSectionBase(InputFile *file, StringRef name,
55                                    uint32_t type, uint64_t flags, uint32_t link,
56                                    uint32_t info, uint32_t addralign,
57                                    uint32_t entsize, ArrayRef<uint8_t> data,
58                                    Kind sectionKind)
59     : SectionBase(sectionKind, file, name, type, flags, link, info, addralign,
60                   entsize),
61       bss(0), decodedCrel(0), keepUnique(0), nopFiller(0),
62       content_(data.data()), size(data.size()) {
63   // In order to reduce memory allocation, we assume that mergeable
64   // sections are smaller than 4 GiB, which is not an unreasonable
65   // assumption as of 2017.
66   if (sectionKind == SectionBase::Merge && content().size() > UINT32_MAX)
67     ErrAlways(getCtx()) << this << ": section too large";
68 
69   // The ELF spec states that a value of 0 means the section has
70   // no alignment constraints.
71   uint32_t v = std::max<uint32_t>(addralign, 1);
72   if (!isPowerOf2_64(v)) {
73     Err(getCtx()) << this << ": sh_addralign is not a power of 2";
74     v = 1;
75   }
76   this->addralign = v;
77 
78   // If SHF_COMPRESSED is set, parse the header. The legacy .zdebug format is no
79   // longer supported.
80   if (flags & SHF_COMPRESSED) {
81     Ctx &ctx = file->ctx;
82     invokeELFT(parseCompressedHeader, ctx);
83   }
84 }
85 
86 // SHF_INFO_LINK and SHF_GROUP are normally resolved and not copied to the
87 // output section. However, for relocatable linking without
88 // --force-group-allocation, the SHF_GROUP flag and section groups are retained.
89 static uint64_t getFlags(Ctx &ctx, uint64_t flags) {
90   flags &= ~(uint64_t)SHF_INFO_LINK;
91   if (ctx.arg.resolveGroups)
92     flags &= ~(uint64_t)SHF_GROUP;
93   return flags;
94 }
95 
// Constructs an InputSectionBase directly from an ELF section header,
// delegating to the core constructor above. SHF_INFO_LINK/SHF_GROUP are
// filtered by getFlags, and the contents are fetched via getSectionContents.
template <class ELFT>
InputSectionBase::InputSectionBase(ObjFile<ELFT> &file,
                                   const typename ELFT::Shdr &hdr,
                                   StringRef name, Kind sectionKind)
    : InputSectionBase(&file, name, hdr.sh_type,
                       getFlags(file.ctx, hdr.sh_flags), hdr.sh_link,
                       hdr.sh_info, hdr.sh_addralign, hdr.sh_entsize,
                       getSectionContents(file, hdr), sectionKind) {
  // We reject object files having insanely large alignments even though
  // they are allowed by the spec. I think 4GB is a reasonable limitation.
  // We might want to relax this in the future.
  if (hdr.sh_addralign > UINT32_MAX) {
    Err(getCtx()) << &file << ": section sh_addralign is too large";
    addralign = 1;
  }
}
112 
113 size_t InputSectionBase::getSize() const {
114   if (auto *s = dyn_cast<SyntheticSection>(this))
115     return s->getSize();
116   return size - bytesDropped;
117 }
118 
119 template <class ELFT>
120 static void decompressAux(Ctx &ctx, const InputSectionBase &sec, uint8_t *out,
121                           size_t size) {
122   auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(sec.content_);
123   auto compressed = ArrayRef<uint8_t>(sec.content_, sec.compressedSize)
124                         .slice(sizeof(typename ELFT::Chdr));
125   if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
126                     ? compression::zlib::decompress(compressed, out, size)
127                     : compression::zstd::decompress(compressed, out, size))
128     Err(ctx) << &sec << ": decompress failed: " << std::move(e);
129 }
130 
131 void InputSectionBase::decompress() const {
132   Ctx &ctx = getCtx();
133   uint8_t *buf = makeThreadLocalN<uint8_t>(size);
134   invokeELFT(decompressAux, ctx, *this, buf, size);
135   content_ = buf;
136   compressed = false;
137 }
138 
// Returns the relocations of this section's companion relocation section
// (identified by relSecIdx). Classic SHT_REL/SHT_RELA sections are viewed in
// place; SHT_CREL (compact) relocations are either handed back as a lazy
// iterator when the caller supports it, or eagerly decoded into a cached
// RELA array.
template <class ELFT>
RelsOrRelas<ELFT> InputSectionBase::relsOrRelas(bool supportsCrel) const {
  if (relSecIdx == 0)
    return {};
  RelsOrRelas<ELFT> ret;
  auto *f = cast<ObjFile<ELFT>>(file);
  typename ELFT::Shdr shdr = f->template getELFShdrs<ELFT>()[relSecIdx];
  if (shdr.sh_type == SHT_CREL) {
    // Return an iterator if supported by caller.
    if (supportsCrel) {
      ret.crels = Relocs<typename ELFT::Crel>(
          (const uint8_t *)f->mb.getBufferStart() + shdr.sh_offset);
      return ret;
    }
    // Reference (not copy) into the file's section array: the cache update
    // below is observed through it.
    InputSectionBase *const &relSec = f->getSections()[relSecIdx];
    // Otherwise, allocate a buffer to hold the decoded RELA relocations. When
    // called for the first time, relSec is null (without --emit-relocs) or an
    // InputSection with false decodedCrel.
    if (!relSec || !cast<InputSection>(relSec)->decodedCrel) {
      auto *sec = makeThreadLocal<InputSection>(*f, shdr, name);
      f->cacheDecodedCrel(relSecIdx, sec);
      sec->type = SHT_RELA;
      sec->decodedCrel = true;

      // Expand each compact entry into a full ELFT::Rela held in
      // thread-local storage, and repoint the cached section's content at
      // the decoded array.
      RelocsCrel<ELFT::Is64Bits> entries(sec->content_);
      sec->size = entries.size() * sizeof(typename ELFT::Rela);
      auto *relas = makeThreadLocalN<typename ELFT::Rela>(entries.size());
      sec->content_ = reinterpret_cast<uint8_t *>(relas);
      for (auto [i, r] : llvm::enumerate(entries)) {
        relas[i].r_offset = r.r_offset;
        relas[i].setSymbolAndType(r.r_symidx, r.r_type, false);
        relas[i].r_addend = r.r_addend;
      }
    }
    ret.relas = {ArrayRef(
        reinterpret_cast<const typename ELFT::Rela *>(relSec->content_),
        relSec->size / sizeof(typename ELFT::Rela))};
    return ret;
  }

  // Classic REL/RELA: view the relocation records directly in the mapped
  // file buffer.
  const void *content = f->mb.getBufferStart() + shdr.sh_offset;
  size_t size = shdr.sh_size;
  if (shdr.sh_type == SHT_REL) {
    ret.rels = {ArrayRef(reinterpret_cast<const typename ELFT::Rel *>(content),
                         size / sizeof(typename ELFT::Rel))};
  } else {
    assert(shdr.sh_type == SHT_RELA);
    ret.relas = {
        ArrayRef(reinterpret_cast<const typename ELFT::Rela *>(content),
                 size / sizeof(typename ELFT::Rela))};
  }
  return ret;
}
192 
// The per-link context is reachable through the owning file.
Ctx &SectionBase::getCtx() const { return file->ctx; }
194 
195 uint64_t SectionBase::getOffset(uint64_t offset) const {
196   switch (kind()) {
197   case Output: {
198     auto *os = cast<OutputSection>(this);
199     // For output sections we treat offset -1 as the end of the section.
200     return offset == uint64_t(-1) ? os->size : offset;
201   }
202   case Class:
203     llvm_unreachable("section classes do not have offsets");
204   case Regular:
205   case Synthetic:
206   case Spill:
207     return cast<InputSection>(this)->outSecOff + offset;
208   case EHFrame: {
209     // Two code paths may reach here. First, clang_rt.crtbegin.o and GCC
210     // crtbeginT.o may reference the start of an empty .eh_frame to identify the
211     // start of the output .eh_frame. Just return offset.
212     //
213     // Second, InputSection::copyRelocations on .eh_frame. Some pieces may be
214     // discarded due to GC/ICF. We should compute the output section offset.
215     const EhInputSection *es = cast<EhInputSection>(this);
216     if (!es->content().empty())
217       if (InputSection *isec = es->getParent())
218         return isec->outSecOff + es->getParentOffset(offset);
219     return offset;
220   }
221   case Merge:
222     const MergeInputSection *ms = cast<MergeInputSection>(this);
223     if (InputSection *isec = ms->getParent())
224       return isec->outSecOff + ms->getParentOffset(offset);
225     return ms->getParentOffset(offset);
226   }
227   llvm_unreachable("invalid section kind");
228 }
229 
230 uint64_t SectionBase::getVA(uint64_t offset) const {
231   const OutputSection *out = getOutputSection();
232   return (out ? out->addr : 0) + getOffset(offset);
233 }
234 
235 OutputSection *SectionBase::getOutputSection() {
236   InputSection *sec;
237   if (auto *isec = dyn_cast<InputSection>(this))
238     sec = isec;
239   else if (auto *ms = dyn_cast<MergeInputSection>(this))
240     sec = ms->getParent();
241   else if (auto *eh = dyn_cast<EhInputSection>(this))
242     sec = eh->getParent();
243   else
244     return cast<OutputSection>(this);
245   return sec ? sec->getParent() : nullptr;
246 }
247 
248 // When a section is compressed, `rawData` consists with a header followed
249 // by zlib-compressed data. This function parses a header to initialize
250 // `uncompressedSize` member and remove the header from `rawData`.
251 template <typename ELFT>
252 void InputSectionBase::parseCompressedHeader(Ctx &ctx) {
253   flags &= ~(uint64_t)SHF_COMPRESSED;
254 
255   // New-style header
256   if (content().size() < sizeof(typename ELFT::Chdr)) {
257     ErrAlways(ctx) << this << ": corrupted compressed section";
258     return;
259   }
260 
261   auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content().data());
262   if (hdr->ch_type == ELFCOMPRESS_ZLIB) {
263     if (!compression::zlib::isAvailable())
264       ErrAlways(ctx) << this
265                      << " is compressed with ELFCOMPRESS_ZLIB, but lld is "
266                         "not built with zlib support";
267   } else if (hdr->ch_type == ELFCOMPRESS_ZSTD) {
268     if (!compression::zstd::isAvailable())
269       ErrAlways(ctx) << this
270                      << " is compressed with ELFCOMPRESS_ZSTD, but lld is "
271                         "not built with zstd support";
272   } else {
273     ErrAlways(ctx) << this << ": unsupported compression type ("
274                    << uint32_t(hdr->ch_type) << ")";
275     return;
276   }
277 
278   compressed = true;
279   compressedSize = size;
280   size = hdr->ch_size;
281   addralign = std::max<uint32_t>(hdr->ch_addralign, 1);
282 }
283 
284 InputSection *InputSectionBase::getLinkOrderDep() const {
285   assert(flags & SHF_LINK_ORDER);
286   if (!link)
287     return nullptr;
288   return cast<InputSection>(file->getSections()[link]);
289 }
290 
291 // Find a symbol that encloses a given location.
292 Defined *InputSectionBase::getEnclosingSymbol(uint64_t offset,
293                                               uint8_t type) const {
294   if (file->isInternal())
295     return nullptr;
296   for (Symbol *b : file->getSymbols())
297     if (Defined *d = dyn_cast<Defined>(b))
298       if (d->section == this && d->value <= offset &&
299           offset < d->value + d->size && (type == 0 || type == d->type))
300         return d;
301   return nullptr;
302 }
303 
304 // Returns an object file location string. Used to construct an error message.
305 std::string InputSectionBase::getLocation(uint64_t offset) const {
306   std::string secAndOffset =
307       (name + "+0x" + Twine::utohexstr(offset) + ")").str();
308 
309   std::string filename = toStr(getCtx(), file);
310   if (Defined *d = getEnclosingFunction(offset))
311     return filename + ":(function " + toStr(getCtx(), *d) + ": " + secAndOffset;
312 
313   return filename + ":(" + secAndOffset;
314 }
315 
316 static void printFileLine(const ELFSyncStream &s, StringRef path,
317                           unsigned line) {
318   StringRef filename = path::filename(path);
319   s << filename << ':' << line;
320   if (filename != path)
321     s << " (" << path << ':' << line << ')';
322 }
323 
// Print a source location that looks like this:
//
//   foo.c:42 (/home/alice/possibly/very/long/path/foo.c:42)
//
// Resolution order: DWARF line table for the offset, then the DWARF variable
// location for the symbol name, then the STT_FILE source file name as a last
// resort. Non-object files produce no output.
const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
                                     InputSectionBase::SrcMsg &&msg) {
  auto &sec = msg.sec;
  if (sec.file->kind() != InputFile::ObjKind)
    return s;
  auto &file = cast<ELFFileBase>(*sec.file);

  // First, look up the DWARF line table. The section index qualifies the
  // offset; if the section cannot be found, fall back to UndefSection.
  ArrayRef<InputSectionBase *> sections = file.getSections();
  auto it = llvm::find(sections, &sec);
  uint64_t sectionIndex = it != sections.end()
                              ? it - sections.begin()
                              : object::SectionedAddress::UndefSection;
  DWARFCache *dwarf = file.getDwarf();
  if (auto info = dwarf->getDILineInfo(msg.offset, sectionIndex))
    printFileLine(s, info->FileName, info->Line);
  else if (auto fileLine = dwarf->getVariableLoc(msg.sym.getName()))
    // If it failed, look up again as a variable.
    printFileLine(s, fileLine->first, fileLine->second);
  else
    // File.sourceFile contains STT_FILE symbol, and that is a last resort.
    s << file.sourceFile;
  return s;
}
351 
352 // Returns a filename string along with an optional section name. This
353 // function is intended to be used for constructing an error
354 // message. The returned message looks like this:
355 //
356 //   path/to/foo.o:(function bar)
357 //
358 // or
359 //
360 //   path/to/foo.o:(function bar) in archive path/to/bar.a
361 const ELFSyncStream &elf::operator<<(const ELFSyncStream &s,
362                                      InputSectionBase::ObjMsg &&msg) {
363   auto *sec = msg.sec;
364   s << sec->file->getName() << ":(";
365 
366   // Find a symbol that encloses a given location. getObjMsg may be called
367   // before ObjFile::initSectionsAndLocalSyms where local symbols are
368   // initialized.
369   if (Defined *d = sec->getEnclosingSymbol(msg.offset))
370     s << d;
371   else
372     s << sec->name << "+0x" << Twine::utohexstr(msg.offset);
373   s << ')';
374   if (!sec->file->archiveName.empty())
375     s << (" in archive " + sec->file->archiveName).str();
376   return s;
377 }
378 
// A zero-content placeholder mirroring `source`'s metadata, associated with
// the given InputSectionDescription (presumably for the section-spilling
// machinery — the contents are empty, only name/type/flags/alignment carry
// over).
// NOTE(review): source.addralign is passed for BOTH the addralign and the
// entsize parameters of the InputSection constructor. Likely harmless for a
// zero-size placeholder, but confirm source.entsize was not intended.
PotentialSpillSection::PotentialSpillSection(const InputSectionBase &source,
                                             InputSectionDescription &isd)
    : InputSection(source.file, source.name, source.type, source.flags,
                   source.addralign, source.addralign, {}, SectionBase::Spill),
      isd(&isd) {}
384 
// Sentinel for sections removed from the output; it is the only InputSection
// allowed to have a null file (see the assert in the constructor below), and
// is identified by address comparison.
InputSection InputSection::discarded(nullptr, "", 0, 0, 0, 0,
                                     ArrayRef<uint8_t>());
387 
// Constructs an InputSection not backed by an ELF section header; sh_link
// and sh_info default to 0. `f` may be null only for the static `discarded`
// sentinel above.
InputSection::InputSection(InputFile *f, StringRef name, uint32_t type,
                           uint64_t flags, uint32_t addralign, uint32_t entsize,
                           ArrayRef<uint8_t> data, Kind k)
    : InputSectionBase(f, name, type, flags,
                       /*link=*/0, /*info=*/0, addralign, /*entsize=*/entsize,
                       data, k) {
  // Only the `discarded` sentinel may carry a null file.
  assert(f || this == &InputSection::discarded);
}
396 
// Constructs a regular InputSection directly from an ELF section header.
template <class ELFT>
InputSection::InputSection(ObjFile<ELFT> &f, const typename ELFT::Shdr &header,
                           StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Regular) {}
401 
402 // Copy SHT_GROUP section contents. Used only for the -r option.
403 template <class ELFT> void InputSection::copyShtGroup(uint8_t *buf) {
404   // ELFT::Word is the 32-bit integral type in the target endianness.
405   using u32 = typename ELFT::Word;
406   ArrayRef<u32> from = getDataAs<u32>();
407   auto *to = reinterpret_cast<u32 *>(buf);
408 
409   // The first entry is not a section number but a flag.
410   *to++ = from[0];
411 
412   // Adjust section numbers because section numbers in an input object files are
413   // different in the output. We also need to handle combined or discarded
414   // members.
415   ArrayRef<InputSectionBase *> sections = file->getSections();
416   DenseSet<uint32_t> seen;
417   for (uint32_t idx : from.slice(1)) {
418     OutputSection *osec = sections[idx]->getOutputSection();
419     if (osec && seen.insert(osec->sectionIndex).second)
420       *to++ = osec->sectionIndex;
421   }
422 }
423 
424 InputSectionBase *InputSection::getRelocatedSection() const {
425   if (file->isInternal() || !isStaticRelSecType(type))
426     return nullptr;
427   ArrayRef<InputSectionBase *> sections = file->getSections();
428   return sections[info];
429 }
430 
// Dispatcher for -r/--emit-relocs relocation copying: decides which
// relocation list the worker overload below consumes — the section's internal
// Relocation vector (which linker relaxation may have rewritten) or the raw
// file relocations mapped lazily into Relocation objects.
template <class ELFT, class RelTy>
void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf) {
  bool linkerRelax =
      ctx.arg.relax && is_contained({EM_RISCV, EM_LOONGARCH}, ctx.arg.emachine);
  if (!ctx.arg.relocatable && (linkerRelax || ctx.arg.branchToBranch)) {
    // On LoongArch and RISC-V, relaxation might change relocations: copy
    // from internal ones that are updated by relaxation.
    InputSectionBase *sec = getRelocatedSection();
    copyRelocations<ELFT, RelTy>(
        ctx, buf,
        llvm::make_range(sec->relocations.begin(), sec->relocations.end()));
  } else {
    // Convert the raw relocations in the input section into Relocation objects
    // suitable to be used by copyRelocations below.
    struct MapRel {
      Ctx &ctx;
      const ObjFile<ELFT> &file;
      Relocation operator()(const RelTy &rel) const {
        // RelExpr is not used so set to a dummy value.
        return Relocation{R_NONE, rel.getType(ctx.arg.isMips64EL), rel.r_offset,
                          getAddend<ELFT>(rel), &file.getRelocTargetSym(rel)};
      }
    };

    // A mapped_iterator converts each RelTy on the fly as the copy loop
    // iterates; no intermediate vector is materialized.
    using RawRels = ArrayRef<RelTy>;
    using MapRelIter =
        llvm::mapped_iterator<typename RawRels::iterator, MapRel>;
    auto mapRel = MapRel{ctx, *getFile<ELFT>()};
    RawRels rawRels = getDataAs<RelTy>();
    auto rels = llvm::make_range(MapRelIter(rawRels.begin(), mapRel),
                                 MapRelIter(rawRels.end(), mapRel));
    copyRelocations<ELFT, RelTy>(ctx, buf, rels);
  }
}
465 
// This is used for -r and --emit-relocs. We can't use memcpy to copy
// relocations because we need to update symbol table offset and section index
// for each relocation. So we copy relocations one by one.
template <class ELFT, class RelTy, class RelIt>
void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf,
                                   llvm::iterator_range<RelIt> rels) {
  const TargetInfo &target = *ctx.target;
  InputSectionBase *sec = getRelocatedSection();
  (void)sec->contentMaybeDecompress(); // uncompress if needed

  for (const Relocation &rel : rels) {
    RelType type = rel.type;
    const ObjFile<ELFT> *file = getFile<ELFT>();
    Symbol &sym = *rel.sym;

    // `p` is viewed as a Rela, but `buf` advances by sizeof(RelTy): for REL
    // input only the fields that exist in RelTy are written (r_addend is
    // guarded by RelTy::HasAddend below).
    auto *p = reinterpret_cast<typename ELFT::Rela *>(buf);
    buf += sizeof(RelTy);

    if (RelTy::HasAddend)
      p->r_addend = rel.addend;

    // Output section VA is zero for -r, so r_offset is an offset within the
    // section, but for --emit-relocs it is a virtual address.
    p->r_offset = sec->getVA(rel.offset);
    p->setSymbolAndType(ctx.in.symTab->getSymbolIndex(sym), type,
                        ctx.arg.isMips64EL);

    if (sym.type == STT_SECTION) {
      // We combine multiple section symbols into only one per
      // section. This means we have to update the addend. That is
      // trivial for Elf_Rela, but for Elf_Rel we have to write to the
      // section data. We do that by adding to the Relocation vector.

      // .eh_frame is horribly special and can reference discarded sections. To
      // avoid having to parse and recreate .eh_frame, we just replace any
      // relocation in it pointing to discarded sections with R_*_NONE, which
      // hopefully creates a frame that is ignored at runtime. Also, don't warn
      // on .gcc_except_table and debug sections.
      //
      // See the comment in maybeReportUndefined for PPC32 .got2 and PPC64 .toc
      auto *d = dyn_cast<Defined>(&sym);
      if (!d) {
        if (!isDebugSection(*sec) && sec->name != ".eh_frame" &&
            sec->name != ".gcc_except_table" && sec->name != ".got2" &&
            sec->name != ".toc") {
          uint32_t secIdx = cast<Undefined>(sym).discardedSecIdx;
          // NOTE: this local `sec` (a section header) shadows the outer
          // InputSectionBase *sec for the remainder of this scope.
          Elf_Shdr_Impl<ELFT> sec = file->template getELFShdrs<ELFT>()[secIdx];
          Warn(ctx) << "relocation refers to a discarded section: "
                    << CHECK2(file->getObj().getSectionName(sec), file)
                    << "\n>>> referenced by " << getObjMsg(p->r_offset);
        }
        // Neutralize the relocation (symbol 0, type 0).
        p->setSymbolAndType(0, 0, false);
        continue;
      }
      SectionBase *section = d->section;
      assert(section->isLive());

      // For REL, the addend is stored in the section data at the relocated
      // location rather than in the record.
      int64_t addend = rel.addend;
      const uint8_t *bufLoc = sec->content().begin() + rel.offset;
      if (!RelTy::HasAddend)
        addend = target.getImplicitAddend(bufLoc, type);

      if (ctx.arg.emachine == EM_MIPS &&
          target.getRelExpr(type, sym, bufLoc) == RE_MIPS_GOTREL) {
        // Some MIPS relocations depend on "gp" value. By default,
        // this value has 0x7ff0 offset from a .got section. But
        // relocatable files produced by a compiler or a linker
        // might redefine this default value and we must use it
        // for a calculation of the relocation result. When we
        // generate EXE or DSO it's trivial. Generating a relocatable
        // output is more difficult case because the linker does
        // not calculate relocations in this mode and loses
        // individual "gp" values used by each input object file.
        // As a workaround we add the "gp" value to the relocation
        // addend and save it back to the file.
        addend += sec->getFile<ELFT>()->mipsGp0;
      }

      if (RelTy::HasAddend)
        p->r_addend =
            sym.getVA(ctx, addend) - section->getOutputSection()->addr;
      // For SHF_ALLOC sections relocated by REL, append a relocation to
      // sec->relocations so that relocateAlloc transitively called by
      // writeSections will update the implicit addend. Non-SHF_ALLOC sections
      // utilize relocateNonAlloc to process raw relocations and do not need
      // this sec->relocations change.
      else if (ctx.arg.relocatable && (sec->flags & SHF_ALLOC) &&
               type != target.noneRel)
        sec->addReloc({R_ABS, type, rel.offset, addend, &sym});
    } else if (ctx.arg.emachine == EM_PPC && type == R_PPC_PLTREL24 &&
               p->r_addend >= 0x8000 && sec->file->ppc32Got2) {
      // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24
      // indicates that r30 is relative to the input section .got2
      // (r_addend>=0x8000), after linking, r30 should be relative to the output
      // section .got2 . To compensate for the shift, adjust r_addend by
      // ppc32Got->outSecOff.
      p->r_addend += sec->file->ppc32Got2->outSecOff;
    }
  }
}
566 
// The ARM and AArch64 ABI handle pc-relative relocations to undefined weak
// references specially. The general rule is that the value of the symbol in
// this context is the address of the place P. A further special case is that
// branch relocations to an undefined weak reference resolve to the next
// instruction.
static uint32_t getARMUndefinedRelativeWeakVA(RelType type, uint32_t a,
                                              uint32_t p) {
  switch (type) {
  // Unresolved branch relocations to weak references resolve to next
  // instruction, this will be either 2 or 4 bytes on from P.
  case R_ARM_THM_JUMP8:
  case R_ARM_THM_JUMP11:
    return p + 2 + a;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    return p + 4 + a;
  case R_ARM_THM_CALL:
    // We don't want an interworking BLX to ARM: +4 reaches the next Thumb
    // instruction, and the extra +1 presumably keeps bit 0 set so the target
    // stays in Thumb state — confirm against AAELF32.
    return p + 5 + a;
  // Unresolved non branch pc-relative relocations
  // R_ARM_TARGET2 which can be resolved relatively is not present as it never
  // targets a weak-reference.
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_PC12:
    return p + a;
  // p + a is unrepresentable as negative immediates can't be encoded.
  case R_ARM_THM_PC8:
    return p;
  }
  llvm_unreachable("ARM pc-relative relocation expected\n");
}
608 
// The comment above getARMUndefinedRelativeWeakVA applies to this function:
// branches to undefined weak references resolve to the next instruction
// (P + 4), all other pc-relative relocations resolve to P itself.
static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
  switch (type) {
  // Unresolved branch relocations to weak references resolve to next
  // instruction, this is 4 bytes on from P.
  case R_AARCH64_CALL26:
  case R_AARCH64_CONDBR19:
  case R_AARCH64_JUMP26:
  case R_AARCH64_TSTBR14:
    return p + 4;
  // Unresolved non branch pc-relative relocations
  case R_AARCH64_PREL16:
  case R_AARCH64_PREL32:
  case R_AARCH64_PREL64:
  case R_AARCH64_ADR_PREL_LO21:
  case R_AARCH64_LD_PREL_LO19:
  case R_AARCH64_PLT32:
    return p;
  }
  llvm_unreachable("AArch64 pc-relative relocation expected\n");
}
630 
631 static uint64_t getRISCVUndefinedRelativeWeakVA(uint64_t type, uint64_t p) {
632   switch (type) {
633   case R_RISCV_BRANCH:
634   case R_RISCV_JAL:
635   case R_RISCV_CALL:
636   case R_RISCV_CALL_PLT:
637   case R_RISCV_RVC_BRANCH:
638   case R_RISCV_RVC_JUMP:
639   case R_RISCV_PLT32:
640     return p;
641   default:
642     return 0;
643   }
644 }
645 
646 // ARM SBREL relocations are of the form S + A - B where B is the static base
647 // The ARM ABI defines base to be "addressing origin of the output segment
648 // defining the symbol S". We defined the "addressing origin"/static base to be
649 // the base of the PT_LOAD segment containing the Sym.
650 // The procedure call standard only defines a Read Write Position Independent
651 // RWPI variant so in practice we should expect the static base to be the base
652 // of the RW segment.
653 static uint64_t getARMStaticBase(const Symbol &sym) {
654   OutputSection *os = sym.getOutputSection();
655   if (!os || !os->ptLoad || !os->ptLoad->firstSec) {
656     Err(os->ctx) << "SBREL relocation to " << sym.getName()
657                  << " without static base";
658     return 0;
659   }
660   return os->ptLoad->firstSec->addr;
661 }
662 
// For RE_RISCV_PC_INDIRECT (R_RISCV_PCREL_LO12_{I,S}), the symbol actually
// points the corresponding R_RISCV_PCREL_HI20 relocation, and the target VA
// is calculated using PCREL_HI20's symbol.
//
// This function returns the R_RISCV_PCREL_HI20 relocation from the
// R_RISCV_PCREL_LO12 relocation. Returns null (after reporting an error) if
// no associated HI20 relocation can be found.
static Relocation *getRISCVPCRelHi20(Ctx &ctx, const InputSectionBase *loSec,
                                     const Relocation &loReloc) {
  uint64_t addend = loReloc.addend;
  Symbol *sym = loReloc.sym;

  // The LO12 target must be a defined symbol inside some section; an
  // absolute symbol has no section to search for the HI20 relocation.
  const Defined *d = cast<Defined>(sym);
  if (!d->section) {
    Err(ctx) << loSec->getLocation(loReloc.offset)
             << ": R_RISCV_PCREL_LO12 relocation points to an absolute symbol: "
             << sym->getName();
    return nullptr;
  }
  InputSection *hiSec = cast<InputSection>(d->section);

  // Diagnostics only: processing continues even for these suspicious inputs.
  if (hiSec != loSec)
    Err(ctx) << loSec->getLocation(loReloc.offset)
             << ": R_RISCV_PCREL_LO12 relocation points to a symbol '"
             << sym->getName() << "' in a different section '" << hiSec->name
             << "'";

  if (addend != 0)
    Warn(ctx) << loSec->getLocation(loReloc.offset)
              << ": non-zero addend in R_RISCV_PCREL_LO12 relocation to "
              << hiSec->getObjMsg(d->value) << " is ignored";

  // Relocations are sorted by offset, so we can use std::equal_range to do
  // binary search.
  Relocation hiReloc;
  hiReloc.offset = d->value;
  auto range =
      std::equal_range(hiSec->relocs().begin(), hiSec->relocs().end(), hiReloc,
                       [](const Relocation &lhs, const Relocation &rhs) {
                         return lhs.offset < rhs.offset;
                       });

  // Several relocations may share the offset; accept any HI20-family type.
  for (auto it = range.first; it != range.second; ++it)
    if (it->type == R_RISCV_PCREL_HI20 || it->type == R_RISCV_GOT_HI20 ||
        it->type == R_RISCV_TLS_GD_HI20 || it->type == R_RISCV_TLS_GOT_HI20)
      return &*it;

  Err(ctx) << loSec->getLocation(loReloc.offset)
           << ": R_RISCV_PCREL_LO12 relocation points to "
           << hiSec->getObjMsg(d->value)
           << " without an associated R_RISCV_PCREL_HI20 relocation";
  return nullptr;
}
715 
// A TLS symbol's virtual address is relative to the TLS segment. Add a
// target-specific adjustment to produce a thread-pointer-relative offset,
// i.e. the value that, added to TP at run time, reaches the symbol.
static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
  // On targets that support TLSDESC, _TLS_MODULE_BASE_@tpoff = 0.
  if (&s == ctx.sym.tlsModuleBase)
    return 0;

  // There are 2 TLS layouts. Among targets we support, x86 uses TLS Variant 2
  // while most others use Variant 1. At run time TP will be aligned to p_align.

  // Variant 1. TP will be followed by an optional gap (which is the size of 2
  // pointers on ARM/AArch64, 0 on other targets), followed by alignment
  // padding, then the static TLS blocks. The alignment padding is added so that
  // (TP + gap + padding) is congruent to p_vaddr modulo p_align.
  //
  // Variant 2. Static TLS blocks, followed by alignment padding are placed
  // before TP. The alignment padding is added so that (TP - padding -
  // p_memsz) is congruent to p_vaddr modulo p_align.
  PhdrEntry *tls = ctx.tlsPhdr;
  if (!tls) // Reported an error in getSymVA
    return 0;
  switch (ctx.arg.emachine) {
    // Variant 1: gap of 2 words after TP, then alignment padding.
  case EM_ARM:
  case EM_AARCH64:
    return s.getVA(ctx, 0) + ctx.arg.wordsize * 2 +
           ((tls->p_vaddr - ctx.arg.wordsize * 2) & (tls->p_align - 1));
  case EM_MIPS:
  case EM_PPC:
  case EM_PPC64:
    // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
    // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
    // data and 0xf000 of the program's TLS segment.
    return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
  case EM_LOONGARCH:
  case EM_RISCV:
    // See the comment in handleTlsRelocation. For TLSDESC=>IE,
    // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} also reach here. While
    // `tls` may be null, the return value is ignored.
    if (s.type != STT_TLS)
      return 0;
    return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1));

    // Variant 2: static TLS blocks and padding sit below TP.
  case EM_HEXAGON:
  case EM_S390:
  case EM_SPARCV9:
  case EM_386:
  case EM_X86_64:
    return s.getVA(ctx, 0) - tls->p_memsz -
           ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
  default:
    llvm_unreachable("unhandled ctx.arg.emachine");
  }
}
771 
// Compute the value that relocation `r` resolves to when applied at the
// location with virtual address `p`. The result is later encoded into the
// instruction/word by the target's relocate routine; this function only
// performs the per-RelExpr arithmetic (symbol VA, GOT/PLT addresses, page
// deltas, TLS offsets, ...). `a` below is the relocation addend.
uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
                                            uint64_t p) const {
  int64_t a = r.addend;
  switch (r.expr) {
  case R_ABS:
  case R_DTPREL:
  case R_RELAX_TLS_LD_TO_LE_ABS:
  case R_RELAX_GOT_PC_NOPIC:
  case RE_AARCH64_AUTH:
  case RE_RISCV_ADD:
  case RE_RISCV_LEB128:
    // Plain symbol value plus addend.
    return r.sym->getVA(ctx, a);
  case R_ADDEND:
    return a;
  case R_RELAX_HINT:
    return 0;
  case RE_ARM_SBREL:
    return r.sym->getVA(ctx, a) - getARMStaticBase(*r.sym);
  case R_GOT:
  case RE_AARCH64_AUTH_GOT:
  case R_RELAX_TLS_GD_TO_IE_ABS:
    return r.sym->getGotVA(ctx) + a;
  case RE_LOONGARCH_GOT:
    // The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc r.type
    // for their page offsets. The arithmetics are different in the TLS case
    // so we have to duplicate some logic here.
    if (r.sym->hasFlag(NEEDS_TLSGD) && r.type != R_LARCH_TLS_IE_PC_LO12)
      // Like RE_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
      return ctx.in.got->getGlobalDynAddr(*r.sym) + a;
    return r.sym->getGotVA(ctx) + a;
  case R_GOTONLY_PC:
    return ctx.in.got->getVA() + a - p;
  case R_GOTPLTONLY_PC:
    return ctx.in.gotPlt->getVA() + a - p;
  case R_GOTREL:
  case RE_PPC64_RELAX_TOC:
    return r.sym->getVA(ctx, a) - ctx.in.got->getVA();
  case R_GOTPLTREL:
    return r.sym->getVA(ctx, a) - ctx.in.gotPlt->getVA();
  case R_GOTPLT:
  case R_RELAX_TLS_GD_TO_IE_GOTPLT:
    return r.sym->getGotVA(ctx) + a - ctx.in.gotPlt->getVA();
  case R_TLSLD_GOT_OFF:
  case R_GOT_OFF:
  case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
    return r.sym->getGotOffset(ctx) + a;
  case RE_AARCH64_GOT_PAGE_PC:
  case RE_AARCH64_AUTH_GOT_PAGE_PC:
  case RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
    // 4096-byte page delta for ADRP-style addressing.
    return getAArch64Page(r.sym->getGotVA(ctx) + a) - getAArch64Page(p);
  case RE_AARCH64_GOT_PAGE:
    return r.sym->getGotVA(ctx) + a - getAArch64Page(ctx.in.got->getVA());
  case R_GOT_PC:
  case RE_AARCH64_AUTH_GOT_PC:
  case R_RELAX_TLS_GD_TO_IE:
    return r.sym->getGotVA(ctx) + a - p;
  case R_GOTPLT_GOTREL:
    return r.sym->getGotPltVA(ctx) + a - ctx.in.got->getVA();
  case R_GOTPLT_PC:
    return r.sym->getGotPltVA(ctx) + a - p;
  case RE_LOONGARCH_GOT_PAGE_PC:
  case RE_LOONGARCH_RELAX_TLS_GD_TO_IE_PAGE_PC:
    if (r.sym->hasFlag(NEEDS_TLSGD))
      return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
                                   r.type);
    return getLoongArchPageDelta(r.sym->getGotVA(ctx) + a, p, r.type);
  case RE_MIPS_GOTREL:
    return r.sym->getVA(ctx, a) - ctx.in.mipsGot->getGp(file);
  case RE_MIPS_GOT_GP:
    return ctx.in.mipsGot->getGp(file) + a;
  case RE_MIPS_GOT_GP_PC: {
    // R_MIPS_LO16 expression has RE_MIPS_GOT_GP_PC r.type iff the target
    // is _gp_disp symbol. In that case we should use the following
    // formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at
    // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
    // microMIPS variants of these relocations use slightly different
    // expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
    // to correctly handle less-significant bit of the microMIPS symbol.
    uint64_t v = ctx.in.mipsGot->getGp(file) + a - p;
    if (r.type == R_MIPS_LO16 || r.type == R_MICROMIPS_LO16)
      v += 4;
    if (r.type == R_MICROMIPS_LO16 || r.type == R_MICROMIPS_HI16)
      v -= 1;
    return v;
  }
  case RE_MIPS_GOT_LOCAL_PAGE:
    // If relocation against MIPS local symbol requires GOT entry, this entry
    // should be initialized by 'page address'. This address is high 16-bits
    // of sum the symbol's value and the addend.
    return ctx.in.mipsGot->getVA() +
           ctx.in.mipsGot->getPageEntryOffset(file, *r.sym, a) -
           ctx.in.mipsGot->getGp(file);
  case RE_MIPS_GOT_OFF:
  case RE_MIPS_GOT_OFF32:
    // In case of MIPS if a GOT relocation has non-zero addend this addend
    // should be applied to the GOT entry content not to the GOT entry offset.
    // That is why we use separate expression r.type.
    return ctx.in.mipsGot->getVA() +
           ctx.in.mipsGot->getSymEntryOffset(file, *r.sym, a) -
           ctx.in.mipsGot->getGp(file);
  case RE_MIPS_TLSGD:
    return ctx.in.mipsGot->getVA() +
           ctx.in.mipsGot->getGlobalDynOffset(file, *r.sym) -
           ctx.in.mipsGot->getGp(file);
  case RE_MIPS_TLSLD:
    return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) -
           ctx.in.mipsGot->getGp(file);
  case RE_AARCH64_PAGE_PC: {
    // An undefined weak resolves to the place itself so the page delta is 0.
    uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(ctx, a);
    return getAArch64Page(val) - getAArch64Page(p);
  }
  case RE_RISCV_PC_INDIRECT: {
    // %pcrel_lo12: the value is computed from the paired %pcrel_hi
    // relocation; see getRISCVPCRelHi20.
    if (const Relocation *hiRel = getRISCVPCRelHi20(ctx, this, r))
      return getRelocTargetVA(ctx, *hiRel, r.sym->getVA(ctx));
    return 0;
  }
  case RE_LOONGARCH_PAGE_PC:
    return getLoongArchPageDelta(r.sym->getVA(ctx, a), p, r.type);
  case R_PC:
  case RE_ARM_PCA: {
    uint64_t dest;
    if (r.expr == RE_ARM_PCA)
      // Some PC relative ARM (Thumb) relocations align down the place.
      p = p & 0xfffffffc;
    if (r.sym->isUndefined()) {
      // On ARM and AArch64 a branch to an undefined weak resolves to the next
      // instruction, otherwise the place. On RISC-V, resolve an undefined weak
      // to the same instruction to cause an infinite loop (making the user
      // aware of the issue) while ensuring no overflow.
      // Note: if the symbol is hidden, its binding has been converted to local,
      // so we just check isUndefined() here.
      if (ctx.arg.emachine == EM_ARM)
        dest = getARMUndefinedRelativeWeakVA(r.type, a, p);
      else if (ctx.arg.emachine == EM_AARCH64)
        dest = getAArch64UndefinedRelativeWeakVA(r.type, p) + a;
      else if (ctx.arg.emachine == EM_PPC)
        dest = p;
      else if (ctx.arg.emachine == EM_RISCV)
        dest = getRISCVUndefinedRelativeWeakVA(r.type, p) + a;
      else
        dest = r.sym->getVA(ctx, a);
    } else {
      dest = r.sym->getVA(ctx, a);
    }
    return dest - p;
  }
  case R_PLT:
    return r.sym->getPltVA(ctx) + a;
  case R_PLT_PC:
  case RE_PPC64_CALL_PLT:
    return r.sym->getPltVA(ctx) + a - p;
  case RE_LOONGARCH_PLT_PAGE_PC:
    return getLoongArchPageDelta(r.sym->getPltVA(ctx) + a, p, r.type);
  case R_PLT_GOTPLT:
    return r.sym->getPltVA(ctx) + a - ctx.in.gotPlt->getVA();
  case R_PLT_GOTREL:
    return r.sym->getPltVA(ctx) + a - ctx.in.got->getVA();
  case RE_PPC32_PLTREL:
    // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
    // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
    // target VA computation.
    return r.sym->getPltVA(ctx) - p;
  case RE_PPC64_CALL: {
    uint64_t symVA = r.sym->getVA(ctx, a);
    // If we have an undefined weak symbol, we might get here with a symbol
    // address of zero. That could overflow, but the code must be unreachable,
    // so don't bother doing anything at all.
    if (!symVA)
      return 0;

    // PPC64 V2 ABI describes two entry points to a function. The global entry
    // point is used for calls where the caller and callee (may) have different
    // TOC base pointers and r2 needs to be modified to hold the TOC base for
    // the callee. For local calls the caller and callee share the same
    // TOC base and so the TOC pointer initialization code should be skipped by
    // branching to the local entry point.
    return symVA - p +
           getPPC64GlobalEntryToLocalEntryOffset(ctx, r.sym->stOther);
  }
  case RE_PPC64_TOCBASE:
    return getPPC64TocBase(ctx) + a;
  case R_RELAX_GOT_PC:
  case RE_PPC64_RELAX_GOT_PC:
    return r.sym->getVA(ctx, a) - p;
  case R_RELAX_TLS_GD_TO_LE:
  case R_RELAX_TLS_IE_TO_LE:
  case R_RELAX_TLS_LD_TO_LE:
  case R_TPREL:
    // It is not very clear what to return if the symbol is undefined. With
    // --noinhibit-exec, even a non-weak undefined reference may reach here.
    // Just return A, which matches R_ABS, and the behavior of some dynamic
    // loaders.
    if (r.sym->isUndefined())
      return a;
    return getTlsTpOffset(ctx, *r.sym) + a;
  case R_RELAX_TLS_GD_TO_LE_NEG:
  case R_TPREL_NEG:
    if (r.sym->isUndefined())
      return a;
    return -getTlsTpOffset(ctx, *r.sym) + a;
  case R_SIZE:
    return r.sym->getSize() + a;
  case R_TLSDESC:
  case RE_AARCH64_AUTH_TLSDESC:
    return ctx.in.got->getTlsDescAddr(*r.sym) + a;
  case R_TLSDESC_PC:
    return ctx.in.got->getTlsDescAddr(*r.sym) + a - p;
  case R_TLSDESC_GOTPLT:
    return ctx.in.got->getTlsDescAddr(*r.sym) + a - ctx.in.gotPlt->getVA();
  case RE_AARCH64_TLSDESC_PAGE:
  case RE_AARCH64_AUTH_TLSDESC_PAGE:
    return getAArch64Page(ctx.in.got->getTlsDescAddr(*r.sym) + a) -
           getAArch64Page(p);
  case RE_LOONGARCH_TLSDESC_PAGE_PC:
    return getLoongArchPageDelta(ctx.in.got->getTlsDescAddr(*r.sym) + a, p,
                                 r.type);
  case R_TLSGD_GOT:
    return ctx.in.got->getGlobalDynOffset(*r.sym) + a;
  case R_TLSGD_GOTPLT:
    return ctx.in.got->getGlobalDynAddr(*r.sym) + a - ctx.in.gotPlt->getVA();
  case R_TLSGD_PC:
    return ctx.in.got->getGlobalDynAddr(*r.sym) + a - p;
  case RE_LOONGARCH_TLSGD_PAGE_PC:
    return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
                                 r.type);
  case R_TLSLD_GOTPLT:
    return ctx.in.got->getVA() + ctx.in.got->getTlsIndexOff() + a -
           ctx.in.gotPlt->getVA();
  case R_TLSLD_GOT:
    return ctx.in.got->getTlsIndexOff() + a;
  case R_TLSLD_PC:
    return ctx.in.got->getTlsIndexVA() + a - p;
  default:
    llvm_unreachable("invalid expression");
  }
}
1008 
1009 // This function applies relocations to sections without SHF_ALLOC bit.
1010 // Such sections are never mapped to memory at runtime. Debug sections are
1011 // an example. Relocations in non-alloc sections are much easier to
1012 // handle than in allocated sections because it will never need complex
1013 // treatment such as GOT or PLT (because at runtime no one refers them).
1014 // So, we handle relocations for non-alloc sections directly in this
1015 // function as a performance optimization.
1016 template <class ELFT, class RelTy>
1017 void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf,
1018                                     Relocs<RelTy> rels) {
1019   const unsigned bits = sizeof(typename ELFT::uint) * 8;
1020   const TargetInfo &target = *ctx.target;
1021   const auto emachine = ctx.arg.emachine;
1022   const bool isDebug = isDebugSection(*this);
1023   const bool isDebugLine = isDebug && name == ".debug_line";
1024   std::optional<uint64_t> tombstone;
1025   if (isDebug) {
1026     if (name == ".debug_loc" || name == ".debug_ranges")
1027       tombstone = 1;
1028     else if (name == ".debug_names")
1029       tombstone = UINT64_MAX; // tombstone value
1030     else
1031       tombstone = 0;
1032   }
1033   for (const auto &patAndValue : llvm::reverse(ctx.arg.deadRelocInNonAlloc))
1034     if (patAndValue.first.match(this->name)) {
1035       tombstone = patAndValue.second;
1036       break;
1037     }
1038 
1039   const InputFile *f = this->file;
1040   for (auto it = rels.begin(), end = rels.end(); it != end; ++it) {
1041     const RelTy &rel = *it;
1042     const RelType type = rel.getType(ctx.arg.isMips64EL);
1043     const uint64_t offset = rel.r_offset;
1044     uint8_t *bufLoc = buf + offset;
1045     int64_t addend = getAddend<ELFT>(rel);
1046     if (!RelTy::HasAddend)
1047       addend += target.getImplicitAddend(bufLoc, type);
1048 
1049     Symbol &sym = f->getRelocTargetSym(rel);
1050     RelExpr expr = target.getRelExpr(type, sym, bufLoc);
1051     if (expr == R_NONE)
1052       continue;
1053     auto *ds = dyn_cast<Defined>(&sym);
1054 
1055     if (emachine == EM_RISCV && type == R_RISCV_SET_ULEB128) {
1056       if (++it != end &&
1057           it->getType(/*isMips64EL=*/false) == R_RISCV_SUB_ULEB128 &&
1058           it->r_offset == offset) {
1059         uint64_t val;
1060         if (!ds && tombstone) {
1061           val = *tombstone;
1062         } else {
1063           val = sym.getVA(ctx, addend) -
1064                 (f->getRelocTargetSym(*it).getVA(ctx) + getAddend<ELFT>(*it));
1065         }
1066         if (overwriteULEB128(bufLoc, val) >= 0x80)
1067           Err(ctx) << getLocation(offset) << ": ULEB128 value " << val
1068                    << " exceeds available space; references '" << &sym << "'";
1069         continue;
1070       }
1071       Err(ctx) << getLocation(offset)
1072                << ": R_RISCV_SET_ULEB128 not paired with R_RISCV_SUB_SET128";
1073       return;
1074     }
1075 
1076     if (tombstone && (expr == R_ABS || expr == R_DTPREL)) {
1077       // Resolve relocations in .debug_* referencing (discarded symbols or ICF
1078       // folded section symbols) to a tombstone value. Resolving to addend is
1079       // unsatisfactory because the result address range may collide with a
1080       // valid range of low address, or leave multiple CUs claiming ownership of
1081       // the same range of code, which may confuse consumers.
1082       //
1083       // To address the problems, we use -1 as a tombstone value for most
1084       // .debug_* sections. We have to ignore the addend because we don't want
1085       // to resolve an address attribute (which may have a non-zero addend) to
1086       // -1+addend (wrap around to a low address).
1087       //
1088       // R_DTPREL type relocations represent an offset into the dynamic thread
1089       // vector. The computed value is st_value plus a non-negative offset.
1090       // Negative values are invalid, so -1 can be used as the tombstone value.
1091       //
1092       // If the referenced symbol is relative to a discarded section (due to
1093       // --gc-sections, COMDAT, etc), it has been converted to a Undefined.
1094       // `ds->folded` catches the ICF folded case. However, resolving a
1095       // relocation in .debug_line to -1 would stop debugger users from setting
1096       // breakpoints on the folded-in function, so exclude .debug_line.
1097       //
1098       // For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
1099       // (base address selection entry), use 1 (which is used by GNU ld for
1100       // .debug_ranges).
1101       //
1102       // TODO To reduce disruption, we use 0 instead of -1 as the tombstone
1103       // value. Enable -1 in a future release.
1104       if (!ds || (ds->folded && !isDebugLine)) {
1105         // If -z dead-reloc-in-nonalloc= is specified, respect it.
1106         uint64_t value = SignExtend64<bits>(*tombstone);
1107         // For a 32-bit local TU reference in .debug_names, X86_64::relocate
1108         // requires that the unsigned value for R_X86_64_32 is truncated to
1109         // 32-bit. Other 64-bit targets's don't discern signed/unsigned 32-bit
1110         // absolute relocations and do not need this change.
1111         if (emachine == EM_X86_64 && type == R_X86_64_32)
1112           value = static_cast<uint32_t>(value);
1113         target.relocateNoSym(bufLoc, type, value);
1114         continue;
1115       }
1116     }
1117 
1118     // For a relocatable link, content relocated by relocation types with an
1119     // explicit addend, such as RELA, remain unchanged and we can stop here.
1120     // While content relocated by relocation types with an implicit addend, such
1121     // as REL, needs the implicit addend updated.
1122     if (ctx.arg.relocatable && (RelTy::HasAddend || sym.type != STT_SECTION))
1123       continue;
1124 
1125     // R_ABS/R_DTPREL and some other relocations can be used from non-SHF_ALLOC
1126     // sections.
1127     if (LLVM_LIKELY(expr == R_ABS) || expr == R_DTPREL || expr == R_GOTPLTREL ||
1128         expr == RE_RISCV_ADD || expr == RE_ARM_SBREL) {
1129       target.relocateNoSym(bufLoc, type,
1130                            SignExtend64<bits>(sym.getVA(ctx, addend)));
1131       continue;
1132     }
1133 
1134     if (expr == R_SIZE) {
1135       target.relocateNoSym(bufLoc, type,
1136                            SignExtend64<bits>(sym.getSize() + addend));
1137       continue;
1138     }
1139 
1140     // If the control reaches here, we found a PC-relative relocation in a
1141     // non-ALLOC section. Since non-ALLOC section is not loaded into memory
1142     // at runtime, the notion of PC-relative doesn't make sense here. So,
1143     // this is a usage error. However, GNU linkers historically accept such
1144     // relocations without any errors and relocate them as if they were at
1145     // address 0. For bug-compatibility, we accept them with warnings. We
1146     // know Steel Bank Common Lisp as of 2018 have this bug.
1147     //
1148     // GCC 8.0 or earlier have a bug that they emit R_386_GOTPC relocations
1149     // against _GLOBAL_OFFSET_TABLE_ for .debug_info. The bug has been fixed in
1150     // 2017 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82630), but we need to
1151     // keep this bug-compatible code for a while.
1152     bool isErr = expr != R_PC && !(emachine == EM_386 && type == R_386_GOTPC);
1153     {
1154       ELFSyncStream diag(ctx, isErr && !ctx.arg.noinhibitExec
1155                                   ? DiagLevel::Err
1156                                   : DiagLevel::Warn);
1157       diag << getLocation(offset) << ": has non-ABS relocation " << type
1158            << " against symbol '" << &sym << "'";
1159     }
1160     if (!isErr)
1161       target.relocateNoSym(
1162           bufLoc, type,
1163           SignExtend64<bits>(sym.getVA(ctx, addend - offset - outSecOff)));
1164   }
1165 }
1166 
1167 template <class ELFT>
1168 void InputSectionBase::relocate(Ctx &ctx, uint8_t *buf, uint8_t *bufEnd) {
1169   if ((flags & SHF_EXECINSTR) && LLVM_UNLIKELY(getFile<ELFT>()->splitStack))
1170     adjustSplitStackFunctionPrologues<ELFT>(ctx, buf, bufEnd);
1171 
1172   if (flags & SHF_ALLOC) {
1173     ctx.target->relocateAlloc(*this, buf);
1174     return;
1175   }
1176 
1177   auto *sec = cast<InputSection>(this);
1178   // For a relocatable link, also call relocateNonAlloc() to rewrite applicable
1179   // locations with tombstone values.
1180   invokeOnRelocs(*sec, sec->relocateNonAlloc<ELFT>, ctx, buf);
1181 }
1182 
1183 // For each function-defining prologue, find any calls to __morestack,
1184 // and replace them with calls to __morestack_non_split.
1185 static void switchMorestackCallsToMorestackNonSplit(
1186     Ctx &ctx, DenseSet<Defined *> &prologues,
1187     SmallVector<Relocation *, 0> &morestackCalls) {
1188 
1189   // If the target adjusted a function's prologue, all calls to
1190   // __morestack inside that function should be switched to
1191   // __morestack_non_split.
1192   Symbol *moreStackNonSplit = ctx.symtab->find("__morestack_non_split");
1193   if (!moreStackNonSplit) {
1194     ErrAlways(ctx) << "mixing split-stack objects requires a definition of "
1195                       "__morestack_non_split";
1196     return;
1197   }
1198 
1199   // Sort both collections to compare addresses efficiently.
1200   llvm::sort(morestackCalls, [](const Relocation *l, const Relocation *r) {
1201     return l->offset < r->offset;
1202   });
1203   std::vector<Defined *> functions(prologues.begin(), prologues.end());
1204   llvm::sort(functions, [](const Defined *l, const Defined *r) {
1205     return l->value < r->value;
1206   });
1207 
1208   auto it = morestackCalls.begin();
1209   for (Defined *f : functions) {
1210     // Find the first call to __morestack within the function.
1211     while (it != morestackCalls.end() && (*it)->offset < f->value)
1212       ++it;
1213     // Adjust all calls inside the function.
1214     while (it != morestackCalls.end() && (*it)->offset < f->value + f->size) {
1215       (*it)->sym = moreStackNonSplit;
1216       ++it;
1217     }
1218   }
1219 }
1220 
1221 static bool enclosingPrologueAttempted(uint64_t offset,
1222                                        const DenseSet<Defined *> &prologues) {
1223   for (Defined *f : prologues)
1224     if (f->value <= offset && offset < f->value + f->size)
1225       return true;
1226   return false;
1227 }
1228 
// If a function compiled for split stack calls a function not
// compiled for split stack, then the caller needs its prologue
// adjusted to ensure that the called function will have enough stack
// available. Find those functions, and adjust their prologues.
template <class ELFT>
void InputSectionBase::adjustSplitStackFunctionPrologues(Ctx &ctx, uint8_t *buf,
                                                         uint8_t *end) {
  // Functions whose prologue adjustment has been attempted (successfully or
  // not); used both to avoid duplicate work and for the __morestack pass.
  DenseSet<Defined *> prologues;
  // Relocations calling __morestack, candidates for retargeting below.
  SmallVector<Relocation *, 0> morestackCalls;

  for (Relocation &rel : relocs()) {
    // Ignore calls into the split-stack api.
    if (rel.sym->getName().starts_with("__morestack")) {
      if (rel.sym->getName() == "__morestack")
        morestackCalls.push_back(&rel);
      continue;
    }

    // A relocation to non-function isn't relevant. Sometimes
    // __morestack is not marked as a function, so this check comes
    // after the name check.
    if (rel.sym->type != STT_FUNC)
      continue;

    // If the callee's-file was compiled with split stack, nothing to do.  In
    // this context, a "Defined" symbol is one "defined by the binary currently
    // being produced". So an "undefined" symbol might be provided by a shared
    // library. It is not possible to tell how such symbols were compiled, so be
    // conservative.
    // NOTE(review): `isec` is non-null whenever the inner if-body runs, so
    // the `!isec` clause below is always false — harmless dead condition.
    if (Defined *d = dyn_cast<Defined>(rel.sym))
      if (InputSection *isec = cast_or_null<InputSection>(d->section))
        if (!isec || !isec->getFile<ELFT>() || isec->getFile<ELFT>()->splitStack)
          continue;

    // Adjust each enclosing function's prologue at most once.
    if (enclosingPrologueAttempted(rel.offset, prologues))
      continue;

    if (Defined *f = getEnclosingFunction(rel.offset)) {
      prologues.insert(f);
      if (ctx.target->adjustPrologueForCrossSplitStack(buf + f->value, end,
                                                       f->stOther))
        continue;
      if (!getFile<ELFT>()->someNoSplitStack)
        Err(ctx)
            << this << ": " << f->getName() << " (with -fsplit-stack) calls "
            << rel.sym->getName()
            << " (without -fsplit-stack), but couldn't adjust its prologue";
    }
  }

  // Targets that set needsMoreStackNonSplit additionally redirect
  // __morestack calls inside adjusted functions.
  if (ctx.target->needsMoreStackNonSplit)
    switchMorestackCallsToMorestackNonSplit(ctx, prologues, morestackCalls);
}
1282 
1283 template <class ELFT> void InputSection::writeTo(Ctx &ctx, uint8_t *buf) {
1284   if (LLVM_UNLIKELY(type == SHT_NOBITS))
1285     return;
1286   // If -r or --emit-relocs is given, then an InputSection
1287   // may be a relocation section.
1288   if (LLVM_UNLIKELY(type == SHT_RELA)) {
1289     copyRelocations<ELFT, typename ELFT::Rela>(ctx, buf);
1290     return;
1291   }
1292   if (LLVM_UNLIKELY(type == SHT_REL)) {
1293     copyRelocations<ELFT, typename ELFT::Rel>(ctx, buf);
1294     return;
1295   }
1296 
1297   // If -r is given, we may have a SHT_GROUP section.
1298   if (LLVM_UNLIKELY(type == SHT_GROUP)) {
1299     copyShtGroup<ELFT>(buf);
1300     return;
1301   }
1302 
1303   // If this is a compressed section, uncompress section contents directly
1304   // to the buffer.
1305   if (compressed) {
1306     auto *hdr = reinterpret_cast<const typename ELFT::Chdr *>(content_);
1307     auto compressed = ArrayRef<uint8_t>(content_, compressedSize)
1308                           .slice(sizeof(typename ELFT::Chdr));
1309     size_t size = this->size;
1310     if (Error e = hdr->ch_type == ELFCOMPRESS_ZLIB
1311                       ? compression::zlib::decompress(compressed, buf, size)
1312                       : compression::zstd::decompress(compressed, buf, size))
1313       Err(ctx) << this << ": decompress failed: " << std::move(e);
1314     uint8_t *bufEnd = buf + size;
1315     relocate<ELFT>(ctx, buf, bufEnd);
1316     return;
1317   }
1318 
1319   // Copy section contents from source object file to output file
1320   // and then apply relocations.
1321   memcpy(buf, content().data(), content().size());
1322   relocate<ELFT>(ctx, buf, buf + content().size());
1323 }
1324 
1325 void InputSection::replace(InputSection *other) {
1326   addralign = std::max(addralign, other->addralign);
1327 
1328   // When a section is replaced with another section that was allocated to
1329   // another partition, the replacement section (and its associated sections)
1330   // need to be placed in the main partition so that both partitions will be
1331   // able to access it.
1332   if (partition != other->partition) {
1333     partition = 1;
1334     for (InputSection *isec : dependentSections)
1335       isec->partition = 1;
1336   }
1337 
1338   other->repl = repl;
1339   other->markDead();
1340 }
1341 
// Construct an EhInputSection wrapping a .eh_frame input section; CIE/FDE
// pieces are populated later by split().
template <class ELFT>
EhInputSection::EhInputSection(ObjFile<ELFT> &f,
                               const typename ELFT::Shdr &header,
                               StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::EHFrame) {}
1347 
// The parent of an EhInputSection is either null or a SyntheticSection.
SyntheticSection *EhInputSection::getParent() const {
  return cast_or_null<SyntheticSection>(parent);
}
1351 
1352 // .eh_frame is a sequence of CIE or FDE records.
1353 // This function splits an input section into records and returns them.
1354 template <class ELFT> void EhInputSection::split() {
1355   const RelsOrRelas<ELFT> rels = relsOrRelas<ELFT>(/*supportsCrel=*/false);
1356   // getReloc expects the relocations to be sorted by r_offset. See the comment
1357   // in scanRelocs.
1358   if (rels.areRelocsRel()) {
1359     SmallVector<typename ELFT::Rel, 0> storage;
1360     split<ELFT>(sortRels(rels.rels, storage));
1361   } else {
1362     SmallVector<typename ELFT::Rela, 0> storage;
1363     split<ELFT>(sortRels(rels.relas, storage));
1364   }
1365 }
1366 
// Split the section contents into CIE/FDE records, recording for each record
// its offset, size, and the index of the first relocation inside it.
// `rels` must be sorted by r_offset.
template <class ELFT, class RelTy>
void EhInputSection::split(ArrayRef<RelTy> rels) {
  ArrayRef<uint8_t> d = content();
  const char *msg = nullptr; // set on malformed input, reported after the loop
  unsigned relI = 0;         // index of first not-yet-consumed relocation
  while (!d.empty()) {
    if (d.size() < 4) {
      msg = "CIE/FDE too small";
      break;
    }
    // Each record starts with a 4-byte length (excluding the length field
    // itself), followed by a 4-byte id (0 for a CIE, nonzero for an FDE).
    uint64_t size = endian::read32<ELFT::Endianness>(d.data());
    if (size == 0) // ZERO terminator
      break;
    uint32_t id = endian::read32<ELFT::Endianness>(d.data() + 4);
    size += 4; // include the length field in the record size
    if (LLVM_UNLIKELY(size > d.size())) {
      // If it is 0xFFFFFFFF, the next 8 bytes contain the size instead,
      // but we do not support that format yet.
      msg = size == UINT32_MAX + uint64_t(4)
                ? "CIE/FDE too large"
                : "CIE/FDE ends past the end of the section";
      break;
    }

    // Find the first relocation that points to [off,off+size). Relocations
    // have been sorted by r_offset.
    const uint64_t off = d.data() - content().data();
    while (relI != rels.size() && rels[relI].r_offset < off)
      ++relI;
    unsigned firstRel = -1; // -1 means "no relocation in this record"
    if (relI != rels.size() && rels[relI].r_offset < off + size)
      firstRel = relI;
    (id == 0 ? cies : fdes).emplace_back(off, this, size, firstRel);
    d = d.slice(size);
  }
  if (msg)
    Err(file->ctx) << "corrupted .eh_frame: " << msg << "\n>>> defined in "
                   << getObjMsg(d.data() - content().data());
}
1406 
// Return the offset in an output section for a given input offset.
uint64_t EhInputSection::getParentOffset(uint64_t offset) const {
  // Binary-search the FDE pieces (sorted by inputOff) for the piece
  // containing `offset`; fall back to the CIE pieces if it is not in an FDE.
  auto it = partition_point(
      fdes, [=](EhSectionPiece p) { return p.inputOff <= offset; });
  if (it == fdes.begin() || it[-1].inputOff + it[-1].size <= offset) {
    it = partition_point(
        cies, [=](EhSectionPiece p) { return p.inputOff <= offset; });
    if (it == cies.begin()) // invalid piece
      return offset;
  }
  if (it[-1].outputOff == -1) // invalid piece
    return offset - it[-1].inputOff;
  return it[-1].outputOff + (offset - it[-1].inputOff);
}
1421 
1422 static size_t findNull(StringRef s, size_t entSize) {
1423   for (unsigned i = 0, n = s.size(); i != n; i += entSize) {
1424     const char *b = s.begin() + i;
1425     if (std::all_of(b, b + entSize, [](char c) { return c == 0; }))
1426       return i;
1427   }
1428   llvm_unreachable("");
1429 }
1430 
// Split SHF_STRINGS section. Such section is a sequence of
// null-terminated strings.
void MergeInputSection::splitStrings(StringRef s, size_t entSize) {
  // Pieces in non-alloc sections (or when GC is off) are unconditionally live.
  const bool live = !(flags & SHF_ALLOC) || !getCtx().arg.gcSections;
  const char *p = s.data(), *end = s.data() + s.size();
  // The last entry must be all-zero; otherwise the section is malformed and
  // we record a single placeholder piece to keep later stages consistent.
  if (!std::all_of(end - entSize, end, [](char c) { return c == 0; })) {
    Err(getCtx()) << this << ": string is not null terminated";
    pieces.emplace_back(entSize, 0, false);
    return;
  }
  if (entSize == 1) {
    // Optimize the common case: 1-byte entries let us use strlen directly.
    do {
      size_t size = strlen(p);
      pieces.emplace_back(p - s.begin(), xxh3_64bits(StringRef(p, size)), live);
      p += size + 1;
    } while (p != end);
  } else {
    // Wide entries: scan entSize-aligned chunks for the null terminator.
    do {
      size_t size = findNull(StringRef(p, end - p), entSize);
      pieces.emplace_back(p - s.begin(), xxh3_64bits(StringRef(p, size)), live);
      p += size + entSize;
    } while (p != end);
  }
}
1456 
1457 // Split non-SHF_STRINGS section. Such section is a sequence of
1458 // fixed size records.
1459 void MergeInputSection::splitNonStrings(ArrayRef<uint8_t> data,
1460                                         size_t entSize) {
1461   size_t size = data.size();
1462   assert((size % entSize) == 0);
1463   const bool live = !(flags & SHF_ALLOC) || !getCtx().arg.gcSections;
1464 
1465   pieces.resize_for_overwrite(size / entSize);
1466   for (size_t i = 0, j = 0; i != size; i += entSize, j++)
1467     pieces[j] = {i, (uint32_t)xxh3_64bits(data.slice(i, entSize)), live};
1468 }
1469 
// Construct a mergeable section read from an object file; the section
// header supplies type, flags, entsize and contents.
template <class ELFT>
MergeInputSection::MergeInputSection(ObjFile<ELFT> &f,
                                     const typename ELFT::Shdr &header,
                                     StringRef name)
    : InputSectionBase(f, header, name, InputSectionBase::Merge) {}

// Construct a synthetic mergeable section owned by the linker itself
// (attributed to ctx.internalFile). Note that the entry size doubles as
// the section alignment.
MergeInputSection::MergeInputSection(Ctx &ctx, StringRef name, uint32_t type,
                                     uint64_t flags, uint64_t entsize,
                                     ArrayRef<uint8_t> data)
    : InputSectionBase(ctx.internalFile, name, type, flags, /*link=*/0,
                       /*info=*/0,
                       /*addralign=*/entsize, entsize, data,
                       SectionBase::Merge) {}
1483 
1484 // This function is called after we obtain a complete list of input sections
1485 // that need to be linked. This is responsible to split section contents
1486 // into small chunks for further processing.
1487 //
1488 // Note that this function is called from parallelForEach. This must be
1489 // thread-safe (i.e. no memory allocation from the pools).
1490 void MergeInputSection::splitIntoPieces() {
1491   assert(pieces.empty());
1492 
1493   if (flags & SHF_STRINGS)
1494     splitStrings(toStringRef(contentMaybeDecompress()), entsize);
1495   else
1496     splitNonStrings(contentMaybeDecompress(), entsize);
1497 }
1498 
1499 SectionPiece &MergeInputSection::getSectionPiece(uint64_t offset) {
1500   if (content().size() <= offset) {
1501     Err(getCtx()) << this << ": offset is outside the section";
1502     return pieces[0];
1503   }
1504   return partition_point(
1505       pieces, [=](SectionPiece p) { return p.inputOff <= offset; })[-1];
1506 }
1507 
1508 // Return the offset in an output section for a given input offset.
1509 uint64_t MergeInputSection::getParentOffset(uint64_t offset) const {
1510   const SectionPiece &piece = getSectionPiece(offset);
1511   return piece.outputOff + (offset - piece.inputOff);
1512 }
1513 
// Explicit template instantiations for the four supported ELF flavors
// (32/64-bit, little/big-endian), so the templated definitions in this
// file are emitted here rather than in every including TU.

template InputSection::InputSection(ObjFile<ELF32LE> &, const ELF32LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF32BE> &, const ELF32BE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64LE> &, const ELF64LE::Shdr &,
                                    StringRef);
template InputSection::InputSection(ObjFile<ELF64BE> &, const ELF64BE::Shdr &,
                                    StringRef);

template void InputSection::writeTo<ELF32LE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF32BE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF64LE>(Ctx &, uint8_t *);
template void InputSection::writeTo<ELF64BE>(Ctx &, uint8_t *);

template RelsOrRelas<ELF32LE>
InputSectionBase::relsOrRelas<ELF32LE>(bool) const;
template RelsOrRelas<ELF32BE>
InputSectionBase::relsOrRelas<ELF32BE>(bool) const;
template RelsOrRelas<ELF64LE>
InputSectionBase::relsOrRelas<ELF64LE>(bool) const;
template RelsOrRelas<ELF64BE>
InputSectionBase::relsOrRelas<ELF64BE>(bool) const;

template MergeInputSection::MergeInputSection(ObjFile<ELF32LE> &,
                                              const ELF32LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF32BE> &,
                                              const ELF32BE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64LE> &,
                                              const ELF64LE::Shdr &, StringRef);
template MergeInputSection::MergeInputSection(ObjFile<ELF64BE> &,
                                              const ELF64BE::Shdr &, StringRef);

template EhInputSection::EhInputSection(ObjFile<ELF32LE> &,
                                        const ELF32LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF32BE> &,
                                        const ELF32BE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64LE> &,
                                        const ELF64LE::Shdr &, StringRef);
template EhInputSection::EhInputSection(ObjFile<ELF64BE> &,
                                        const ELF64BE::Shdr &, StringRef);

template void EhInputSection::split<ELF32LE>();
template void EhInputSection::split<ELF32BE>();
template void EhInputSection::split<ELF64LE>();
template void EhInputSection::split<ELF64BE>();
1559