1 //===- Relocations.cpp ----------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains platform-independent functions to process relocations. 10 // I'll describe the overview of this file here. 11 // 12 // Simple relocations are easy to handle for the linker. For example, 13 // for R_X86_64_PC64 relocs, the linker just has to fix up locations 14 // with the relative offsets to the target symbols. It would just be 15 // reading records from relocation sections and applying them to output. 16 // 17 // But not all relocations are that easy to handle. For example, for 18 // R_386_GOTOFF relocs, the linker has to create new GOT entries for 19 // symbols if they don't exist, and fix up locations with GOT entry 20 // offsets from the beginning of GOT section. So there is more than 21 // fixing addresses in relocation processing. 22 // 23 // ELF defines a large number of complex relocations. 24 // 25 // The functions in this file analyze relocations and do whatever needs 26 // to be done. It includes, but not limited to, the following. 27 // 28 // - create GOT/PLT entries 29 // - create new relocations in .dynsym to let the dynamic linker resolve 30 // them at runtime (since ELF supports dynamic linking, not all 31 // relocations can be resolved at link-time) 32 // - create COPY relocs and reserve space in .bss 33 // - replace expensive relocs (in terms of runtime cost) with cheap ones 34 // - error out infeasible combinations such as PIC and non-relative relocs 35 // 36 // Note that the functions in this file don't actually apply relocations 37 // because it doesn't know about the output file nor the output file buffer. 38 // It instead stores Relocation objects to InputSection's Relocations 39 // vector to let it apply later in InputSection::writeTo. 40 // 41 //===----------------------------------------------------------------------===// 42 43 #include "Relocations.h" 44 #include "Config.h" 45 #include "InputFiles.h" 46 #include "LinkerScript.h" 47 #include "OutputSections.h" 48 #include "SymbolTable.h" 49 #include "Symbols.h" 50 #include "SyntheticSections.h" 51 #include "Target.h" 52 #include "Thunks.h" 53 #include "lld/Common/ErrorHandler.h" 54 #include "lld/Common/Memory.h" 55 #include "llvm/ADT/SmallSet.h" 56 #include "llvm/BinaryFormat/ELF.h" 57 #include "llvm/Demangle/Demangle.h" 58 #include "llvm/Support/Endian.h" 59 #include <algorithm> 60 61 using namespace llvm; 62 using namespace llvm::ELF; 63 using namespace llvm::object; 64 using namespace llvm::support::endian; 65 using namespace lld; 66 using namespace lld::elf; 67 68 static std::optional<std::string> getLinkerScriptLocation(const Symbol &sym) { 69 for (SectionCommand *cmd : script->sectionCommands) 70 if (auto *assign = dyn_cast<SymbolAssignment>(cmd)) 71 if (assign->sym == &sym) 72 return assign->location; 73 return std::nullopt; 74 } 75 76 static std::string getDefinedLocation(const Symbol &sym) { 77 const char msg[] = "\n>>> defined in "; 78 if (sym.file) 79 return msg + toString(sym.file); 80 if (std::optional<std::string> loc = getLinkerScriptLocation(sym)) 81 return msg + *loc; 82 return ""; 83 } 84 85 // Construct a message in the following format. 
86 // 87 // >>> defined in /home/alice/src/foo.o 88 // >>> referenced by bar.c:12 (/home/alice/src/bar.c:12) 89 // >>> /home/alice/src/bar.o:(.text+0x1) 90 static std::string getLocation(InputSectionBase &s, const Symbol &sym, 91 uint64_t off) { 92 std::string msg = getDefinedLocation(sym) + "\n>>> referenced by "; 93 std::string src = s.getSrcMsg(sym, off); 94 if (!src.empty()) 95 msg += src + "\n>>> "; 96 return msg + s.getObjMsg(off); 97 } 98 99 void elf::reportRangeError(uint8_t *loc, const Relocation &rel, const Twine &v, 100 int64_t min, uint64_t max) { 101 ErrorPlace errPlace = getErrorPlace(loc); 102 std::string hint; 103 if (rel.sym) { 104 if (!rel.sym->isSection()) 105 hint = "; references '" + lld::toString(*rel.sym) + '\''; 106 else if (auto *d = dyn_cast<Defined>(rel.sym)) 107 hint = ("; references section '" + d->section->name + "'").str(); 108 109 if (config->emachine == EM_X86_64 && rel.type == R_X86_64_PC32 && 110 rel.sym->getOutputSection() && 111 (rel.sym->getOutputSection()->flags & SHF_X86_64_LARGE)) { 112 hint += "; R_X86_64_PC32 should not reference a section marked " 113 "SHF_X86_64_LARGE"; 114 } 115 } 116 if (!errPlace.srcLoc.empty()) 117 hint += "\n>>> referenced by " + errPlace.srcLoc; 118 if (rel.sym && !rel.sym->isSection()) 119 hint += getDefinedLocation(*rel.sym); 120 121 if (errPlace.isec && errPlace.isec->name.starts_with(".debug")) 122 hint += "; consider recompiling with -fdebug-types-section to reduce size " 123 "of debug sections"; 124 125 errorOrWarn(errPlace.loc + "relocation " + lld::toString(rel.type) + 126 " out of range: " + v.str() + " is not in [" + Twine(min).str() + 127 ", " + Twine(max).str() + "]" + hint); 128 } 129 130 void elf::reportRangeError(uint8_t *loc, int64_t v, int n, const Symbol &sym, 131 const Twine &msg) { 132 ErrorPlace errPlace = getErrorPlace(loc); 133 std::string hint; 134 if (!sym.getName().empty()) 135 hint = 136 "; references '" + lld::toString(sym) + '\'' + getDefinedLocation(sym); 137 errorOrWarn(errPlace.loc + msg + " is out of range: " + Twine(v) + 138 " is not in [" + Twine(llvm::minIntN(n)) + ", " + 139 Twine(llvm::maxIntN(n)) + "]" + hint); 140 } 141 142 // Build a bitmask with one bit set for each 64 subset of RelExpr. 143 static constexpr uint64_t buildMask() { return 0; } 144 145 template <typename... Tails> 146 static constexpr uint64_t buildMask(int head, Tails... tails) { 147 return (0 <= head && head < 64 ? uint64_t(1) << head : 0) | 148 buildMask(tails...); 149 } 150 151 // Return true if `Expr` is one of `Exprs`. 152 // There are more than 64 but less than 128 RelExprs, so we divide the set of 153 // exprs into [0, 64) and [64, 128) and represent each range as a constant 154 // 64-bit mask. Then we decide which mask to test depending on the value of 155 // expr and use a simple shift and bitwise-and to test for membership. 156 template <RelExpr... Exprs> static bool oneof(RelExpr expr) { 157 assert(0 <= expr && (int)expr < 128 && 158 "RelExpr is too large for 128-bit mask!"); 159 160 if (expr >= 64) 161 return (uint64_t(1) << (expr - 64)) & buildMask((Exprs - 64)...); 162 return (uint64_t(1) << expr) & buildMask(Exprs...); 163 } 164 165 static RelType getMipsPairType(RelType type, bool isLocal) { 166 switch (type) { 167 case R_MIPS_HI16: 168 return R_MIPS_LO16; 169 case R_MIPS_GOT16: 170 // In case of global symbol, the R_MIPS_GOT16 relocation does not 171 // have a pair. 
Each global symbol has a unique entry in the GOT 172 // and a corresponding instruction with help of the R_MIPS_GOT16 173 // relocation loads an address of the symbol. In case of local 174 // symbol, the R_MIPS_GOT16 relocation creates a GOT entry to hold 175 // the high 16 bits of the symbol's value. A paired R_MIPS_LO16 176 // relocations handle low 16 bits of the address. That allows 177 // to allocate only one GOT entry for every 64 KBytes of local data. 178 return isLocal ? R_MIPS_LO16 : R_MIPS_NONE; 179 case R_MICROMIPS_GOT16: 180 return isLocal ? R_MICROMIPS_LO16 : R_MIPS_NONE; 181 case R_MIPS_PCHI16: 182 return R_MIPS_PCLO16; 183 case R_MICROMIPS_HI16: 184 return R_MICROMIPS_LO16; 185 default: 186 return R_MIPS_NONE; 187 } 188 } 189 190 // True if non-preemptable symbol always has the same value regardless of where 191 // the DSO is loaded. 192 static bool isAbsolute(const Symbol &sym) { 193 if (sym.isUndefWeak()) 194 return true; 195 if (const auto *dr = dyn_cast<Defined>(&sym)) 196 return dr->section == nullptr; // Absolute symbol. 197 return false; 198 } 199 200 static bool isAbsoluteValue(const Symbol &sym) { 201 return isAbsolute(sym) || sym.isTls(); 202 } 203 204 // Returns true if Expr refers a PLT entry. 205 static bool needsPlt(RelExpr expr) { 206 return oneof<R_PLT, R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, 207 R_GOTPLT_PC, R_LOONGARCH_PLT_PAGE_PC, R_PPC32_PLTREL, 208 R_PPC64_CALL_PLT>(expr); 209 } 210 211 bool lld::elf::needsGot(RelExpr expr) { 212 return oneof<R_GOT, R_GOT_OFF, R_MIPS_GOT_LOCAL_PAGE, R_MIPS_GOT_OFF, 213 R_MIPS_GOT_OFF32, R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTPLT, 214 R_AARCH64_GOT_PAGE, R_LOONGARCH_GOT, R_LOONGARCH_GOT_PAGE_PC>( 215 expr); 216 } 217 218 // True if this expression is of the form Sym - X, where X is a position in the 219 // file (PC, or GOT for example). 220 static bool isRelExpr(RelExpr expr) { 221 return oneof<R_PC, R_GOTREL, R_GOTPLTREL, R_ARM_PCA, R_MIPS_GOTREL, 222 R_PPC64_CALL, R_PPC64_RELAX_TOC, R_AARCH64_PAGE_PC, 223 R_RELAX_GOT_PC, R_RISCV_PC_INDIRECT, R_PPC64_RELAX_GOT_PC, 224 R_LOONGARCH_PAGE_PC>(expr); 225 } 226 227 static RelExpr toPlt(RelExpr expr) { 228 switch (expr) { 229 case R_LOONGARCH_PAGE_PC: 230 return R_LOONGARCH_PLT_PAGE_PC; 231 case R_PPC64_CALL: 232 return R_PPC64_CALL_PLT; 233 case R_PC: 234 return R_PLT_PC; 235 case R_ABS: 236 return R_PLT; 237 case R_GOTREL: 238 return R_PLT_GOTREL; 239 default: 240 return expr; 241 } 242 } 243 244 static RelExpr fromPlt(RelExpr expr) { 245 // We decided not to use a plt. Optimize a reference to the plt to a 246 // reference to the symbol itself. 247 switch (expr) { 248 case R_PLT_PC: 249 case R_PPC32_PLTREL: 250 return R_PC; 251 case R_LOONGARCH_PLT_PAGE_PC: 252 return R_LOONGARCH_PAGE_PC; 253 case R_PPC64_CALL_PLT: 254 return R_PPC64_CALL; 255 case R_PLT: 256 return R_ABS; 257 case R_PLT_GOTPLT: 258 return R_GOTPLTREL; 259 case R_PLT_GOTREL: 260 return R_GOTREL; 261 default: 262 return expr; 263 } 264 } 265 266 // Returns true if a given shared symbol is in a read-only segment in a DSO. 267 template <class ELFT> static bool isReadOnly(SharedSymbol &ss) { 268 using Elf_Phdr = typename ELFT::Phdr; 269 270 // Determine if the symbol is read-only by scanning the DSO's program headers. 
271 const auto &file = cast<SharedFile>(*ss.file); 272 for (const Elf_Phdr &phdr : 273 check(file.template getObj<ELFT>().program_headers())) 274 if ((phdr.p_type == ELF::PT_LOAD || phdr.p_type == ELF::PT_GNU_RELRO) && 275 !(phdr.p_flags & ELF::PF_W) && ss.value >= phdr.p_vaddr && 276 ss.value < phdr.p_vaddr + phdr.p_memsz) 277 return true; 278 return false; 279 } 280 281 // Returns symbols at the same offset as a given symbol, including SS itself. 282 // 283 // If two or more symbols are at the same offset, and at least one of 284 // them are copied by a copy relocation, all of them need to be copied. 285 // Otherwise, they would refer to different places at runtime. 286 template <class ELFT> 287 static SmallSet<SharedSymbol *, 4> getSymbolsAt(SharedSymbol &ss) { 288 using Elf_Sym = typename ELFT::Sym; 289 290 const auto &file = cast<SharedFile>(*ss.file); 291 292 SmallSet<SharedSymbol *, 4> ret; 293 for (const Elf_Sym &s : file.template getGlobalELFSyms<ELFT>()) { 294 if (s.st_shndx == SHN_UNDEF || s.st_shndx == SHN_ABS || 295 s.getType() == STT_TLS || s.st_value != ss.value) 296 continue; 297 StringRef name = check(s.getName(file.getStringTable())); 298 Symbol *sym = symtab.find(name); 299 if (auto *alias = dyn_cast_or_null<SharedSymbol>(sym)) 300 ret.insert(alias); 301 } 302 303 // The loop does not check SHT_GNU_verneed, so ret does not contain 304 // non-default version symbols. If ss has a non-default version, ret won't 305 // contain ss. Just add ss unconditionally. If a non-default version alias is 306 // separately copy relocated, it and ss will have different addresses. 307 // Fortunately this case is impractical and fails with GNU ld as well. 308 ret.insert(&ss); 309 return ret; 310 } 311 312 // When a symbol is copy relocated or we create a canonical plt entry, it is 313 // effectively a defined symbol. In the case of copy relocation the symbol is 314 // in .bss and in the case of a canonical plt entry it is in .plt. This function 315 // replaces the existing symbol with a Defined pointing to the appropriate 316 // location. 317 static void replaceWithDefined(Symbol &sym, SectionBase &sec, uint64_t value, 318 uint64_t size) { 319 Symbol old = sym; 320 Defined(sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value, 321 size, &sec) 322 .overwrite(sym); 323 324 sym.versionId = old.versionId; 325 sym.exportDynamic = true; 326 sym.isUsedInRegularObj = true; 327 // A copy relocated alias may need a GOT entry. 328 sym.flags.store(old.flags.load(std::memory_order_relaxed) & NEEDS_GOT, 329 std::memory_order_relaxed); 330 } 331 332 // Reserve space in .bss or .bss.rel.ro for copy relocation. 333 // 334 // The copy relocation is pretty much a hack. If you use a copy relocation 335 // in your program, not only the symbol name but the symbol's size, RW/RO 336 // bit and alignment become part of the ABI. In addition to that, if the 337 // symbol has aliases, the aliases become part of the ABI. That's subtle, 338 // but if you violate that implicit ABI, that can cause very counter- 339 // intuitive consequences. 340 // 341 // So, what is the copy relocation? It's for linking non-position 342 // independent code to DSOs. In an ideal world, all references to data 343 // exported by DSOs should go indirectly through GOT. But if object files 344 // are compiled as non-PIC, all data references are direct. There is no 345 // way for the linker to transform the code to use GOT, as machine 346 // instructions are already set in stone in object files. 
This is where 347 // the copy relocation takes a role. 348 // 349 // A copy relocation instructs the dynamic linker to copy data from a DSO 350 // to a specified address (which is usually in .bss) at load-time. If the 351 // static linker (that's us) finds a direct data reference to a DSO 352 // symbol, it creates a copy relocation, so that the symbol can be 353 // resolved as if it were in .bss rather than in a DSO. 354 // 355 // As you can see in this function, we create a copy relocation for the 356 // dynamic linker, and the relocation contains not only symbol name but 357 // various other information about the symbol. So, such attributes become a 358 // part of the ABI. 359 // 360 // Note for application developers: I can give you a piece of advice if 361 // you are writing a shared library. You probably should export only 362 // functions from your library. You shouldn't export variables. 363 // 364 // As an example what can happen when you export variables without knowing 365 // the semantics of copy relocations, assume that you have an exported 366 // variable of type T. It is an ABI-breaking change to add new members at 367 // end of T even though doing that doesn't change the layout of the 368 // existing members. That's because the space for the new members are not 369 // reserved in .bss unless you recompile the main program. That means they 370 // are likely to overlap with other data that happens to be laid out next 371 // to the variable in .bss. This kind of issue is sometimes very hard to 372 // debug. What's a solution? Instead of exporting a variable V from a DSO, 373 // define an accessor getV(). 374 template <class ELFT> static void addCopyRelSymbol(SharedSymbol &ss) { 375 // Copy relocation against zero-sized symbol doesn't make sense. 376 uint64_t symSize = ss.getSize(); 377 if (symSize == 0 || ss.alignment == 0) 378 fatal("cannot create a copy relocation for symbol " + toString(ss)); 379 380 // See if this symbol is in a read-only segment. If so, preserve the symbol's 381 // memory protection by reserving space in the .bss.rel.ro section. 382 bool isRO = isReadOnly<ELFT>(ss); 383 BssSection *sec = 384 make<BssSection>(isRO ? ".bss.rel.ro" : ".bss", symSize, ss.alignment); 385 OutputSection *osec = (isRO ? in.bssRelRo : in.bss)->getParent(); 386 387 // At this point, sectionBases has been migrated to sections. Append sec to 388 // sections. 389 if (osec->commands.empty() || 390 !isa<InputSectionDescription>(osec->commands.back())) 391 osec->commands.push_back(make<InputSectionDescription>("")); 392 auto *isd = cast<InputSectionDescription>(osec->commands.back()); 393 isd->sections.push_back(sec); 394 osec->commitSection(sec); 395 396 // Look through the DSO's dynamic symbol table for aliases and create a 397 // dynamic symbol for each one. This causes the copy relocation to correctly 398 // interpose any aliases. 399 for (SharedSymbol *sym : getSymbolsAt<ELFT>(ss)) 400 replaceWithDefined(*sym, *sec, 0, sym->size); 401 402 mainPart->relaDyn->addSymbolReloc(target->copyRel, *sec, 0, ss); 403 } 404 405 // .eh_frame sections are mergeable input sections, so their input 406 // offsets are not linearly mapped to output section. For each input 407 // offset, we need to find a section piece containing the offset and 408 // add the piece's base address to the input offset to compute the 409 // output offset. That isn't cheap. 410 // 411 // This class is to speed up the offset computation. 
When we process 412 // relocations, we access offsets in the monotonically increasing 413 // order. So we can optimize for that access pattern. 414 // 415 // For sections other than .eh_frame, this class doesn't do anything. 416 namespace { 417 class OffsetGetter { 418 public: 419 OffsetGetter() = default; 420 explicit OffsetGetter(InputSectionBase &sec) { 421 if (auto *eh = dyn_cast<EhInputSection>(&sec)) { 422 cies = eh->cies; 423 fdes = eh->fdes; 424 i = cies.begin(); 425 j = fdes.begin(); 426 } 427 } 428 429 // Translates offsets in input sections to offsets in output sections. 430 // Given offset must increase monotonically. We assume that Piece is 431 // sorted by inputOff. 432 uint64_t get(uint64_t off) { 433 if (cies.empty()) 434 return off; 435 436 while (j != fdes.end() && j->inputOff <= off) 437 ++j; 438 auto it = j; 439 if (j == fdes.begin() || j[-1].inputOff + j[-1].size <= off) { 440 while (i != cies.end() && i->inputOff <= off) 441 ++i; 442 if (i == cies.begin() || i[-1].inputOff + i[-1].size <= off) 443 fatal(".eh_frame: relocation is not in any piece"); 444 it = i; 445 } 446 447 // Offset -1 means that the piece is dead (i.e. garbage collected). 448 if (it[-1].outputOff == -1) 449 return -1; 450 return it[-1].outputOff + (off - it[-1].inputOff); 451 } 452 453 private: 454 ArrayRef<EhSectionPiece> cies, fdes; 455 ArrayRef<EhSectionPiece>::iterator i, j; 456 }; 457 458 // This class encapsulates states needed to scan relocations for one 459 // InputSectionBase. 460 class RelocationScanner { 461 public: 462 template <class ELFT> void scanSection(InputSectionBase &s); 463 464 private: 465 InputSectionBase *sec; 466 OffsetGetter getter; 467 468 // End of relocations, used by Mips/PPC64. 469 const void *end = nullptr; 470 471 template <class RelTy> RelType getMipsN32RelType(RelTy *&rel) const; 472 template <class ELFT, class RelTy> 473 int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const; 474 bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym, 475 uint64_t relOff) const; 476 void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym, 477 int64_t addend) const; 478 template <class ELFT, class RelTy> void scanOne(RelTy *&i); 479 template <class ELFT, class RelTy> void scan(ArrayRef<RelTy> rels); 480 }; 481 } // namespace 482 483 // MIPS has an odd notion of "paired" relocations to calculate addends. 484 // For example, if a relocation is of R_MIPS_HI16, there must be a 485 // R_MIPS_LO16 relocation after that, and an addend is calculated using 486 // the two relocations. 487 template <class ELFT, class RelTy> 488 int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr, 489 bool isLocal) const { 490 if (expr == R_MIPS_GOTREL && isLocal) 491 return sec->getFile<ELFT>()->mipsGp0; 492 493 // The ABI says that the paired relocation is used only for REL. 494 // See p. 4-17 at ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf 495 if (RelTy::IsRela) 496 return 0; 497 498 RelType type = rel.getType(config->isMips64EL); 499 uint32_t pairTy = getMipsPairType(type, isLocal); 500 if (pairTy == R_MIPS_NONE) 501 return 0; 502 503 const uint8_t *buf = sec->content().data(); 504 uint32_t symIndex = rel.getSymbol(config->isMips64EL); 505 506 // To make things worse, paired relocations might not be contiguous in 507 // the relocation table, so we need to do linear search. 
*sigh* 508 for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri) 509 if (ri->getType(config->isMips64EL) == pairTy && 510 ri->getSymbol(config->isMips64EL) == symIndex) 511 return target->getImplicitAddend(buf + ri->r_offset, pairTy); 512 513 warn("can't find matching " + toString(pairTy) + " relocation for " + 514 toString(type)); 515 return 0; 516 } 517 518 // Custom error message if Sym is defined in a discarded section. 519 template <class ELFT> 520 static std::string maybeReportDiscarded(Undefined &sym) { 521 auto *file = dyn_cast_or_null<ObjFile<ELFT>>(sym.file); 522 if (!file || !sym.discardedSecIdx) 523 return ""; 524 ArrayRef<typename ELFT::Shdr> objSections = 525 file->template getELFShdrs<ELFT>(); 526 527 std::string msg; 528 if (sym.type == ELF::STT_SECTION) { 529 msg = "relocation refers to a discarded section: "; 530 msg += CHECK( 531 file->getObj().getSectionName(objSections[sym.discardedSecIdx]), file); 532 } else { 533 msg = "relocation refers to a symbol in a discarded section: " + 534 toString(sym); 535 } 536 msg += "\n>>> defined in " + toString(file); 537 538 Elf_Shdr_Impl<ELFT> elfSec = objSections[sym.discardedSecIdx - 1]; 539 if (elfSec.sh_type != SHT_GROUP) 540 return msg; 541 542 // If the discarded section is a COMDAT. 543 StringRef signature = file->getShtGroupSignature(objSections, elfSec); 544 if (const InputFile *prevailing = 545 symtab.comdatGroups.lookup(CachedHashStringRef(signature))) { 546 msg += "\n>>> section group signature: " + signature.str() + 547 "\n>>> prevailing definition is in " + toString(prevailing); 548 if (sym.nonPrevailing) { 549 msg += "\n>>> or the symbol in the prevailing group had STB_WEAK " 550 "binding and the symbol in a non-prevailing group had STB_GLOBAL " 551 "binding. Mixing groups with STB_WEAK and STB_GLOBAL binding " 552 "signature is not supported"; 553 } 554 } 555 return msg; 556 } 557 558 namespace { 559 // Undefined diagnostics are collected in a vector and emitted once all of 560 // them are known, so that some postprocessing on the list of undefined symbols 561 // can happen before lld emits diagnostics. 562 struct UndefinedDiag { 563 Undefined *sym; 564 struct Loc { 565 InputSectionBase *sec; 566 uint64_t offset; 567 }; 568 std::vector<Loc> locs; 569 bool isWarning; 570 }; 571 572 std::vector<UndefinedDiag> undefs; 573 std::mutex relocMutex; 574 } 575 576 // Check whether the definition name def is a mangled function name that matches 577 // the reference name ref. 578 static bool canSuggestExternCForCXX(StringRef ref, StringRef def) { 579 llvm::ItaniumPartialDemangler d; 580 std::string name = def.str(); 581 if (d.partialDemangle(name.c_str())) 582 return false; 583 char *buf = d.getFunctionName(nullptr, nullptr); 584 if (!buf) 585 return false; 586 bool ret = ref == buf; 587 free(buf); 588 return ret; 589 } 590 591 // Suggest an alternative spelling of an "undefined symbol" diagnostic. Returns 592 // the suggested symbol, which is either in the symbol table, or in the same 593 // file of sym. 594 static const Symbol *getAlternativeSpelling(const Undefined &sym, 595 std::string &pre_hint, 596 std::string &post_hint) { 597 DenseMap<StringRef, const Symbol *> map; 598 if (sym.file && sym.file->kind() == InputFile::ObjKind) { 599 auto *file = cast<ELFFileBase>(sym.file); 600 // If sym is a symbol defined in a discarded section, maybeReportDiscarded() 601 // will give an error. Don't suggest an alternative spelling. 
602 if (file && sym.discardedSecIdx != 0 && 603 file->getSections()[sym.discardedSecIdx] == &InputSection::discarded) 604 return nullptr; 605 606 // Build a map of local defined symbols. 607 for (const Symbol *s : sym.file->getSymbols()) 608 if (s->isLocal() && s->isDefined() && !s->getName().empty()) 609 map.try_emplace(s->getName(), s); 610 } 611 612 auto suggest = [&](StringRef newName) -> const Symbol * { 613 // If defined locally. 614 if (const Symbol *s = map.lookup(newName)) 615 return s; 616 617 // If in the symbol table and not undefined. 618 if (const Symbol *s = symtab.find(newName)) 619 if (!s->isUndefined()) 620 return s; 621 622 return nullptr; 623 }; 624 625 // This loop enumerates all strings of Levenshtein distance 1 as typo 626 // correction candidates and suggests the one that exists as a non-undefined 627 // symbol. 628 StringRef name = sym.getName(); 629 for (size_t i = 0, e = name.size(); i != e + 1; ++i) { 630 // Insert a character before name[i]. 631 std::string newName = (name.substr(0, i) + "0" + name.substr(i)).str(); 632 for (char c = '0'; c <= 'z'; ++c) { 633 newName[i] = c; 634 if (const Symbol *s = suggest(newName)) 635 return s; 636 } 637 if (i == e) 638 break; 639 640 // Substitute name[i]. 641 newName = std::string(name); 642 for (char c = '0'; c <= 'z'; ++c) { 643 newName[i] = c; 644 if (const Symbol *s = suggest(newName)) 645 return s; 646 } 647 648 // Transpose name[i] and name[i+1]. This is of edit distance 2 but it is 649 // common. 650 if (i + 1 < e) { 651 newName[i] = name[i + 1]; 652 newName[i + 1] = name[i]; 653 if (const Symbol *s = suggest(newName)) 654 return s; 655 } 656 657 // Delete name[i]. 658 newName = (name.substr(0, i) + name.substr(i + 1)).str(); 659 if (const Symbol *s = suggest(newName)) 660 return s; 661 } 662 663 // Case mismatch, e.g. Foo vs FOO. 664 for (auto &it : map) 665 if (name.equals_insensitive(it.first)) 666 return it.second; 667 for (Symbol *sym : symtab.getSymbols()) 668 if (!sym->isUndefined() && name.equals_insensitive(sym->getName())) 669 return sym; 670 671 // The reference may be a mangled name while the definition is not. Suggest a 672 // missing extern "C". 
673 if (name.starts_with("_Z")) { 674 std::string buf = name.str(); 675 llvm::ItaniumPartialDemangler d; 676 if (!d.partialDemangle(buf.c_str())) 677 if (char *buf = d.getFunctionName(nullptr, nullptr)) { 678 const Symbol *s = suggest(buf); 679 free(buf); 680 if (s) { 681 pre_hint = ": extern \"C\" "; 682 return s; 683 } 684 } 685 } else { 686 const Symbol *s = nullptr; 687 for (auto &it : map) 688 if (canSuggestExternCForCXX(name, it.first)) { 689 s = it.second; 690 break; 691 } 692 if (!s) 693 for (Symbol *sym : symtab.getSymbols()) 694 if (canSuggestExternCForCXX(name, sym->getName())) { 695 s = sym; 696 break; 697 } 698 if (s) { 699 pre_hint = " to declare "; 700 post_hint = " as extern \"C\"?"; 701 return s; 702 } 703 } 704 705 return nullptr; 706 } 707 708 static void reportUndefinedSymbol(const UndefinedDiag &undef, 709 bool correctSpelling) { 710 Undefined &sym = *undef.sym; 711 712 auto visibility = [&]() -> std::string { 713 switch (sym.visibility()) { 714 case STV_INTERNAL: 715 return "internal "; 716 case STV_HIDDEN: 717 return "hidden "; 718 case STV_PROTECTED: 719 return "protected "; 720 default: 721 return ""; 722 } 723 }; 724 725 std::string msg; 726 switch (config->ekind) { 727 case ELF32LEKind: 728 msg = maybeReportDiscarded<ELF32LE>(sym); 729 break; 730 case ELF32BEKind: 731 msg = maybeReportDiscarded<ELF32BE>(sym); 732 break; 733 case ELF64LEKind: 734 msg = maybeReportDiscarded<ELF64LE>(sym); 735 break; 736 case ELF64BEKind: 737 msg = maybeReportDiscarded<ELF64BE>(sym); 738 break; 739 default: 740 llvm_unreachable(""); 741 } 742 if (msg.empty()) 743 msg = "undefined " + visibility() + "symbol: " + toString(sym); 744 745 const size_t maxUndefReferences = 3; 746 size_t i = 0; 747 for (UndefinedDiag::Loc l : undef.locs) { 748 if (i >= maxUndefReferences) 749 break; 750 InputSectionBase &sec = *l.sec; 751 uint64_t offset = l.offset; 752 753 msg += "\n>>> referenced by "; 754 // In the absence of line number information, utilize DW_TAG_variable (if 755 // present) for the enclosing symbol (e.g. var in `int *a[] = {&undef};`). 756 Symbol *enclosing = sec.getEnclosingSymbol(offset); 757 std::string src = sec.getSrcMsg(enclosing ? 
*enclosing : sym, offset); 758 if (!src.empty()) 759 msg += src + "\n>>> "; 760 msg += sec.getObjMsg(offset); 761 i++; 762 } 763 764 if (i < undef.locs.size()) 765 msg += ("\n>>> referenced " + Twine(undef.locs.size() - i) + " more times") 766 .str(); 767 768 if (correctSpelling) { 769 std::string pre_hint = ": ", post_hint; 770 if (const Symbol *corrected = 771 getAlternativeSpelling(sym, pre_hint, post_hint)) { 772 msg += "\n>>> did you mean" + pre_hint + toString(*corrected) + post_hint; 773 if (corrected->file) 774 msg += "\n>>> defined in: " + toString(corrected->file); 775 } 776 } 777 778 if (sym.getName().starts_with("_ZTV")) 779 msg += 780 "\n>>> the vtable symbol may be undefined because the class is missing " 781 "its key function (see https://lld.llvm.org/missingkeyfunction)"; 782 if (config->gcSections && config->zStartStopGC && 783 sym.getName().starts_with("__start_")) { 784 msg += "\n>>> the encapsulation symbol needs to be retained under " 785 "--gc-sections properly; consider -z nostart-stop-gc " 786 "(see https://lld.llvm.org/ELF/start-stop-gc)"; 787 } 788 789 if (undef.isWarning) 790 warn(msg); 791 else 792 error(msg, ErrorTag::SymbolNotFound, {sym.getName()}); 793 } 794 795 void elf::reportUndefinedSymbols() { 796 // Find the first "undefined symbol" diagnostic for each diagnostic, and 797 // collect all "referenced from" lines at the first diagnostic. 798 DenseMap<Symbol *, UndefinedDiag *> firstRef; 799 for (UndefinedDiag &undef : undefs) { 800 assert(undef.locs.size() == 1); 801 if (UndefinedDiag *canon = firstRef.lookup(undef.sym)) { 802 canon->locs.push_back(undef.locs[0]); 803 undef.locs.clear(); 804 } else 805 firstRef[undef.sym] = &undef; 806 } 807 808 // Enable spell corrector for the first 2 diagnostics. 809 for (const auto &[i, undef] : llvm::enumerate(undefs)) 810 if (!undef.locs.empty()) 811 reportUndefinedSymbol(undef, i < 2); 812 undefs.clear(); 813 } 814 815 // Report an undefined symbol if necessary. 816 // Returns true if the undefined symbol will produce an error message. 817 static bool maybeReportUndefined(Undefined &sym, InputSectionBase &sec, 818 uint64_t offset) { 819 std::lock_guard<std::mutex> lock(relocMutex); 820 // If versioned, issue an error (even if the symbol is weak) because we don't 821 // know the defining filename which is required to construct a Verneed entry. 822 if (sym.hasVersionSuffix) { 823 undefs.push_back({&sym, {{&sec, offset}}, false}); 824 return true; 825 } 826 if (sym.isWeak()) 827 return false; 828 829 bool canBeExternal = !sym.isLocal() && sym.visibility() == STV_DEFAULT; 830 if (config->unresolvedSymbols == UnresolvedPolicy::Ignore && canBeExternal) 831 return false; 832 833 // clang (as of 2019-06-12) / gcc (as of 8.2.1) PPC64 may emit a .rela.toc 834 // which references a switch table in a discarded .rodata/.text section. The 835 // .toc and the .rela.toc are incorrectly not placed in the comdat. The ELF 836 // spec says references from outside the group to a STB_LOCAL symbol are not 837 // allowed. Work around the bug. 838 // 839 // PPC32 .got2 is similar but cannot be fixed. 
Multiple .got2 is infeasible 840 // because .LC0-.LTOC is not representable if the two labels are in different 841 // .got2 842 if (sym.discardedSecIdx != 0 && (sec.name == ".got2" || sec.name == ".toc")) 843 return false; 844 845 bool isWarning = 846 (config->unresolvedSymbols == UnresolvedPolicy::Warn && canBeExternal) || 847 config->noinhibitExec; 848 undefs.push_back({&sym, {{&sec, offset}}, isWarning}); 849 return !isWarning; 850 } 851 852 // MIPS N32 ABI treats series of successive relocations with the same offset 853 // as a single relocation. The similar approach used by N64 ABI, but this ABI 854 // packs all relocations into the single relocation record. Here we emulate 855 // this for the N32 ABI. Iterate over relocation with the same offset and put 856 // theirs types into the single bit-set. 857 template <class RelTy> 858 RelType RelocationScanner::getMipsN32RelType(RelTy *&rel) const { 859 RelType type = 0; 860 uint64_t offset = rel->r_offset; 861 862 int n = 0; 863 while (rel != static_cast<const RelTy *>(end) && rel->r_offset == offset) 864 type |= (rel++)->getType(config->isMips64EL) << (8 * n++); 865 return type; 866 } 867 868 template <bool shard = false> 869 static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec, 870 Symbol &sym, int64_t addend, RelExpr expr, 871 RelType type) { 872 Partition &part = isec.getPartition(); 873 874 if (sym.isTagged()) { 875 std::lock_guard<std::mutex> lock(relocMutex); 876 part.relaDyn->addRelativeReloc(target->relativeRel, isec, offsetInSec, sym, 877 addend, type, expr); 878 // With MTE globals, we always want to derive the address tag by `ldg`-ing 879 // the symbol. When we have a RELATIVE relocation though, we no longer have 880 // a reference to the symbol. Because of this, when we have an addend that 881 // puts the result of the RELATIVE relocation out-of-bounds of the symbol 882 // (e.g. the addend is outside of [0, sym.getSize()]), the AArch64 MemtagABI 883 // says we should store the offset to the start of the symbol in the target 884 // field. This is described in further detail in: 885 // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#841extended-semantics-of-r_aarch64_relative 886 if (addend < 0 || static_cast<uint64_t>(addend) >= sym.getSize()) 887 isec.relocations.push_back({expr, type, offsetInSec, addend, &sym}); 888 return; 889 } 890 891 // Add a relative relocation. If relrDyn section is enabled, and the 892 // relocation offset is guaranteed to be even, add the relocation to 893 // the relrDyn section, otherwise add it to the relaDyn section. 894 // relrDyn sections don't support odd offsets. Also, relrDyn sections 895 // don't store the addend values, so we must write it to the relocated 896 // address. 897 if (part.relrDyn && isec.addralign >= 2 && offsetInSec % 2 == 0) { 898 isec.addReloc({expr, type, offsetInSec, addend, &sym}); 899 if (shard) 900 part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back( 901 {&isec, offsetInSec}); 902 else 903 part.relrDyn->relocs.push_back({&isec, offsetInSec}); 904 return; 905 } 906 part.relaDyn->addRelativeReloc<shard>(target->relativeRel, isec, offsetInSec, 907 sym, addend, type, expr); 908 } 909 910 template <class PltSection, class GotPltSection> 911 static void addPltEntry(PltSection &plt, GotPltSection &gotPlt, 912 RelocationBaseSection &rel, RelType type, Symbol &sym) { 913 plt.addEntry(sym); 914 gotPlt.addEntry(sym); 915 rel.addReloc({type, &gotPlt, sym.getGotPltOffset(), 916 sym.isPreemptible ? 
DynamicReloc::AgainstSymbol 917 : DynamicReloc::AddendOnlyWithTargetVA, 918 sym, 0, R_ABS}); 919 } 920 921 void elf::addGotEntry(Symbol &sym) { 922 in.got->addEntry(sym); 923 uint64_t off = sym.getGotOffset(); 924 925 // If preemptible, emit a GLOB_DAT relocation. 926 if (sym.isPreemptible) { 927 mainPart->relaDyn->addReloc({target->gotRel, in.got.get(), off, 928 DynamicReloc::AgainstSymbol, sym, 0, R_ABS}); 929 return; 930 } 931 932 // Otherwise, the value is either a link-time constant or the load base 933 // plus a constant. 934 if (!config->isPic || isAbsolute(sym)) 935 in.got->addConstant({R_ABS, target->symbolicRel, off, 0, &sym}); 936 else 937 addRelativeReloc(*in.got, off, sym, 0, R_ABS, target->symbolicRel); 938 } 939 940 static void addTpOffsetGotEntry(Symbol &sym) { 941 in.got->addEntry(sym); 942 uint64_t off = sym.getGotOffset(); 943 if (!sym.isPreemptible && !config->shared) { 944 in.got->addConstant({R_TPREL, target->symbolicRel, off, 0, &sym}); 945 return; 946 } 947 mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible( 948 target->tlsGotRel, *in.got, off, sym, target->symbolicRel); 949 } 950 951 // Return true if we can define a symbol in the executable that 952 // contains the value/function of a symbol defined in a shared 953 // library. 954 static bool canDefineSymbolInExecutable(Symbol &sym) { 955 // If the symbol has default visibility the symbol defined in the 956 // executable will preempt it. 957 // Note that we want the visibility of the shared symbol itself, not 958 // the visibility of the symbol in the output file we are producing. 959 if (!sym.dsoProtected) 960 return true; 961 962 // If we are allowed to break address equality of functions, defining 963 // a plt entry will allow the program to call the function in the 964 // .so, but the .so and the executable will no agree on the address 965 // of the function. Similar logic for objects. 966 return ((sym.isFunc() && config->ignoreFunctionAddressEquality) || 967 (sym.isObject() && config->ignoreDataAddressEquality)); 968 } 969 970 // Returns true if a given relocation can be computed at link-time. 971 // This only handles relocation types expected in processAux. 972 // 973 // For instance, we know the offset from a relocation to its target at 974 // link-time if the relocation is PC-relative and refers a 975 // non-interposable function in the same executable. This function 976 // will return true for such relocation. 977 // 978 // If this function returns false, that means we need to emit a 979 // dynamic relocation so that the relocation will be fixed at load-time. 980 bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type, 981 const Symbol &sym, 982 uint64_t relOff) const { 983 // These expressions always compute a constant 984 if (oneof<R_GOTPLT, R_GOT_OFF, R_RELAX_HINT, R_MIPS_GOT_LOCAL_PAGE, 985 R_MIPS_GOTREL, R_MIPS_GOT_OFF, R_MIPS_GOT_OFF32, R_MIPS_GOT_GP_PC, 986 R_AARCH64_GOT_PAGE_PC, R_GOT_PC, R_GOTONLY_PC, R_GOTPLTONLY_PC, 987 R_PLT_PC, R_PLT_GOTREL, R_PLT_GOTPLT, R_GOTPLT_GOTREL, R_GOTPLT_PC, 988 R_PPC32_PLTREL, R_PPC64_CALL_PLT, R_PPC64_RELAX_TOC, R_RISCV_ADD, 989 R_AARCH64_GOT_PAGE, R_LOONGARCH_PLT_PAGE_PC, R_LOONGARCH_GOT, 990 R_LOONGARCH_GOT_PAGE_PC>(e)) 991 return true; 992 993 // These never do, except if the entire file is position dependent or if 994 // only the low bits are used. 
995 if (e == R_GOT || e == R_PLT) 996 return target->usesOnlyLowPageBits(type) || !config->isPic; 997 998 if (sym.isPreemptible) 999 return false; 1000 if (!config->isPic) 1001 return true; 1002 1003 // Constant when referencing a non-preemptible symbol. 1004 if (e == R_SIZE || e == R_RISCV_LEB128) 1005 return true; 1006 1007 // For the target and the relocation, we want to know if they are 1008 // absolute or relative. 1009 bool absVal = isAbsoluteValue(sym); 1010 bool relE = isRelExpr(e); 1011 if (absVal && !relE) 1012 return true; 1013 if (!absVal && relE) 1014 return true; 1015 if (!absVal && !relE) 1016 return target->usesOnlyLowPageBits(type); 1017 1018 assert(absVal && relE); 1019 1020 // Allow R_PLT_PC (optimized to R_PC here) to a hidden undefined weak symbol 1021 // in PIC mode. This is a little strange, but it allows us to link function 1022 // calls to such symbols (e.g. glibc/stdlib/exit.c:__run_exit_handlers). 1023 // Normally such a call will be guarded with a comparison, which will load a 1024 // zero from the GOT. 1025 if (sym.isUndefWeak()) 1026 return true; 1027 1028 // We set the final symbols values for linker script defined symbols later. 1029 // They always can be computed as a link time constant. 1030 if (sym.scriptDefined) 1031 return true; 1032 1033 error("relocation " + toString(type) + " cannot refer to absolute symbol: " + 1034 toString(sym) + getLocation(*sec, sym, relOff)); 1035 return true; 1036 } 1037 1038 // The reason we have to do this early scan is as follows 1039 // * To mmap the output file, we need to know the size 1040 // * For that, we need to know how many dynamic relocs we will have. 1041 // It might be possible to avoid this by outputting the file with write: 1042 // * Write the allocated output sections, computing addresses. 1043 // * Apply relocations, recording which ones require a dynamic reloc. 1044 // * Write the dynamic relocations. 1045 // * Write the rest of the file. 1046 // This would have some drawbacks. For example, we would only know if .rela.dyn 1047 // is needed after applying relocations. If it is, it will go after rw and rx 1048 // sections. Given that it is ro, we will need an extra PT_LOAD. This 1049 // complicates things for the dynamic linker and means we would have to reserve 1050 // space for the extra PT_LOAD even if we end up not using it. 1051 void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, 1052 Symbol &sym, int64_t addend) const { 1053 // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT 1054 // indirection. 1055 const bool isIfunc = sym.isGnuIFunc(); 1056 if (!sym.isPreemptible && (!isIfunc || config->zIfuncNoplt)) { 1057 if (expr != R_GOT_PC) { 1058 // The 0x8000 bit of r_addend of R_PPC_PLTREL24 is used to choose call 1059 // stub type. It should be ignored if optimized to R_PC. 1060 if (config->emachine == EM_PPC && expr == R_PPC32_PLTREL) 1061 addend &= ~0x8000; 1062 // R_HEX_GD_PLT_B22_PCREL (call a@GDPLT) is transformed into 1063 // call __tls_get_addr even if the symbol is non-preemptible. 1064 if (!(config->emachine == EM_HEXAGON && 1065 (type == R_HEX_GD_PLT_B22_PCREL || 1066 type == R_HEX_GD_PLT_B22_PCREL_X || 1067 type == R_HEX_GD_PLT_B32_PCREL_X))) 1068 expr = fromPlt(expr); 1069 } else if (!isAbsoluteValue(sym)) { 1070 expr = 1071 target->adjustGotPcExpr(type, addend, sec->content().data() + offset); 1072 // If the target adjusted the expression to R_RELAX_GOT_PC, we may end up 1073 // needing the GOT if we can't relax everything. 
1074 if (expr == R_RELAX_GOT_PC) 1075 in.got->hasGotOffRel.store(true, std::memory_order_relaxed); 1076 } 1077 } 1078 1079 // We were asked not to generate PLT entries for ifuncs. Instead, pass the 1080 // direct relocation on through. 1081 if (LLVM_UNLIKELY(isIfunc) && config->zIfuncNoplt) { 1082 std::lock_guard<std::mutex> lock(relocMutex); 1083 sym.exportDynamic = true; 1084 mainPart->relaDyn->addSymbolReloc(type, *sec, offset, sym, addend, type); 1085 return; 1086 } 1087 1088 if (needsGot(expr)) { 1089 if (config->emachine == EM_MIPS) { 1090 // MIPS ABI has special rules to process GOT entries and doesn't 1091 // require relocation entries for them. A special case is TLS 1092 // relocations. In that case dynamic loader applies dynamic 1093 // relocations to initialize TLS GOT entries. 1094 // See "Global Offset Table" in Chapter 5 in the following document 1095 // for detailed description: 1096 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf 1097 in.mipsGot->addEntry(*sec->file, sym, addend, expr); 1098 } else if (!sym.isTls() || config->emachine != EM_LOONGARCH) { 1099 // Many LoongArch TLS relocs reuse the R_LOONGARCH_GOT type, in which 1100 // case the NEEDS_GOT flag shouldn't get set. 1101 sym.setFlags(NEEDS_GOT); 1102 } 1103 } else if (needsPlt(expr)) { 1104 sym.setFlags(NEEDS_PLT); 1105 } else if (LLVM_UNLIKELY(isIfunc)) { 1106 sym.setFlags(HAS_DIRECT_RELOC); 1107 } 1108 1109 // If the relocation is known to be a link-time constant, we know no dynamic 1110 // relocation will be created, pass the control to relocateAlloc() or 1111 // relocateNonAlloc() to resolve it. 1112 // 1113 // The behavior of an undefined weak reference is implementation defined. For 1114 // non-link-time constants, we resolve relocations statically (let 1115 // relocate{,Non}Alloc() resolve them) for -no-pie and try producing dynamic 1116 // relocations for -pie and -shared. 1117 // 1118 // The general expectation of -no-pie static linking is that there is no 1119 // dynamic relocation (except IRELATIVE). Emitting dynamic relocations for 1120 // -shared matches the spirit of its -z undefs default. -pie has freedom on 1121 // choices, and we choose dynamic relocations to be consistent with the 1122 // handling of GOT-generating relocations. 1123 if (isStaticLinkTimeConstant(expr, type, sym, offset) || 1124 (!config->isPic && sym.isUndefWeak())) { 1125 sec->addReloc({expr, type, offset, addend, &sym}); 1126 return; 1127 } 1128 1129 // Use a simple -z notext rule that treats all sections except .eh_frame as 1130 // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our 1131 // SectionBase::getOffset would incorrectly adjust the offset). 1132 // 1133 // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel 1134 // conversion. We still emit a dynamic relocation. 
1135 bool canWrite = (sec->flags & SHF_WRITE) || 1136 !(config->zText || 1137 (isa<EhInputSection>(sec) && config->emachine != EM_MIPS)); 1138 if (canWrite) { 1139 RelType rel = target->getDynRel(type); 1140 if (oneof<R_GOT, R_LOONGARCH_GOT>(expr) || 1141 (rel == target->symbolicRel && !sym.isPreemptible)) { 1142 addRelativeReloc<true>(*sec, offset, sym, addend, expr, type); 1143 return; 1144 } else if (rel != 0) { 1145 if (config->emachine == EM_MIPS && rel == target->symbolicRel) 1146 rel = target->relativeRel; 1147 std::lock_guard<std::mutex> lock(relocMutex); 1148 sec->getPartition().relaDyn->addSymbolReloc(rel, *sec, offset, sym, 1149 addend, type); 1150 1151 // MIPS ABI turns using of GOT and dynamic relocations inside out. 1152 // While regular ABI uses dynamic relocations to fill up GOT entries 1153 // MIPS ABI requires dynamic linker to fills up GOT entries using 1154 // specially sorted dynamic symbol table. This affects even dynamic 1155 // relocations against symbols which do not require GOT entries 1156 // creation explicitly, i.e. do not have any GOT-relocations. So if 1157 // a preemptible symbol has a dynamic relocation we anyway have 1158 // to create a GOT entry for it. 1159 // If a non-preemptible symbol has a dynamic relocation against it, 1160 // dynamic linker takes it st_value, adds offset and writes down 1161 // result of the dynamic relocation. In case of preemptible symbol 1162 // dynamic linker performs symbol resolution, writes the symbol value 1163 // to the GOT entry and reads the GOT entry when it needs to perform 1164 // a dynamic relocation. 1165 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19 1166 if (config->emachine == EM_MIPS) 1167 in.mipsGot->addEntry(*sec->file, sym, addend, expr); 1168 return; 1169 } 1170 } 1171 1172 // When producing an executable, we can perform copy relocations (for 1173 // STT_OBJECT) and canonical PLT (for STT_FUNC) if sym is defined by a DSO. 1174 if (!config->shared && sym.isShared()) { 1175 if (!canDefineSymbolInExecutable(sym)) { 1176 errorOrWarn("cannot preempt symbol: " + toString(sym) + 1177 getLocation(*sec, sym, offset)); 1178 return; 1179 } 1180 1181 if (sym.isObject()) { 1182 // Produce a copy relocation. 1183 if (auto *ss = dyn_cast<SharedSymbol>(&sym)) { 1184 if (!config->zCopyreloc) 1185 error("unresolvable relocation " + toString(type) + 1186 " against symbol '" + toString(*ss) + 1187 "'; recompile with -fPIC or remove '-z nocopyreloc'" + 1188 getLocation(*sec, sym, offset)); 1189 sym.setFlags(NEEDS_COPY); 1190 } 1191 sec->addReloc({expr, type, offset, addend, &sym}); 1192 return; 1193 } 1194 1195 // This handles a non PIC program call to function in a shared library. In 1196 // an ideal world, we could just report an error saying the relocation can 1197 // overflow at runtime. In the real world with glibc, crt1.o has a 1198 // R_X86_64_PC32 pointing to libc.so. 1199 // 1200 // The general idea on how to handle such cases is to create a PLT entry and 1201 // use that as the function value. 1202 // 1203 // For the static linking part, we just return a plt expr and everything 1204 // else will use the PLT entry as the address. 1205 // 1206 // The remaining problem is making sure pointer equality still works. We 1207 // need the help of the dynamic linker for that. We let it know that we have 1208 // a direct reference to a so symbol by creating an undefined symbol with a 1209 // non zero st_value. Seeing that, the dynamic linker resolves the symbol to 1210 // the value of the symbol we created. 
This is true even for got entries, so 1211 // pointer equality is maintained. To avoid an infinite loop, the only entry 1212 // that points to the real function is a dedicated got entry used by the 1213 // plt. That is identified by special relocation types (R_X86_64_JUMP_SLOT, 1214 // R_386_JMP_SLOT, etc). 1215 1216 // For position independent executable on i386, the plt entry requires ebx 1217 // to be set. This causes two problems: 1218 // * If some code has a direct reference to a function, it was probably 1219 // compiled without -fPIE/-fPIC and doesn't maintain ebx. 1220 // * If a library definition gets preempted to the executable, it will have 1221 // the wrong ebx value. 1222 if (sym.isFunc()) { 1223 if (config->pie && config->emachine == EM_386) 1224 errorOrWarn("symbol '" + toString(sym) + 1225 "' cannot be preempted; recompile with -fPIE" + 1226 getLocation(*sec, sym, offset)); 1227 sym.setFlags(NEEDS_COPY | NEEDS_PLT); 1228 sec->addReloc({expr, type, offset, addend, &sym}); 1229 return; 1230 } 1231 } 1232 1233 errorOrWarn("relocation " + toString(type) + " cannot be used against " + 1234 (sym.getName().empty() ? "local symbol" 1235 : "symbol '" + toString(sym) + "'") + 1236 "; recompile with -fPIC" + getLocation(*sec, sym, offset)); 1237 } 1238 1239 // This function is similar to the `handleTlsRelocation`. MIPS does not 1240 // support any relaxations for TLS relocations so by factoring out MIPS 1241 // handling in to the separate function we can simplify the code and do not 1242 // pollute other `handleTlsRelocation` by MIPS `ifs` statements. 1243 // Mips has a custom MipsGotSection that handles the writing of GOT entries 1244 // without dynamic relocations. 1245 static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym, 1246 InputSectionBase &c, uint64_t offset, 1247 int64_t addend, RelExpr expr) { 1248 if (expr == R_MIPS_TLSLD) { 1249 in.mipsGot->addTlsIndex(*c.file); 1250 c.addReloc({expr, type, offset, addend, &sym}); 1251 return 1; 1252 } 1253 if (expr == R_MIPS_TLSGD) { 1254 in.mipsGot->addDynTlsEntry(*c.file, sym); 1255 c.addReloc({expr, type, offset, addend, &sym}); 1256 return 1; 1257 } 1258 return 0; 1259 } 1260 1261 // Notes about General Dynamic and Local Dynamic TLS models below. They may 1262 // require the generation of a pair of GOT entries that have associated dynamic 1263 // relocations. The pair of GOT entries created are of the form GOT[e0] Module 1264 // Index (Used to find pointer to TLS block at run-time) GOT[e1] Offset of 1265 // symbol in TLS block. 1266 // 1267 // Returns the number of relocations processed. 1268 static unsigned handleTlsRelocation(RelType type, Symbol &sym, 1269 InputSectionBase &c, uint64_t offset, 1270 int64_t addend, RelExpr expr) { 1271 if (expr == R_TPREL || expr == R_TPREL_NEG) { 1272 if (config->shared) { 1273 errorOrWarn("relocation " + toString(type) + " against " + toString(sym) + 1274 " cannot be used with -shared" + getLocation(c, sym, offset)); 1275 return 1; 1276 } 1277 return 0; 1278 } 1279 1280 if (config->emachine == EM_MIPS) 1281 return handleMipsTlsRelocation(type, sym, c, offset, addend, expr); 1282 bool isRISCV = config->emachine == EM_RISCV; 1283 1284 if (oneof<R_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC, 1285 R_TLSDESC_GOTPLT>(expr) && 1286 config->shared) { 1287 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a label. Do not 1288 // set NEEDS_TLSDESC on the label. 
1289 if (expr != R_TLSDESC_CALL) { 1290 if (!isRISCV || type == R_RISCV_TLSDESC_HI20) 1291 sym.setFlags(NEEDS_TLSDESC); 1292 c.addReloc({expr, type, offset, addend, &sym}); 1293 } 1294 return 1; 1295 } 1296 1297 // ARM, Hexagon, LoongArch and RISC-V do not support GD/LD to IE/LE 1298 // optimizations. 1299 // RISC-V supports TLSDESC to IE/LE optimizations. 1300 // For PPC64, if the file has missing R_PPC64_TLSGD/R_PPC64_TLSLD, disable 1301 // optimization as well. 1302 bool execOptimize = 1303 !config->shared && config->emachine != EM_ARM && 1304 config->emachine != EM_HEXAGON && config->emachine != EM_LOONGARCH && 1305 !(isRISCV && expr != R_TLSDESC_PC && expr != R_TLSDESC_CALL) && 1306 !c.file->ppc64DisableTLSRelax; 1307 1308 // If we are producing an executable and the symbol is non-preemptable, it 1309 // must be defined and the code sequence can be optimized to use Local-Exec. 1310 // 1311 // ARM and RISC-V do not support any relaxations for TLS relocations, however, 1312 // we can omit the DTPMOD dynamic relocations and resolve them at link time 1313 // because them are always 1. This may be necessary for static linking as 1314 // DTPMOD may not be expected at load time. 1315 bool isLocalInExecutable = !sym.isPreemptible && !config->shared; 1316 1317 // Local Dynamic is for access to module local TLS variables, while still 1318 // being suitable for being dynamically loaded via dlopen. GOT[e0] is the 1319 // module index, with a special value of 0 for the current module. GOT[e1] is 1320 // unused. There only needs to be one module index entry. 1321 if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) { 1322 // Local-Dynamic relocs can be optimized to Local-Exec. 1323 if (execOptimize) { 1324 c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type, 1325 offset, addend, &sym}); 1326 return target->getTlsGdRelaxSkip(type); 1327 } 1328 if (expr == R_TLSLD_HINT) 1329 return 1; 1330 ctx.needsTlsLd.store(true, std::memory_order_relaxed); 1331 c.addReloc({expr, type, offset, addend, &sym}); 1332 return 1; 1333 } 1334 1335 // Local-Dynamic relocs can be optimized to Local-Exec. 1336 if (expr == R_DTPREL) { 1337 if (execOptimize) 1338 expr = target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE); 1339 c.addReloc({expr, type, offset, addend, &sym}); 1340 return 1; 1341 } 1342 1343 // Local-Dynamic sequence where offset of tls variable relative to dynamic 1344 // thread pointer is stored in the got. This cannot be optimized to 1345 // Local-Exec. 1346 if (expr == R_TLSLD_GOT_OFF) { 1347 sym.setFlags(NEEDS_GOT_DTPREL); 1348 c.addReloc({expr, type, offset, addend, &sym}); 1349 return 1; 1350 } 1351 1352 if (oneof<R_AARCH64_TLSDESC_PAGE, R_TLSDESC, R_TLSDESC_CALL, R_TLSDESC_PC, 1353 R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC, 1354 R_LOONGARCH_TLSGD_PAGE_PC>(expr)) { 1355 if (!execOptimize) { 1356 sym.setFlags(NEEDS_TLSGD); 1357 c.addReloc({expr, type, offset, addend, &sym}); 1358 return 1; 1359 } 1360 1361 // Global-Dynamic/TLSDESC can be optimized to Initial-Exec or Local-Exec 1362 // depending on the symbol being locally defined or not. 1363 // 1364 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12_I,CALL} reference a non-preemptible 1365 // label, so the LE optimization will be categorized as 1366 // R_RELAX_TLS_GD_TO_LE. We fix the categorization in RISCV::relocateAlloc. 
1367 if (sym.isPreemptible) { 1368 sym.setFlags(NEEDS_TLSGD_TO_IE); 1369 c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type, 1370 offset, addend, &sym}); 1371 } else { 1372 c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type, 1373 offset, addend, &sym}); 1374 } 1375 return target->getTlsGdRelaxSkip(type); 1376 } 1377 1378 if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, R_AARCH64_GOT_PAGE_PC, 1379 R_LOONGARCH_GOT_PAGE_PC, R_GOT_OFF, R_TLSIE_HINT>(expr)) { 1380 ctx.hasTlsIe.store(true, std::memory_order_relaxed); 1381 // Initial-Exec relocs can be optimized to Local-Exec if the symbol is 1382 // locally defined. This is not supported on SystemZ. 1383 if (execOptimize && isLocalInExecutable && config->emachine != EM_S390) { 1384 c.addReloc({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym}); 1385 } else if (expr != R_TLSIE_HINT) { 1386 sym.setFlags(NEEDS_TLSIE); 1387 // R_GOT needs a relative relocation for PIC on i386 and Hexagon. 1388 if (expr == R_GOT && config->isPic && !target->usesOnlyLowPageBits(type)) 1389 addRelativeReloc<true>(c, offset, sym, addend, expr, type); 1390 else 1391 c.addReloc({expr, type, offset, addend, &sym}); 1392 } 1393 return 1; 1394 } 1395 1396 return 0; 1397 } 1398 1399 template <class ELFT, class RelTy> void RelocationScanner::scanOne(RelTy *&i) { 1400 const RelTy &rel = *i; 1401 uint32_t symIndex = rel.getSymbol(config->isMips64EL); 1402 Symbol &sym = sec->getFile<ELFT>()->getSymbol(symIndex); 1403 RelType type; 1404 if (config->mipsN32Abi) { 1405 type = getMipsN32RelType(i); 1406 } else { 1407 type = rel.getType(config->isMips64EL); 1408 ++i; 1409 } 1410 // Get an offset in an output section this relocation is applied to. 1411 uint64_t offset = getter.get(rel.r_offset); 1412 if (offset == uint64_t(-1)) 1413 return; 1414 1415 RelExpr expr = target->getRelExpr(type, sym, sec->content().data() + offset); 1416 int64_t addend = RelTy::IsRela 1417 ? getAddend<ELFT>(rel) 1418 : target->getImplicitAddend( 1419 sec->content().data() + rel.r_offset, type); 1420 if (LLVM_UNLIKELY(config->emachine == EM_MIPS)) 1421 addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal()); 1422 else if (config->emachine == EM_PPC64 && config->isPic && type == R_PPC64_TOC) 1423 addend += getPPC64TocBase(); 1424 1425 // Ignore R_*_NONE and other marker relocations. 1426 if (expr == R_NONE) 1427 return; 1428 1429 // Error if the target symbol is undefined. Symbol index 0 may be used by 1430 // marker relocations, e.g. R_*_NONE and R_ARM_V4BX. Don't error on them. 1431 if (sym.isUndefined() && symIndex != 0 && 1432 maybeReportUndefined(cast<Undefined>(sym), *sec, offset)) 1433 return; 1434 1435 if (config->emachine == EM_PPC64) { 1436 // We can separate the small code model relocations into 2 categories: 1437 // 1) Those that access the compiler generated .toc sections. 1438 // 2) Those that access the linker allocated got entries. 1439 // lld allocates got entries to symbols on demand. Since we don't try to 1440 // sort the got entries in any way, we don't have to track which objects 1441 // have got-based small code model relocs. The .toc sections get placed 1442 // after the end of the linker allocated .got section and we do sort those 1443 // so sections addressed with small code model relocations come first. 1444 if (type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS) 1445 sec->file->ppc64SmallCodeModelTocRelocs = true; 1446 1447 // Record the TOC entry (.toc + addend) as not relaxable. See the comment in 1448 // InputSectionBase::relocateAlloc(). 
1449 if (type == R_PPC64_TOC16_LO && sym.isSection() && isa<Defined>(sym) && 1450 cast<Defined>(sym).section->name == ".toc") 1451 ppc64noTocRelax.insert({&sym, addend}); 1452 1453 if ((type == R_PPC64_TLSGD && expr == R_TLSDESC_CALL) || 1454 (type == R_PPC64_TLSLD && expr == R_TLSLD_HINT)) { 1455 if (i == end) { 1456 errorOrWarn("R_PPC64_TLSGD/R_PPC64_TLSLD may not be the last " 1457 "relocation" + 1458 getLocation(*sec, sym, offset)); 1459 return; 1460 } 1461 1462 // Offset the 4-byte aligned R_PPC64_TLSGD by one byte in the NOTOC case, 1463 // so we can discern it later from the toc-case. 1464 if (i->getType(/*isMips64EL=*/false) == R_PPC64_REL24_NOTOC) 1465 ++offset; 1466 } 1467 } 1468 1469 // If the relocation does not emit a GOT or GOTPLT entry but its computation 1470 // uses their addresses, we need GOT or GOTPLT to be created. 1471 // 1472 // The 5 types that relative GOTPLT are all x86 and x86-64 specific. 1473 if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT, 1474 R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) { 1475 in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed); 1476 } else if (oneof<R_GOTONLY_PC, R_GOTREL, R_PPC32_PLTREL, R_PPC64_TOCBASE, 1477 R_PPC64_RELAX_TOC>(expr)) { 1478 in.got->hasGotOffRel.store(true, std::memory_order_relaxed); 1479 } 1480 1481 // Process TLS relocations, including TLS optimizations. Note that 1482 // R_TPREL and R_TPREL_NEG relocations are resolved in processAux. 1483 // 1484 // Some RISCV TLSDESC relocations reference a local NOTYPE symbol, 1485 // but we need to process them in handleTlsRelocation. 1486 if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) { 1487 if (unsigned processed = 1488 handleTlsRelocation(type, sym, *sec, offset, addend, expr)) { 1489 i += processed - 1; 1490 return; 1491 } 1492 } 1493 1494 processAux(expr, type, offset, sym, addend); 1495 } 1496 1497 // R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for 1498 // General Dynamic/Local Dynamic code sequences. If a GD/LD GOT relocation is 1499 // found but no R_PPC64_TLSGD/R_PPC64_TLSLD is seen, we assume that the 1500 // instructions are generated by very old IBM XL compilers. Work around the 1501 // issue by disabling GD/LD to IE/LE relaxation. 1502 template <class RelTy> 1503 static void checkPPC64TLSRelax(InputSectionBase &sec, ArrayRef<RelTy> rels) { 1504 // Skip if sec is synthetic (sec.file is null) or if sec has been marked. 1505 if (!sec.file || sec.file->ppc64DisableTLSRelax) 1506 return; 1507 bool hasGDLD = false; 1508 for (const RelTy &rel : rels) { 1509 RelType type = rel.getType(false); 1510 switch (type) { 1511 case R_PPC64_TLSGD: 1512 case R_PPC64_TLSLD: 1513 return; // Found a marker 1514 case R_PPC64_GOT_TLSGD16: 1515 case R_PPC64_GOT_TLSGD16_HA: 1516 case R_PPC64_GOT_TLSGD16_HI: 1517 case R_PPC64_GOT_TLSGD16_LO: 1518 case R_PPC64_GOT_TLSLD16: 1519 case R_PPC64_GOT_TLSLD16_HA: 1520 case R_PPC64_GOT_TLSLD16_HI: 1521 case R_PPC64_GOT_TLSLD16_LO: 1522 hasGDLD = true; 1523 break; 1524 } 1525 } 1526 if (hasGDLD) { 1527 sec.file->ppc64DisableTLSRelax = true; 1528 warn(toString(sec.file) + 1529 ": disable TLS relaxation due to R_PPC64_GOT_TLS* relocations without " 1530 "R_PPC64_TLSGD/R_PPC64_TLSLD relocations"); 1531 } 1532 } 1533 1534 template <class ELFT, class RelTy> 1535 void RelocationScanner::scan(ArrayRef<RelTy> rels) { 1536 // Not all relocations end up in Sec->Relocations, but a lot do. 
1537   sec->relocations.reserve(rels.size());
1538
1539   if (config->emachine == EM_PPC64)
1540     checkPPC64TLSRelax<RelTy>(*sec, rels);
1541
1542   // For EhInputSection, OffsetGetter expects the relocations to be sorted by
1543   // r_offset. In rare cases (.eh_frame pieces are reordered by a linker
1544   // script), the relocations may be unordered.
1545   // On SystemZ, all sections need to be sorted by r_offset, to allow TLS
1546   // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip.
1547   SmallVector<RelTy, 0> storage;
1548   if (isa<EhInputSection>(sec) || config->emachine == EM_S390)
1549     rels = sortRels(rels, storage);
1550
1551   end = static_cast<const void *>(rels.end());
1552   for (auto i = rels.begin(); i != end;)
1553     scanOne<ELFT>(i);
1554
1555   // Sort relocations by offset for more efficient searching for
1556   // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
1557   if (config->emachine == EM_RISCV ||
1558       (config->emachine == EM_PPC64 && sec->name == ".toc"))
1559     llvm::stable_sort(sec->relocs(),
1560                       [](const Relocation &lhs, const Relocation &rhs) {
1561                         return lhs.offset < rhs.offset;
1562                       });
1563 }
1564
1565 template <class ELFT> void RelocationScanner::scanSection(InputSectionBase &s) {
1566   sec = &s;
1567   getter = OffsetGetter(s);
1568   const RelsOrRelas<ELFT> rels = s.template relsOrRelas<ELFT>();
1569   if (rels.areRelocsRel())
1570     scan<ELFT>(rels.rels);
1571   else
1572     scan<ELFT>(rels.relas);
1573 }
1574
1575 template <class ELFT> void elf::scanRelocations() {
1576   // Scan all relocations. Each relocation goes through a series of tests to
1577   // determine if it needs special treatment, such as creating GOT, PLT,
1578   // copy relocations, etc. Note that relocations for non-alloc sections are
1579   // directly processed by InputSection::relocateNonAlloc.
1580
1581   // Deterministic parallelism needs sorting relocations, which is unsuitable
1582   // for -z nocombreloc. MIPS and PPC64 use global states which are not suitable
1583   // for parallelism.
1584   bool serial = !config->zCombreloc || config->emachine == EM_MIPS ||
1585                 config->emachine == EM_PPC64;
1586   parallel::TaskGroup tg;
1587   for (ELFFileBase *f : ctx.objectFiles) {
1588     auto fn = [f]() {
1589       RelocationScanner scanner;
1590       for (InputSectionBase *s : f->getSections()) {
1591         if (s && s->kind() == SectionBase::Regular && s->isLive() &&
1592             (s->flags & SHF_ALLOC) &&
1593             !(s->type == SHT_ARM_EXIDX && config->emachine == EM_ARM))
1594           scanner.template scanSection<ELFT>(*s);
1595       }
1596     };
1597     tg.spawn(fn, serial);
1598   }
1599
1600   tg.spawn([] {
1601     RelocationScanner scanner;
1602     for (Partition &part : partitions) {
1603       for (EhInputSection *sec : part.ehFrame->sections)
1604         scanner.template scanSection<ELFT>(*sec);
1605       if (part.armExidx && part.armExidx->isLive())
1606         for (InputSection *sec : part.armExidx->exidxSections)
1607           if (sec->isLive())
1608             scanner.template scanSection<ELFT>(*sec);
1609     }
1610   });
1611 }
1612
1613 static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
1614   // Handle a reference to a non-preemptible ifunc. These are special in a
1615   // few ways:
1616   //
1617   // - Unlike most non-preemptible symbols, non-preemptible ifuncs do not have
1618   //   a fixed value. But assuming that all references to the ifunc are
1619   //   GOT-generating or PLT-generating, the handling of an ifunc is
1620   //   relatively straightforward.
We create a PLT entry in Iplt, which is 1621 // usually at the end of .plt, which makes an indirect call using a 1622 // matching GOT entry in igotPlt, which is usually at the end of .got.plt. 1623 // The GOT entry is relocated using an IRELATIVE relocation in relaIplt, 1624 // which is usually at the end of .rela.plt. Unlike most relocations in 1625 // .rela.plt, which may be evaluated lazily without -z now, dynamic 1626 // loaders evaluate IRELATIVE relocs eagerly, which means that for 1627 // IRELATIVE relocs only, GOT-generating relocations can point directly to 1628 // .got.plt without requiring a separate GOT entry. 1629 // 1630 // - Despite the fact that an ifunc does not have a fixed value, compilers 1631 // that are not passed -fPIC will assume that they do, and will emit 1632 // direct (non-GOT-generating, non-PLT-generating) relocations to the 1633 // symbol. This means that if a direct relocation to the symbol is 1634 // seen, the linker must set a value for the symbol, and this value must 1635 // be consistent no matter what type of reference is made to the symbol. 1636 // This can be done by creating a PLT entry for the symbol in the way 1637 // described above and making it canonical, that is, making all references 1638 // point to the PLT entry instead of the resolver. In lld we also store 1639 // the address of the PLT entry in the dynamic symbol table, which means 1640 // that the symbol will also have the same value in other modules. 1641 // Because the value loaded from the GOT needs to be consistent with 1642 // the value computed using a direct relocation, a non-preemptible ifunc 1643 // may end up with two GOT entries, one in .got.plt that points to the 1644 // address returned by the resolver and is used only by the PLT entry, 1645 // and another in .got that points to the PLT entry and is used by 1646 // GOT-generating relocations. 1647 // 1648 // - The fact that these symbols do not have a fixed value makes them an 1649 // exception to the general rule that a statically linked executable does 1650 // not require any form of dynamic relocation. To handle these relocations 1651 // correctly, the IRELATIVE relocations are stored in an array which a 1652 // statically linked executable's startup code must enumerate using the 1653 // linker-defined symbols __rela?_iplt_{start,end}. 1654 if (!sym.isGnuIFunc() || sym.isPreemptible || config->zIfuncNoplt) 1655 return false; 1656 // Skip unreferenced non-preemptible ifunc. 1657 if (!(flags & (NEEDS_GOT | NEEDS_PLT | HAS_DIRECT_RELOC))) 1658 return true; 1659 1660 sym.isInIplt = true; 1661 1662 // Create an Iplt and the associated IRELATIVE relocation pointing to the 1663 // original section/value pairs. For non-GOT non-PLT relocation case below, we 1664 // may alter section/value, so create a copy of the symbol to make 1665 // section/value fixed. 1666 auto *directSym = makeDefined(cast<Defined>(sym)); 1667 directSym->allocateAux(); 1668 addPltEntry(*in.iplt, *in.igotPlt, *in.relaIplt, target->iRelativeRel, 1669 *directSym); 1670 sym.allocateAux(); 1671 symAux.back().pltIdx = symAux[directSym->auxIdx].pltIdx; 1672 1673 if (flags & HAS_DIRECT_RELOC) { 1674 // Change the value to the IPLT and redirect all references to it. 1675 auto &d = cast<Defined>(sym); 1676 d.section = in.iplt.get(); 1677 d.value = d.getPltIdx() * target->ipltEntrySize; 1678 d.size = 0; 1679 // It's important to set the symbol type here so that dynamic loaders 1680 // don't try to call the PLT as if it were an ifunc resolver. 
1681 d.type = STT_FUNC; 1682 1683 if (flags & NEEDS_GOT) 1684 addGotEntry(sym); 1685 } else if (flags & NEEDS_GOT) { 1686 // Redirect GOT accesses to point to the Igot. 1687 sym.gotInIgot = true; 1688 } 1689 return true; 1690 } 1691 1692 void elf::postScanRelocations() { 1693 auto fn = [](Symbol &sym) { 1694 auto flags = sym.flags.load(std::memory_order_relaxed); 1695 if (handleNonPreemptibleIfunc(sym, flags)) 1696 return; 1697 1698 if (sym.isTagged() && sym.isDefined()) 1699 mainPart->memtagGlobalDescriptors->addSymbol(sym); 1700 1701 if (!sym.needsDynReloc()) 1702 return; 1703 sym.allocateAux(); 1704 1705 if (flags & NEEDS_GOT) 1706 addGotEntry(sym); 1707 if (flags & NEEDS_PLT) 1708 addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, target->pltRel, sym); 1709 if (flags & NEEDS_COPY) { 1710 if (sym.isObject()) { 1711 invokeELFT(addCopyRelSymbol, cast<SharedSymbol>(sym)); 1712 // NEEDS_COPY is cleared for sym and its aliases so that in 1713 // later iterations aliases won't cause redundant copies. 1714 assert(!sym.hasFlag(NEEDS_COPY)); 1715 } else { 1716 assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT)); 1717 if (!sym.isDefined()) { 1718 replaceWithDefined(sym, *in.plt, 1719 target->pltHeaderSize + 1720 target->pltEntrySize * sym.getPltIdx(), 1721 0); 1722 sym.setFlags(NEEDS_COPY); 1723 if (config->emachine == EM_PPC) { 1724 // PPC32 canonical PLT entries are at the beginning of .glink 1725 cast<Defined>(sym).value = in.plt->headerSize; 1726 in.plt->headerSize += 16; 1727 cast<PPC32GlinkSection>(*in.plt).canonical_plts.push_back(&sym); 1728 } 1729 } 1730 } 1731 } 1732 1733 if (!sym.isTls()) 1734 return; 1735 bool isLocalInExecutable = !sym.isPreemptible && !config->shared; 1736 GotSection *got = in.got.get(); 1737 1738 if (flags & NEEDS_TLSDESC) { 1739 got->addTlsDescEntry(sym); 1740 mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible( 1741 target->tlsDescRel, *got, got->getTlsDescOffset(sym), sym, 1742 target->tlsDescRel); 1743 } 1744 if (flags & NEEDS_TLSGD) { 1745 got->addDynTlsEntry(sym); 1746 uint64_t off = got->getGlobalDynOffset(sym); 1747 if (isLocalInExecutable) 1748 // Write one to the GOT slot. 1749 got->addConstant({R_ADDEND, target->symbolicRel, off, 1, &sym}); 1750 else 1751 mainPart->relaDyn->addSymbolReloc(target->tlsModuleIndexRel, *got, off, 1752 sym); 1753 1754 // If the symbol is preemptible we need the dynamic linker to write 1755 // the offset too. 
1756       uint64_t offsetOff = off + config->wordsize;
1757       if (sym.isPreemptible)
1758         mainPart->relaDyn->addSymbolReloc(target->tlsOffsetRel, *got, offsetOff,
1759                                           sym);
1760       else
1761         got->addConstant({R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym});
1762     }
1763     if (flags & NEEDS_TLSGD_TO_IE) {
1764       got->addEntry(sym);
1765       mainPart->relaDyn->addSymbolReloc(target->tlsGotRel, *got,
1766                                         sym.getGotOffset(), sym);
1767     }
1768     if (flags & NEEDS_GOT_DTPREL) {
1769       got->addEntry(sym);
1770       got->addConstant(
1771           {R_ABS, target->tlsOffsetRel, sym.getGotOffset(), 0, &sym});
1772     }
1773
1774     if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
1775       addTpOffsetGotEntry(sym);
1776   };
1777
1778   GotSection *got = in.got.get();
1779   if (ctx.needsTlsLd.load(std::memory_order_relaxed) && got->addTlsIndex()) {
1780     static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
1781     if (config->shared)
1782       mainPart->relaDyn->addReloc(
1783           {target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
1784     else
1785       got->addConstant(
1786           {R_ADDEND, target->symbolicRel, got->getTlsIndexOff(), 1, &dummy});
1787   }
1788
1789   assert(symAux.size() == 1);
1790   for (Symbol *sym : symtab.getSymbols())
1791     fn(*sym);
1792
1793   // Local symbols may need the aforementioned non-preemptible ifunc and GOT
1794   // handling. They don't need regular PLT.
1795   for (ELFFileBase *file : ctx.objectFiles)
1796     for (Symbol *sym : file->getLocalSymbols())
1797       fn(*sym);
1798 }
1799
1800 static bool mergeCmp(const InputSection *a, const InputSection *b) {
1801   // std::merge requires a strict weak ordering.
1802   if (a->outSecOff < b->outSecOff)
1803     return true;
1804
1805   // FIXME dyn_cast<ThunkSection> is non-null for any SyntheticSection.
1806   if (a->outSecOff == b->outSecOff && a != b) {
1807     auto *ta = dyn_cast<ThunkSection>(a);
1808     auto *tb = dyn_cast<ThunkSection>(b);
1809
1810     // Check if Thunk is immediately before any specific Target
1811     // InputSection, for example Mips LA25 Thunks.
1812     if (ta && ta->getTargetInputSection() == b)
1813       return true;
1814
1815     // Place Thunk Sections without specific targets before
1816     // non-Thunk Sections.
1817     if (ta && !tb && !ta->getTargetInputSection())
1818       return true;
1819   }
1820
1821   return false;
1822 }
1823
1824 // Call Fn on every executable InputSection accessed via the linker script
1825 // InputSectionDescription::Sections.
1826 static void forEachInputSectionDescription(
1827     ArrayRef<OutputSection *> outputSections,
1828     llvm::function_ref<void(OutputSection *, InputSectionDescription *)> fn) {
1829   for (OutputSection *os : outputSections) {
1830     if (!(os->flags & SHF_ALLOC) || !(os->flags & SHF_EXECINSTR))
1831       continue;
1832     for (SectionCommand *bc : os->commands)
1833       if (auto *isd = dyn_cast<InputSectionDescription>(bc))
1834         fn(os, isd);
1835   }
1836 }
1837
1838 // Thunk Implementation
1839 //
1840 // Thunks (sometimes called stubs, veneers or branch islands) are small pieces
1841 // of code that the linker inserts in between a caller and a callee. The thunks
1842 // are added at link time rather than compile time as the decision on whether
1843 // a thunk is needed, such as the caller and callee being out of range, can only
1844 // be made at link time.
1845 //
1846 // It is straightforward to tell given the current state of the program when a
1847 // thunk is needed for a particular call. The more difficult part is that
1848 // the thunk needs to be placed in the program such that the caller can reach
1849 // the thunk and the thunk can reach the callee; furthermore, adding thunks to
1850 // the program alters addresses, which can mean more thunks etc.
1851 //
1852 // In lld we have a synthetic ThunkSection that can hold many Thunks.
1853 // The decision to have a ThunkSection act as a container means that we can
1854 // more easily handle the most common case of a single block of contiguous
1855 // Thunks by inserting just a single ThunkSection.
1856 //
1857 // The implementation of Thunks in lld is split across these areas:
1858 // Relocations.cpp : Framework for creating and placing thunks
1859 // Thunks.cpp : The code generated for each supported thunk
1860 // Target.cpp : Target specific hooks that the framework uses to decide when
1861 //              a thunk is used
1862 // SyntheticSections.cpp : Implementation of ThunkSection
1863 // Writer.cpp : Iteratively call framework until no more Thunks added
1864 //
1865 // Thunk placement requirements:
1866 // Mips LA25 thunks. These must be placed immediately before the callee section.
1867 // We can assume that the caller is in range of the Thunk. These are modelled
1868 // by Thunks that return the section they must precede with
1869 // getTargetInputSection().
1870 //
1871 // ARM interworking and range extension thunks. These thunks must be placed
1872 // within range of the caller. All implemented ARM thunks can always reach the
1873 // callee as they use an indirect jump via a register that has no range
1874 // restrictions.
1875 //
1876 // Thunk placement algorithm:
1877 // For Mips LA25 ThunkSections, the placement is explicit: it has to be before
1878 // getTargetInputSection().
1879 //
1880 // For thunks that must be placed within range of the caller there are many
1881 // possible choices given that the maximum range from the caller is usually
1882 // much larger than the average InputSection size. Desirable properties include:
1883 // - Maximize reuse of thunks by multiple callers
1884 // - Minimize number of ThunkSections to simplify insertion
1885 // - Handle impact of already added Thunks on addresses
1886 // - Simple to understand and implement
1887 //
1888 // In lld for the first pass, we pre-create one or more ThunkSections per
1889 // InputSectionDescription at Target specific intervals. A ThunkSection is
1890 // placed so that the estimated end of the ThunkSection is within range of the
1891 // start of the InputSectionDescription or the previous ThunkSection. For
1892 // example:
1893 // InputSectionDescription
1894 // Section 0
1895 // ...
1896 // Section N
1897 // ThunkSection 0
1898 // Section N + 1
1899 // ...
1900 // Section N + K
1901 // ThunkSection 1
1902 //
1903 // The intention is that we can add a Thunk to a ThunkSection that is well
1904 // spaced enough to service a number of callers without having to do a lot
1905 // of work. An important principle is that it is not an error if a Thunk cannot
1906 // be placed in a pre-created ThunkSection; when this happens we create a new
1907 // ThunkSection placed next to the caller. This allows us to handle the vast
1908 // majority of thunks simply, but also handle rare cases where the branch range
1909 // is smaller than the target specific spacing.
1910 // 1911 // The algorithm is expected to create all the thunks that are needed in a 1912 // single pass, with a small number of programs needing a second pass due to 1913 // the insertion of thunks in the first pass increasing the offset between 1914 // callers and callees that were only just in range. 1915 // 1916 // A consequence of allowing new ThunkSections to be created outside of the 1917 // pre-created ThunkSections is that in rare cases calls to Thunks that were in 1918 // range in pass K, are out of range in some pass > K due to the insertion of 1919 // more Thunks in between the caller and callee. When this happens we retarget 1920 // the relocation back to the original target and create another Thunk. 1921 1922 // Remove ThunkSections that are empty, this should only be the initial set 1923 // precreated on pass 0. 1924 1925 // Insert the Thunks for OutputSection OS into their designated place 1926 // in the Sections vector, and recalculate the InputSection output section 1927 // offsets. 1928 // This may invalidate any output section offsets stored outside of InputSection 1929 void ThunkCreator::mergeThunks(ArrayRef<OutputSection *> outputSections) { 1930 forEachInputSectionDescription( 1931 outputSections, [&](OutputSection *os, InputSectionDescription *isd) { 1932 if (isd->thunkSections.empty()) 1933 return; 1934 1935 // Remove any zero sized precreated Thunks. 1936 llvm::erase_if(isd->thunkSections, 1937 [](const std::pair<ThunkSection *, uint32_t> &ts) { 1938 return ts.first->getSize() == 0; 1939 }); 1940 1941 // ISD->ThunkSections contains all created ThunkSections, including 1942 // those inserted in previous passes. Extract the Thunks created this 1943 // pass and order them in ascending outSecOff. 1944 std::vector<ThunkSection *> newThunks; 1945 for (std::pair<ThunkSection *, uint32_t> ts : isd->thunkSections) 1946 if (ts.second == pass) 1947 newThunks.push_back(ts.first); 1948 llvm::stable_sort(newThunks, 1949 [](const ThunkSection *a, const ThunkSection *b) { 1950 return a->outSecOff < b->outSecOff; 1951 }); 1952 1953 // Merge sorted vectors of Thunks and InputSections by outSecOff 1954 SmallVector<InputSection *, 0> tmp; 1955 tmp.reserve(isd->sections.size() + newThunks.size()); 1956 1957 std::merge(isd->sections.begin(), isd->sections.end(), 1958 newThunks.begin(), newThunks.end(), std::back_inserter(tmp), 1959 mergeCmp); 1960 1961 isd->sections = std::move(tmp); 1962 }); 1963 } 1964 1965 static int64_t getPCBias(RelType type) { 1966 if (config->emachine != EM_ARM) 1967 return 0; 1968 switch (type) { 1969 case R_ARM_THM_JUMP19: 1970 case R_ARM_THM_JUMP24: 1971 case R_ARM_THM_CALL: 1972 return 4; 1973 default: 1974 return 8; 1975 } 1976 } 1977 1978 // Find or create a ThunkSection within the InputSectionDescription (ISD) that 1979 // is in range of Src. An ISD maps to a range of InputSections described by a 1980 // linker script section pattern such as { .text .text.* }. 1981 ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os, 1982 InputSection *isec, 1983 InputSectionDescription *isd, 1984 const Relocation &rel, 1985 uint64_t src) { 1986 // See the comment in getThunk for -pcBias below. 1987 const int64_t pcBias = getPCBias(rel.type); 1988 for (std::pair<ThunkSection *, uint32_t> tp : isd->thunkSections) { 1989 ThunkSection *ts = tp.first; 1990 uint64_t tsBase = os->addr + ts->outSecOff - pcBias; 1991 uint64_t tsLimit = tsBase + ts->getSize(); 1992 if (target->inBranchRange(rel.type, src, 1993 (src > tsLimit) ? 
tsBase : tsLimit)) 1994 return ts; 1995 } 1996 1997 // No suitable ThunkSection exists. This can happen when there is a branch 1998 // with lower range than the ThunkSection spacing or when there are too 1999 // many Thunks. Create a new ThunkSection as close to the InputSection as 2000 // possible. Error if InputSection is so large we cannot place ThunkSection 2001 // anywhere in Range. 2002 uint64_t thunkSecOff = isec->outSecOff; 2003 if (!target->inBranchRange(rel.type, src, 2004 os->addr + thunkSecOff + rel.addend)) { 2005 thunkSecOff = isec->outSecOff + isec->getSize(); 2006 if (!target->inBranchRange(rel.type, src, 2007 os->addr + thunkSecOff + rel.addend)) 2008 fatal("InputSection too large for range extension thunk " + 2009 isec->getObjMsg(src - (os->addr + isec->outSecOff))); 2010 } 2011 return addThunkSection(os, isd, thunkSecOff); 2012 } 2013 2014 // Add a Thunk that needs to be placed in a ThunkSection that immediately 2015 // precedes its Target. 2016 ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) { 2017 ThunkSection *ts = thunkedSections.lookup(isec); 2018 if (ts) 2019 return ts; 2020 2021 // Find InputSectionRange within Target Output Section (TOS) that the 2022 // InputSection (IS) that we need to precede is in. 2023 OutputSection *tos = isec->getParent(); 2024 for (SectionCommand *bc : tos->commands) { 2025 auto *isd = dyn_cast<InputSectionDescription>(bc); 2026 if (!isd || isd->sections.empty()) 2027 continue; 2028 2029 InputSection *first = isd->sections.front(); 2030 InputSection *last = isd->sections.back(); 2031 2032 if (isec->outSecOff < first->outSecOff || last->outSecOff < isec->outSecOff) 2033 continue; 2034 2035 ts = addThunkSection(tos, isd, isec->outSecOff); 2036 thunkedSections[isec] = ts; 2037 return ts; 2038 } 2039 2040 return nullptr; 2041 } 2042 2043 // Create one or more ThunkSections per OS that can be used to place Thunks. 2044 // We attempt to place the ThunkSections using the following desirable 2045 // properties: 2046 // - Within range of the maximum number of callers 2047 // - Minimise the number of ThunkSections 2048 // 2049 // We follow a simple but conservative heuristic to place ThunkSections at 2050 // offsets that are multiples of a Target specific branch range. 2051 // For an InputSectionDescription that is smaller than the range, a single 2052 // ThunkSection at the end of the range will do. 2053 // 2054 // For an InputSectionDescription that is more than twice the size of the range, 2055 // we place the last ThunkSection at range bytes from the end of the 2056 // InputSectionDescription in order to increase the likelihood that the 2057 // distance from a thunk to its target will be sufficiently small to 2058 // allow for the creation of a short thunk. 
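// For example (illustrative numbers only): with a thunk section spacing of
// 16 MiB and an InputSectionDescription covering roughly [0, 40 MiB), the walk
// below pre-creates a ThunkSection roughly every 16 MiB as it advances through
// the InputSections and, because the description is more than twice the
// spacing, stops early so that the last pre-created ThunkSection lands within
// 16 MiB of the end, close enough to late callees for short thunks to remain
// possible.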
2059 void ThunkCreator::createInitialThunkSections(
2060     ArrayRef<OutputSection *> outputSections) {
2061   uint32_t thunkSectionSpacing = target->getThunkSectionSpacing();
2062
2063   forEachInputSectionDescription(
2064       outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
2065         if (isd->sections.empty())
2066           return;
2067
2068         uint32_t isdBegin = isd->sections.front()->outSecOff;
2069         uint32_t isdEnd =
2070             isd->sections.back()->outSecOff + isd->sections.back()->getSize();
2071         uint32_t lastThunkLowerBound = -1;
2072         if (isdEnd - isdBegin > thunkSectionSpacing * 2)
2073           lastThunkLowerBound = isdEnd - thunkSectionSpacing;
2074
2075         uint32_t isecLimit;
2076         uint32_t prevIsecLimit = isdBegin;
2077         uint32_t thunkUpperBound = isdBegin + thunkSectionSpacing;
2078
2079         for (const InputSection *isec : isd->sections) {
2080           isecLimit = isec->outSecOff + isec->getSize();
2081           if (isecLimit > thunkUpperBound) {
2082             addThunkSection(os, isd, prevIsecLimit);
2083             thunkUpperBound = prevIsecLimit + thunkSectionSpacing;
2084           }
2085           if (isecLimit > lastThunkLowerBound)
2086             break;
2087           prevIsecLimit = isecLimit;
2088         }
2089         addThunkSection(os, isd, isecLimit);
2090       });
2091 }
2092
2093 ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
2094                                             InputSectionDescription *isd,
2095                                             uint64_t off) {
2096   auto *ts = make<ThunkSection>(os, off);
2097   ts->partition = os->partition;
2098   if ((config->fixCortexA53Errata843419 || config->fixCortexA8) &&
2099       !isd->sections.empty()) {
2100     // The errata fixes are sensitive to addresses modulo 4 KiB. When we add
2101     // thunks we disturb the base addresses of sections placed after the thunks;
2102     // this makes patches we have generated redundant, and may cause us to
2103     // generate more patches as different instructions are now in sensitive
2104     // locations. When we generate more patches we may force more branches to
2105     // go out of range, causing more thunks to be generated. In pathological
2106     // cases this can cause the address dependent content pass not to converge.
2107     // We fix this by rounding up the size of the ThunkSection to 4 KiB; this
2108     // limits the effect that inserting a ThunkSection has on addresses modulo
2109     // 4 KiB, which means that adding Thunks to the section does not invalidate
2110     // errata patches for following code.
2111     // Rounding up the size to 4 KiB has consequences for code-size and can
2112     // trip up linker script defined assertions. For example, the Linux kernel
2113     // has an assertion that what LLD represents as an InputSectionDescription
2114     // does not exceed 4 KiB even if the overall OutputSection is > 128 MiB.
2115     // We use the heuristic of rounding up the size when both of the following
2116     // conditions are true:
2117     // 1.) The OutputSection is larger than the ThunkSectionSpacing. This
2118     //     accounts for the case where no single InputSectionDescription is
2119     //     larger than the OutputSection size. This is conservative but simple.
2120     // 2.) The InputSectionDescription is larger than 4 KiB. This will prevent
2121     //     any assertion failures that an InputSectionDescription is < 4 KiB
2122     //     in size.
2123 uint64_t isdSize = isd->sections.back()->outSecOff + 2124 isd->sections.back()->getSize() - 2125 isd->sections.front()->outSecOff; 2126 if (os->size > target->getThunkSectionSpacing() && isdSize > 4096) 2127 ts->roundUpSizeForErrata = true; 2128 } 2129 isd->thunkSections.push_back({ts, pass}); 2130 return ts; 2131 } 2132 2133 static bool isThunkSectionCompatible(InputSection *source, 2134 SectionBase *target) { 2135 // We can't reuse thunks in different loadable partitions because they might 2136 // not be loaded. But partition 1 (the main partition) will always be loaded. 2137 if (source->partition != target->partition) 2138 return target->partition == 1; 2139 return true; 2140 } 2141 2142 std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec, 2143 Relocation &rel, uint64_t src) { 2144 std::vector<Thunk *> *thunkVec = nullptr; 2145 // Arm and Thumb have a PC Bias of 8 and 4 respectively, this is cancelled 2146 // out in the relocation addend. We compensate for the PC bias so that 2147 // an Arm and Thumb relocation to the same destination get the same keyAddend, 2148 // which is usually 0. 2149 const int64_t pcBias = getPCBias(rel.type); 2150 const int64_t keyAddend = rel.addend + pcBias; 2151 2152 // We use a ((section, offset), addend) pair to find the thunk position if 2153 // possible so that we create only one thunk for aliased symbols or ICFed 2154 // sections. There may be multiple relocations sharing the same (section, 2155 // offset + addend) pair. We may revert the relocation back to its original 2156 // non-Thunk target, so we cannot fold offset + addend. 2157 if (auto *d = dyn_cast<Defined>(rel.sym)) 2158 if (!d->isInPlt() && d->section) 2159 thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value}, 2160 keyAddend}]; 2161 if (!thunkVec) 2162 thunkVec = &thunkedSymbols[{rel.sym, keyAddend}]; 2163 2164 // Check existing Thunks for Sym to see if they can be reused 2165 for (Thunk *t : *thunkVec) 2166 if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) && 2167 t->isCompatibleWith(*isec, rel) && 2168 target->inBranchRange(rel.type, src, 2169 t->getThunkTargetSym()->getVA(-pcBias))) 2170 return std::make_pair(t, false); 2171 2172 // No existing compatible Thunk in range, create a new one 2173 Thunk *t = addThunk(*isec, rel); 2174 thunkVec->push_back(t); 2175 return std::make_pair(t, true); 2176 } 2177 2178 // Return true if the relocation target is an in range Thunk. 2179 // Return false if the relocation is not to a Thunk. If the relocation target 2180 // was originally to a Thunk, but is no longer in range we revert the 2181 // relocation back to its original non-Thunk target. 2182 bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) { 2183 if (Thunk *t = thunks.lookup(rel.sym)) { 2184 if (target->inBranchRange(rel.type, src, rel.sym->getVA(rel.addend))) 2185 return true; 2186 rel.sym = &t->destination; 2187 rel.addend = t->addend; 2188 if (rel.sym->isInPlt()) 2189 rel.expr = toPlt(rel.expr); 2190 } 2191 return false; 2192 } 2193 2194 // Process all relocations from the InputSections that have been assigned 2195 // to InputSectionDescriptions and redirect through Thunks if needed. The 2196 // function should be called iteratively until it returns false. 2197 // 2198 // PreConditions: 2199 // All InputSections that may need a Thunk are reachable from 2200 // OutputSectionCommands. 2201 // 2202 // All OutputSections have an address and all InputSections have an offset 2203 // within the OutputSection. 
2204 //
2205 // The offsets between caller (relocation place) and callee
2206 // (relocation target) will not be modified outside of createThunks().
2207 //
2208 // PostConditions:
2209 // If the return value is true then ThunkSections have been inserted into
2210 // OutputSections. All relocations that needed a Thunk based on the information
2211 // available to createThunks() on entry have been redirected to a Thunk. Note
2212 // that adding Thunks changes offsets between caller and callee so more Thunks
2213 // may be required.
2214 //
2215 // If the return value is false then no more Thunks are needed, and createThunks
2216 // has made no changes. If the target requires range extension thunks, currently
2217 // ARM, then any future change in offset between caller and callee risks a
2218 // relocation out of range error.
2219 bool ThunkCreator::createThunks(uint32_t pass,
2220                                 ArrayRef<OutputSection *> outputSections) {
2221   this->pass = pass;
2222   bool addressesChanged = false;
2223
2224   if (pass == 0 && target->getThunkSectionSpacing())
2225     createInitialThunkSections(outputSections);
2226
2227   // Create all the Thunks and insert them into synthetic ThunkSections. The
2228   // ThunkSections are later inserted back into InputSectionDescriptions.
2229   // We separate the creation of ThunkSections from the insertion of the
2230   // ThunkSections as ThunkSections are not always inserted into the same
2231   // InputSectionDescription as the caller.
2232   forEachInputSectionDescription(
2233       outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
2234         for (InputSection *isec : isd->sections)
2235           for (Relocation &rel : isec->relocs()) {
2236             uint64_t src = isec->getVA(rel.offset);
2237
2238             // If we are a relocation to an existing Thunk, check if it is
2239             // still in range. If not then Rel will be altered to point to its
2240             // original target so another Thunk can be generated.
2241             if (pass > 0 && normalizeExistingThunk(rel, src))
2242               continue;
2243
2244             if (!target->needsThunk(rel.expr, rel.type, isec->file, src,
2245                                     *rel.sym, rel.addend))
2246               continue;
2247
2248             Thunk *t;
2249             bool isNew;
2250             std::tie(t, isNew) = getThunk(isec, rel, src);
2251
2252             if (isNew) {
2253               // Find or create a ThunkSection for the new Thunk.
2254               ThunkSection *ts;
2255               if (auto *tis = t->getTargetInputSection())
2256                 ts = getISThunkSec(tis);
2257               else
2258                 ts = getISDThunkSec(os, isec, isd, rel, src);
2259               ts->addThunk(t);
2260               thunks[t->getThunkTargetSym()] = t;
2261             }
2262
2263             // Redirect the relocation to the Thunk; we never go via the PLT to a Thunk.
2264             rel.sym = t->getThunkTargetSym();
2265             rel.expr = fromPlt(rel.expr);
2266
2267             // On AArch64 and PPC, a jump/call relocation may be encoded as
2268             // STT_SECTION + non-zero addend; clear the addend after
2269             // redirection.
2270             if (config->emachine != EM_MIPS)
2271               rel.addend = -getPCBias(rel.type);
2272           }
2273
2274         for (auto &p : isd->thunkSections)
2275           addressesChanged |= p.first->assignOffsets();
2276       });
2277
2278   for (auto &p : thunkedSections)
2279     addressesChanged |= p.second->assignOffsets();
2280
2281   // Merge all created synthetic ThunkSections back into the OutputSection.
2282   mergeThunks(outputSections);
2283   return addressesChanged;
2284 }
2285
2286 // The following functions aid in the conversion of call x@GDPLT to call
2287 // __tls_get_addr. hexagonNeedsTLSSymbol scans for relocations that would
2288 // require a call to __tls_get_addr.
2289 // hexagonTLSSymbolUpdate rebinds the relocation to __tls_get_addr.
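// For example (roughly): a Hexagon General-Dynamic access ends with a call to
// x@GDPLT, i.e. a PLT-generating relocation whose target is the TLS symbol x
// itself rather than __tls_get_addr. The two functions below look for that
// pattern (an STT_TLS symbol with an R_PLT_PC expression), create a PLT entry
// for __tls_get_addr if any such relocation exists, and rebind the call's
// relocation to __tls_get_addr.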
2290 bool elf::hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections) { 2291 bool needTlsSymbol = false; 2292 forEachInputSectionDescription( 2293 outputSections, [&](OutputSection *os, InputSectionDescription *isd) { 2294 for (InputSection *isec : isd->sections) 2295 for (Relocation &rel : isec->relocs()) 2296 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { 2297 needTlsSymbol = true; 2298 return; 2299 } 2300 }); 2301 return needTlsSymbol; 2302 } 2303 2304 void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) { 2305 Symbol *sym = symtab.find("__tls_get_addr"); 2306 if (!sym) 2307 return; 2308 bool needEntry = true; 2309 forEachInputSectionDescription( 2310 outputSections, [&](OutputSection *os, InputSectionDescription *isd) { 2311 for (InputSection *isec : isd->sections) 2312 for (Relocation &rel : isec->relocs()) 2313 if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { 2314 if (needEntry) { 2315 sym->allocateAux(); 2316 addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, target->pltRel, 2317 *sym); 2318 needEntry = false; 2319 } 2320 rel.sym = sym; 2321 } 2322 }); 2323 } 2324 2325 template void elf::scanRelocations<ELF32LE>(); 2326 template void elf::scanRelocations<ELF32BE>(); 2327 template void elf::scanRelocations<ELF64LE>(); 2328 template void elf::scanRelocations<ELF64BE>(); 2329