//===- SyntheticSections.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SyntheticSections.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "ExportTrie.h"
#include "InputFiles.h"
#include "MachOStructs.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"

#include "lld/Common/CommonLinkerContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"

#if defined(__APPLE__)
#include <sys/mman.h>

#define COMMON_DIGEST_FOR_OPENSSL
#include <CommonCrypto/CommonDigest.h>
#else
#include "llvm/Support/SHA256.h"
#endif

#ifdef LLVM_HAVE_LIBXAR
#include <fcntl.h>
extern "C" {
#include <xar/xar.h>
}
#endif

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

// Reads `len` bytes at data and writes the 32-byte SHA256 checksum to `output`.
static void sha256(const uint8_t *data, size_t len, uint8_t *output) {
#if defined(__APPLE__)
  // FIXME: Make LLVM's SHA256 faster and use it unconditionally. See PR56121
  // for some notes on this.
  CC_SHA256(data, len, output);
#else
  ArrayRef<uint8_t> block(data, len);
  std::array<uint8_t, 32> hash = SHA256::hash(block);
  static_assert(hash.size() == CodeSignatureSection::hashSize, "");
  memcpy(output, hash.data(), hash.size());
#endif
}

InStruct macho::in;
std::vector<SyntheticSection *> macho::syntheticSections;

SyntheticSection::SyntheticSection(const char *segname, const char *name)
    : OutputSection(SyntheticKind, name) {
  std::tie(this->segname, this->name) = maybeRenameSection({segname, name});
  isec = makeSyntheticInputSection(segname, name);
  isec->parent = this;
  syntheticSections.push_back(this);
}

// dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts
// from the beginning of the file (i.e. the header).
MachHeaderSection::MachHeaderSection()
    : SyntheticSection(segment_names::text, section_names::header) {
  // XXX: This is a hack. (See D97007)
  // Setting the index to 1 to pretend that this section is the text
  // section.
  index = 1;
  isec->isFinal = true;
}

void MachHeaderSection::addLoadCommand(LoadCommand *lc) {
  loadCommands.push_back(lc);
  sizeOfCmds += lc->getSize();
}

uint64_t MachHeaderSection::getSize() const {
  uint64_t size = target->headerSize + sizeOfCmds + config->headerPad;
  // If we are emitting an encryptable binary, our load commands must have a
  // separate (non-encrypted) page to themselves.
  if (config->emitEncryptionInfo)
    size = alignTo(size, target->getPageSize());
  return size;
}

static uint32_t cpuSubtype() {
  uint32_t subtype = target->cpuSubtype;

  if (config->outputType == MH_EXECUTE && !config->staticLink &&
      target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL &&
      config->platform() == PLATFORM_MACOS &&
      config->platformInfo.minimum >= VersionTuple(10, 5))
    subtype |= CPU_SUBTYPE_LIB64;

  return subtype;
}

void MachHeaderSection::writeTo(uint8_t *buf) const {
  auto *hdr = reinterpret_cast<mach_header *>(buf);
  hdr->magic = target->magic;
  hdr->cputype = target->cpuType;
  hdr->cpusubtype = cpuSubtype();
  hdr->filetype = config->outputType;
  hdr->ncmds = loadCommands.size();
  hdr->sizeofcmds = sizeOfCmds;
  hdr->flags = MH_DYLDLINK;

  if (config->namespaceKind == NamespaceKind::twolevel)
    hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL;

  if (config->outputType == MH_DYLIB && !config->hasReexports)
    hdr->flags |= MH_NO_REEXPORTED_DYLIBS;

  if (config->markDeadStrippableDylib)
    hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB;

  if (config->outputType == MH_EXECUTE && config->isPic)
    hdr->flags |= MH_PIE;

  if (config->outputType == MH_DYLIB && config->applicationExtension)
    hdr->flags |= MH_APP_EXTENSION_SAFE;

  if (in.exports->hasWeakSymbol || in.weakBinding->hasNonWeakDefinition())
    hdr->flags |= MH_WEAK_DEFINES;

  if (in.exports->hasWeakSymbol || in.weakBinding->hasEntry())
    hdr->flags |= MH_BINDS_TO_WEAK;

  for (const OutputSegment *seg : outputSegments) {
    for (const OutputSection *osec : seg->getSections()) {
      if (isThreadLocalVariables(osec->flags)) {
        hdr->flags |= MH_HAS_TLV_DESCRIPTORS;
        break;
      }
    }
  }

  uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize;
  for (const LoadCommand *lc : loadCommands) {
    lc->writeTo(p);
    p += lc->getSize();
  }
}

PageZeroSection::PageZeroSection()
    : SyntheticSection(segment_names::pageZero, section_names::pageZero) {}

RebaseSection::RebaseSection()
    : LinkEditSection(segment_names::linkEdit, section_names::rebase) {}

namespace {
struct RebaseState {
  uint64_t sequenceLength;
  uint64_t skipLength;
};
} // namespace

static void emitIncrement(uint64_t incr, raw_svector_ostream &os) {
  assert(incr != 0);

  if ((incr >> target->p2WordSize) <= REBASE_IMMEDIATE_MASK &&
      (incr % target->wordSize) == 0) {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_IMM_SCALED |
                               (incr >> target->p2WordSize));
  } else {
    os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_ULEB);
    encodeULEB128(incr, os);
  }
}

static void flushRebase(const RebaseState &state, raw_svector_ostream &os) {
  assert(state.sequenceLength > 0);

  if (state.skipLength == target->wordSize) {
    if (state.sequenceLength <= REBASE_IMMEDIATE_MASK) {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES |
                                 state.sequenceLength);
    } else {
      os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES);
      encodeULEB128(state.sequenceLength, os);
    }
  } else if (state.sequenceLength == 1) {
    os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB);
    encodeULEB128(state.skipLength - target->wordSize, os);
  } else {
    os << static_cast<uint8_t>(
        REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB);
    encodeULEB128(state.sequenceLength, os);
    encodeULEB128(state.skipLength - target->wordSize, os);
  }
}

// Rebases are communicated to dyld using a bytecode, whose opcodes cause the
// memory location at a specific address to be rebased and/or the address to be
// incremented.
//
// Opcode REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB is the most generic
// one, encoding a series of evenly spaced addresses. This algorithm works by
// splitting up the sorted list of addresses into such chunks. If the locations
// are consecutive or the sequence consists of a single location, flushRebase
// will use a smaller, more specialized encoding.
static void encodeRebases(const OutputSegment *seg,
                          MutableArrayRef<Location> locations,
                          raw_svector_ostream &os) {
  // dyld operates on segments. Translate section offsets into segment offsets.
  for (Location &loc : locations)
    loc.offset =
        loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset);
  // The algorithm assumes that locations are unique.
  Location *end =
      llvm::unique(locations, [](const Location &a, const Location &b) {
        return a.offset == b.offset;
      });
  size_t count = end - locations.begin();

  os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             seg->index);
  assert(!locations.empty());
  uint64_t offset = locations[0].offset;
  encodeULEB128(offset, os);

  RebaseState state{1, target->wordSize};

  for (size_t i = 1; i < count; ++i) {
    offset = locations[i].offset;

    uint64_t skip = offset - locations[i - 1].offset;
    assert(skip != 0 && "duplicate locations should have been weeded out");

    if (skip == state.skipLength) {
      ++state.sequenceLength;
    } else if (state.sequenceLength == 1) {
      ++state.sequenceLength;
      state.skipLength = skip;
    } else if (skip < state.skipLength) {
      // The address is lower than what the rebase pointer would be if the last
      // location were part of a sequence. We start a new sequence from the
      // previous location.
      --state.sequenceLength;
      flushRebase(state, os);

      state.sequenceLength = 2;
      state.skipLength = skip;
    } else {
      // The address is at some positive offset from the rebase pointer. We
      // start a new sequence which begins with the current location.
      flushRebase(state, os);
      emitIncrement(skip - state.skipLength, os);
      state.sequenceLength = 1;
      state.skipLength = target->wordSize;
    }
  }
  flushRebase(state, os);
}

void RebaseSection::finalizeContents() {
  if (locations.empty())
    return;

  raw_svector_ostream os{contents};
  os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER);

  llvm::sort(locations, [](const Location &a, const Location &b) {
    return a.isec->getVA(a.offset) < b.isec->getVA(b.offset);
  });

  for (size_t i = 0, count = locations.size(); i < count;) {
    const OutputSegment *seg = locations[i].isec->parent->parent;
    size_t j = i + 1;
    while (j < count && locations[j].isec->parent->parent == seg)
      ++j;
    encodeRebases(seg, {locations.data() + i, locations.data() + j}, os);
    i = j;
  }
  os << static_cast<uint8_t>(REBASE_OPCODE_DONE);
}

void RebaseSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname,
                                                     const char *name)
    : SyntheticSection(segname, name) {
  align = target->wordSize;
}

void macho::addNonLazyBindingEntries(const Symbol *sym,
                                     const InputSection *isec, uint64_t offset,
                                     int64_t addend) {
  if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
    in.binding->addEntry(dysym, isec, offset, addend);
    if (dysym->isWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
  } else if (const auto *defined = dyn_cast<Defined>(sym)) {
    in.rebase->addEntry(isec, offset);
    if (defined->isExternalWeakDef())
      in.weakBinding->addEntry(sym, isec, offset, addend);
    else if (defined->interposable)
      in.binding->addEntry(sym, isec, offset, addend);
  } else {
    // Undefined symbols are filtered out in scanRelocations(); we should never
    // get here.
    llvm_unreachable("cannot bind to an undefined symbol");
  }
}

void NonLazyPointerSectionBase::addEntry(Symbol *sym) {
  if (entries.insert(sym)) {
    assert(!sym->isInGot());
    sym->gotIndex = entries.size() - 1;

    addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize);
  }
}

void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const {
  for (size_t i = 0, n = entries.size(); i < n; ++i)
    if (auto *defined = dyn_cast<Defined>(entries[i]))
      write64le(&buf[i * target->wordSize], defined->getVA());
}

GotSection::GotSection()
    : NonLazyPointerSectionBase(segment_names::data, section_names::got) {
  flags = S_NON_LAZY_SYMBOL_POINTERS;
}

TlvPointerSection::TlvPointerSection()
    : NonLazyPointerSectionBase(segment_names::data,
                                section_names::threadPtrs) {
  flags = S_THREAD_LOCAL_VARIABLE_POINTERS;
}

BindingSection::BindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::binding) {}

namespace {
struct Binding {
  OutputSegment *segment = nullptr;
  uint64_t offset = 0;
  int64_t addend = 0;
};
struct BindIR {
  // The default value of 0xF0 is not a valid opcode and should make the
  // program scream instead of accidentally writing "valid" values.
  uint8_t opcode = 0xF0;
  uint64_t data = 0;
  uint64_t consecutiveCount = 0;
};
} // namespace

// Encode a sequence of opcodes that tell dyld to write the address of symbol +
// addend at osec->addr + outSecOff.
//
// The bind opcode "interpreter" remembers the values of each binding field, so
// we only need to encode the differences between bindings. Hence the use of
// lastBinding.
static void encodeBinding(const OutputSection *osec, uint64_t outSecOff,
                          int64_t addend, Binding &lastBinding,
                          std::vector<BindIR> &opcodes) {
  OutputSegment *seg = osec->parent;
  uint64_t offset = osec->getSegmentOffset() + outSecOff;
  if (lastBinding.segment != seg) {
    opcodes.push_back(
        {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                              seg->index),
         offset});
    lastBinding.segment = seg;
    lastBinding.offset = offset;
  } else if (lastBinding.offset != offset) {
    opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset});
    lastBinding.offset = offset;
  }

  if (lastBinding.addend != addend) {
    opcodes.push_back(
        {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)});
    lastBinding.addend = addend;
  }

  opcodes.push_back({BIND_OPCODE_DO_BIND, 0});
  // DO_BIND causes dyld to both perform the binding and increment the offset.
  lastBinding.offset += target->wordSize;
}

static void optimizeOpcodes(std::vector<BindIR> &opcodes) {
  // Pass 1: Combine bind/add pairs
  size_t i;
  int pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 2: Compress two or more bind_add opcodes
  pWrite = 0;
  for (i = 1; i < opcodes.size(); ++i, ++pWrite) {
    if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        (opcodes[i].data == opcodes[i - 1].data)) {
      opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB;
      opcodes[pWrite].consecutiveCount = 2;
      opcodes[pWrite].data = opcodes[i].data;
      ++i;
      while (i < opcodes.size() &&
             (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
             (opcodes[i].data == opcodes[i - 1].data)) {
        opcodes[pWrite].consecutiveCount++;
        ++i;
      }
    } else {
      opcodes[pWrite] = opcodes[i - 1];
    }
  }
  if (i == opcodes.size())
    opcodes[pWrite] = opcodes[i - 1];
  opcodes.resize(pWrite + 1);

  // Pass 3: Use immediate encodings
  // Every binding is the size of one pointer. If the next binding is a
  // multiple of wordSize away that is within BIND_IMMEDIATE_MASK, the
  // opcode can be scaled by wordSize into a single byte and dyld will
  // expand it to the correct address.
  for (auto &p : opcodes) {
    // It's unclear why the check needs to be less than BIND_IMMEDIATE_MASK,
    // but ld64 currently does this. This could be a potential bug, but
    // for now, perform the same behavior to prevent mysterious bugs.
    if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) &&
        ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) &&
        ((p.data % target->wordSize) == 0)) {
      p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED;
      p.data /= target->wordSize;
    }
  }
}

static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) {
  uint8_t opcode = op.opcode & BIND_OPCODE_MASK;
  switch (opcode) {
  case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
  case BIND_OPCODE_ADD_ADDR_ULEB:
  case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
    os << op.opcode;
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_SET_ADDEND_SLEB:
    os << op.opcode;
    encodeSLEB128(static_cast<int64_t>(op.data), os);
    break;
  case BIND_OPCODE_DO_BIND:
    os << op.opcode;
    break;
  case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
    os << op.opcode;
    encodeULEB128(op.consecutiveCount, os);
    encodeULEB128(op.data, os);
    break;
  case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
    os << static_cast<uint8_t>(op.opcode | op.data);
    break;
  default:
    llvm_unreachable("cannot bind to an unrecognized symbol");
  }
}

// Non-weak bindings need to have their dylib ordinal encoded as well.
static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) {
  if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup())
    return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP);
  assert(dysym.getFile()->isReferenced());
  return dysym.getFile()->ordinal;
}

static int16_t ordinalForSymbol(const Symbol &sym) {
  if (const auto *dysym = dyn_cast<DylibSymbol>(&sym))
    return ordinalForDylibSymbol(*dysym);
  assert(cast<Defined>(&sym)->interposable);
  return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
}

static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) {
  if (ordinal <= 0) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
                               (ordinal & BIND_IMMEDIATE_MASK));
  } else if (ordinal <= BIND_IMMEDIATE_MASK) {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal);
  } else {
    os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
    encodeULEB128(ordinal, os);
  }
}

static void encodeWeakOverride(const Defined *defined,
                               raw_svector_ostream &os) {
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM |
                             BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION)
     << defined->getName() << '\0';
}

// Organize the bindings so we can encode them with fewer opcodes.
//
// First, all bindings for a given symbol should be grouped together.
// BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it
// has an associated symbol string), so we only want to emit it once per symbol.
//
// Within each group, we sort the bindings by address. Since bindings are
// delta-encoded, sorting them allows for a more compact result. Note that
// sorting by address alone ensures that bindings for the same segment / section
// are located together, minimizing the number of times we have to emit
// BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB.
//
// Finally, we sort the symbols by the address of their first binding, again
// to facilitate the delta-encoding process.
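//
// Illustrative example (made-up offsets, not taken from any real link): if
// _foo is bound at segment offsets 0x1000 and 0x1010 and _bar at 0x1008, the
// sorted result is [_foo: 0x1000, 0x1010], [_bar: 0x1008], so _foo's name is
// emitted once and followed by two DO_BIND-style opcodes before _bar's group
// starts.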
template <class Sym>
std::vector<std::pair<const Sym *, std::vector<BindingEntry>>>
sortBindings(const BindingsMap<const Sym *> &bindingsMap) {
  std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec(
      bindingsMap.begin(), bindingsMap.end());
  for (auto &p : bindingsVec) {
    std::vector<BindingEntry> &bindings = p.second;
    llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) {
      return a.target.getVA() < b.target.getVA();
    });
  }
  llvm::sort(bindingsVec, [](const auto &a, const auto &b) {
    return a.second[0].target.getVA() < b.second[0].target.getVA();
  });
  return bindingsVec;
}

// Emit bind opcodes, which are a stream of byte-sized opcodes that dyld
// interprets to update a record with the following fields:
//  * segment index (of the segment to write the symbol addresses to, typically
//    the __DATA_CONST segment which contains the GOT)
//  * offset within the segment, indicating the next location to write a binding
//  * symbol type
//  * symbol library ordinal (the index of its library's LC_LOAD_DYLIB command)
//  * symbol name
//  * addend
// When dyld sees BIND_OPCODE_DO_BIND, it uses the current record state to bind
// a symbol in the GOT, and increments the segment offset to point to the next
// entry. It does *not* clear the record state after doing the bind, so
// subsequent opcodes only need to encode the differences between bindings.
void BindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;
  int16_t lastOrdinal = 0;

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
    if (sym->isWeakRef())
      flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;
    os << flags << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    int16_t ordinal = ordinalForSymbol(*sym);
    if (ordinal != lastOrdinal) {
      encodeDylibOrdinal(ordinal, os);
      lastOrdinal = ordinal;
    }
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void BindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

WeakBindingSection::WeakBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::weakBinding) {}

void WeakBindingSection::finalizeContents() {
  raw_svector_ostream os{contents};
  Binding lastBinding;

  for (const Defined *defined : definitions)
    encodeWeakOverride(defined, os);

  for (auto &p : sortBindings(bindingsMap)) {
    const Symbol *sym = p.first;
    std::vector<BindingEntry> &bindings = p.second;
    os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
       << sym->getName() << '\0'
       << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER);
    std::vector<BindIR> opcodes;
    for (const BindingEntry &b : bindings)
      encodeBinding(b.target.isec->parent,
                    b.target.isec->getOffset(b.target.offset), b.addend,
                    lastBinding, opcodes);
    if (config->optimize > 1)
      optimizeOpcodes(opcodes);
    for (const auto &op : opcodes)
      flushOpcodes(op, os);
  }
  if (!bindingsMap.empty() || !definitions.empty())
    os << static_cast<uint8_t>(BIND_OPCODE_DONE);
}

void WeakBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

StubsSection::StubsSection()
    : SyntheticSection(segment_names::text, section_names::stubs) {
  flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  // The stubs section comprises machine instructions, which are aligned to
  // 4 bytes on the archs we care about.
  align = 4;
  reserved2 = target->stubSize;
}

uint64_t StubsSection::getSize() const {
  return entries.size() * target->stubSize;
}

void StubsSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : entries) {
    target->writeStub(buf + off, *sym);
    off += target->stubSize;
  }
}

void StubsSection::finalize() { isFinal = true; }

bool StubsSection::addEntry(Symbol *sym) {
  bool inserted = entries.insert(sym);
  if (inserted)
    sym->stubsIndex = entries.size() - 1;
  return inserted;
}

StubHelperSection::StubHelperSection()
    : SyntheticSection(segment_names::text, section_names::stubHelper) {
  flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS;
  align = 4; // This section comprises machine instructions
}

uint64_t StubHelperSection::getSize() const {
  return target->stubHelperHeaderSize +
         in.lazyBinding->getEntries().size() * target->stubHelperEntrySize;
}

bool StubHelperSection::isNeeded() const { return in.lazyBinding->isNeeded(); }

void StubHelperSection::writeTo(uint8_t *buf) const {
  target->writeStubHelperHeader(buf);
  size_t off = target->stubHelperHeaderSize;
  for (const Symbol *sym : in.lazyBinding->getEntries()) {
    target->writeStubHelperEntry(buf + off, *sym, addr + off);
    off += target->stubHelperEntrySize;
  }
}

void StubHelperSection::setup() {
  Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr,
                                        /*isWeakRef=*/false);
  if (auto *undefined = dyn_cast<Undefined>(binder))
    treatUndefinedSymbol(*undefined,
                         "lazy binding (normally in libSystem.dylib)");

  // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check.
  stubBinder = dyn_cast_or_null<DylibSymbol>(binder);
  if (stubBinder == nullptr)
    return;

  in.got->addEntry(stubBinder);

  in.imageLoaderCache->parent =
      ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache);
  inputSections.push_back(in.imageLoaderCache);
  // Since this isn't in the symbol table or in any input file, the noDeadStrip
  // argument doesn't matter.
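  // __dyld_private labels the start of the image loader cache slot above;
  // roughly speaking, the per-arch stub helper header materializes this
  // symbol's address and hands it to dyld_stub_binder.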
  dyldPrivate =
      make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0,
                    /*isWeakDef=*/false,
                    /*isExternal=*/false, /*isPrivateExtern=*/false,
                    /*includeInSymtab=*/true,
                    /*isThumb=*/false, /*isReferencedDynamically=*/false,
                    /*noDeadStrip=*/false);
  dyldPrivate->used = true;
}

LazyPointerSection::LazyPointerSection()
    : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) {
  align = target->wordSize;
  flags = S_LAZY_SYMBOL_POINTERS;
}

uint64_t LazyPointerSection::getSize() const {
  return in.stubs->getEntries().size() * target->wordSize;
}

bool LazyPointerSection::isNeeded() const {
  return !in.stubs->getEntries().empty();
}

void LazyPointerSection::writeTo(uint8_t *buf) const {
  size_t off = 0;
  for (const Symbol *sym : in.stubs->getEntries()) {
    if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->hasStubsHelper()) {
        uint64_t stubHelperOffset =
            target->stubHelperHeaderSize +
            dysym->stubsHelperIndex * target->stubHelperEntrySize;
        write64le(buf + off, in.stubHelper->addr + stubHelperOffset);
      }
    } else {
      write64le(buf + off, sym->getVA());
    }
    off += target->wordSize;
  }
}

LazyBindingSection::LazyBindingSection()
    : LinkEditSection(segment_names::linkEdit, section_names::lazyBinding) {}

void LazyBindingSection::finalizeContents() {
  // TODO: Just precompute output size here instead of writing to a temporary
  // buffer
  for (Symbol *sym : entries)
    sym->lazyBindOffset = encode(*sym);
}

void LazyBindingSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

void LazyBindingSection::addEntry(Symbol *sym) {
  if (entries.insert(sym)) {
    sym->stubsHelperIndex = entries.size() - 1;
    in.rebase->addEntry(in.lazyPointers->isec,
                        sym->stubsIndex * target->wordSize);
  }
}

// Unlike the non-lazy binding section, the bind opcodes in this section aren't
// interpreted all at once. Rather, dyld will start interpreting opcodes at a
// given offset, typically only binding a single symbol before it finds a
// BIND_OPCODE_DONE terminator. As such, unlike in the non-lazy-binding case,
// we cannot encode just the differences between symbols; we have to emit the
// complete bind information for each symbol.
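//
// Illustrative shape of the stream that encode() below emits for one entry
// (offsets are made up):
//   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | segIndex, ULEB(lazyPtrOffset),
//   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal (or the ULEB/SPECIAL form),
//   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_symbol\0",
//   BIND_OPCODE_DO_BIND, BIND_OPCODE_DONE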
uint32_t LazyBindingSection::encode(const Symbol &sym) {
  uint32_t opstreamOffset = contents.size();
  OutputSegment *dataSeg = in.lazyPointers->parent;
  os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB |
                             dataSeg->index);
  uint64_t offset =
      in.lazyPointers->addr - dataSeg->addr + sym.stubsIndex * target->wordSize;
  encodeULEB128(offset, os);
  encodeDylibOrdinal(ordinalForSymbol(sym), os);

  uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM;
  if (sym.isWeakRef())
    flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT;

  os << flags << sym.getName() << '\0'
     << static_cast<uint8_t>(BIND_OPCODE_DO_BIND)
     << static_cast<uint8_t>(BIND_OPCODE_DONE);
  return opstreamOffset;
}

ExportSection::ExportSection()
    : LinkEditSection(segment_names::linkEdit, section_names::export_) {}

void ExportSection::finalizeContents() {
  trieBuilder.setImageBase(in.header->addr);
  for (const Symbol *sym : symtab->getSymbols()) {
    if (const auto *defined = dyn_cast<Defined>(sym)) {
      if (defined->privateExtern || !defined->isLive())
        continue;
      trieBuilder.addSymbol(*defined);
      hasWeakSymbol = hasWeakSymbol || sym->isWeakDef();
    }
  }
  size = trieBuilder.build();
}

void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); }

DataInCodeSection::DataInCodeSection()
    : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {}

template <class LP>
static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() {
  std::vector<MachO::data_in_code_entry> dataInCodeEntries;
  for (const InputFile *inputFile : inputFiles) {
    if (!isa<ObjFile>(inputFile))
      continue;
    const ObjFile *objFile = cast<ObjFile>(inputFile);
    ArrayRef<MachO::data_in_code_entry> entries = objFile->getDataInCode();
    if (entries.empty())
      continue;

    assert(is_sorted(dataInCodeEntries, [](const data_in_code_entry &lhs,
                                           const data_in_code_entry &rhs) {
      return lhs.offset < rhs.offset;
    }));
    // For each code subsection find 'data in code' entries residing in it.
    // Compute the new offset values as
    // <offset within subsection> + <subsection address> - <__TEXT address>.
    for (const Section *section : objFile->sections) {
      for (const Subsection &subsec : section->subsections) {
        const InputSection *isec = subsec.isec;
        if (!isCodeSection(isec))
          continue;
        if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput())
          continue;
        const uint64_t beginAddr = section->addr + subsec.offset;
        auto it = llvm::lower_bound(
            entries, beginAddr,
            [](const MachO::data_in_code_entry &entry, uint64_t addr) {
              return entry.offset < addr;
            });
        const uint64_t endAddr = beginAddr + isec->getSize();
        for (const auto end = entries.end();
             it != end && it->offset + it->length <= endAddr; ++it)
          dataInCodeEntries.push_back(
              {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) -
                                     in.header->addr),
               it->length, it->kind});
      }
    }
  }
  return dataInCodeEntries;
}

void DataInCodeSection::finalizeContents() {
  entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>()
                                  : collectDataInCodeEntries<ILP32>();
}

void DataInCodeSection::writeTo(uint8_t *buf) const {
  if (!entries.empty())
    memcpy(buf, entries.data(), getRawSize());
}

FunctionStartsSection::FunctionStartsSection()
    : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {}

void FunctionStartsSection::finalizeContents() {
  raw_svector_ostream os{contents};
  std::vector<uint64_t> addrs;
  for (const InputFile *file : inputFiles) {
    if (auto *objFile = dyn_cast<ObjFile>(file)) {
      for (const Symbol *sym : objFile->symbols) {
        if (const auto *defined = dyn_cast_or_null<Defined>(sym)) {
          if (!defined->isec || !isCodeSection(defined->isec) ||
              !defined->isLive())
            continue;
          // TODO: Add support for Thumb; in that case the lowest bit of
          // nextAddr needs to be set to 1.
          addrs.push_back(defined->getVA());
        }
      }
    }
  }
  llvm::sort(addrs);
  uint64_t addr = in.header->addr;
  for (uint64_t nextAddr : addrs) {
    uint64_t delta = nextAddr - addr;
    if (delta == 0)
      continue;
    encodeULEB128(delta, os);
    addr = nextAddr;
  }
  os << '\0';
}

void FunctionStartsSection::writeTo(uint8_t *buf) const {
  memcpy(buf, contents.data(), contents.size());
}

SymtabSection::SymtabSection(StringTableSection &stringTableSection)
    : LinkEditSection(segment_names::linkEdit, section_names::symbolTable),
      stringTableSection(stringTableSection) {}

void SymtabSection::emitBeginSourceStab(StringRef sourceFile) {
  StabsEntry stab(N_SO);
  stab.strx = stringTableSection.addString(saver().save(sourceFile));
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndSourceStab() {
  StabsEntry stab(N_SO);
  stab.sect = 1;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitObjectFileStab(ObjFile *file) {
  StabsEntry stab(N_OSO);
  stab.sect = target->cpuSubtype;
  SmallString<261> path(!file->archiveName.empty() ? file->archiveName
                                                   : file->getName());
  std::error_code ec = sys::fs::make_absolute(path);
  if (ec)
    fatal("failed to get absolute path for " + path);

  if (!file->archiveName.empty())
    path.append({"(", file->getName(), ")"});

  StringRef adjustedPath = saver().save(path.str());
  adjustedPath.consume_front(config->osoPrefix);

  stab.strx = stringTableSection.addString(adjustedPath);
  stab.desc = 1;
  stab.value = file->modTime;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitEndFunStab(Defined *defined) {
  StabsEntry stab(N_FUN);
  stab.value = defined->size;
  stabs.emplace_back(std::move(stab));
}

void SymtabSection::emitStabs() {
  if (config->omitDebugInfo)
    return;

  for (const std::string &s : config->astPaths) {
    StabsEntry astStab(N_AST);
    astStab.strx = stringTableSection.addString(s);
    stabs.emplace_back(std::move(astStab));
  }

  // Cache the file ID for each symbol in an std::pair for faster sorting.
  using SortingPair = std::pair<Defined *, int>;
  std::vector<SortingPair> symbolsNeedingStabs;
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols)) {
    Symbol *sym = entry.sym;
    assert(sym->isLive() &&
           "dead symbols should not be in localSymbols, externalSymbols");
    if (auto *defined = dyn_cast<Defined>(sym)) {
      // Excluded symbols should have been filtered out in finalizeContents().
      assert(defined->includeInSymtab);

      if (defined->isAbsolute())
        continue;

      // Constant-folded symbols go in the executable's symbol table, but don't
      // get a stabs entry.
      if (defined->wasIdenticalCodeFolded)
        continue;

      InputSection *isec = defined->isec;
      ObjFile *file = dyn_cast_or_null<ObjFile>(isec->getFile());
      if (!file || !file->compileUnit)
        continue;

      symbolsNeedingStabs.emplace_back(defined, defined->isec->getFile()->id);
    }
  }

  llvm::stable_sort(symbolsNeedingStabs,
                    [&](const SortingPair &a, const SortingPair &b) {
                      return a.second < b.second;
                    });

  // Emit STABS symbols so that dsymutil and/or the debugger can map address
  // regions in the final binary to the source and object files from which they
  // originated.
  InputFile *lastFile = nullptr;
  for (SortingPair &pair : symbolsNeedingStabs) {
    Defined *defined = pair.first;
    InputSection *isec = defined->isec;
    ObjFile *file = cast<ObjFile>(isec->getFile());

    if (lastFile == nullptr || lastFile != file) {
      if (lastFile != nullptr)
        emitEndSourceStab();
      lastFile = file;

      emitBeginSourceStab(file->sourceFile());
      emitObjectFileStab(file);
    }

    StabsEntry symStab;
    symStab.sect = defined->isec->parent->index;
    symStab.strx = stringTableSection.addString(defined->getName());
    symStab.value = defined->getVA();

    if (isCodeSection(isec)) {
      symStab.type = N_FUN;
      stabs.emplace_back(std::move(symStab));
      emitEndFunStab(defined);
    } else {
      symStab.type = defined->isExternal() ? N_GSYM : N_STSYM;
      stabs.emplace_back(std::move(symStab));
    }
  }

  if (!stabs.empty())
    emitEndSourceStab();
}

void SymtabSection::finalizeContents() {
  auto addSymbol = [&](std::vector<SymtabEntry> &symbols, Symbol *sym) {
    uint32_t strx = stringTableSection.addString(sym->getName());
    symbols.push_back({sym, strx});
  };

  std::function<void(Symbol *)> localSymbolsHandler;
  switch (config->localSymbolsPresence) {
  case SymtabPresence::All:
    localSymbolsHandler = [&](Symbol *sym) { addSymbol(localSymbols, sym); };
    break;
  case SymtabPresence::None:
    localSymbolsHandler = [&](Symbol *) { /* Do nothing */ };
    break;
  case SymtabPresence::SelectivelyIncluded:
    localSymbolsHandler = [&](Symbol *sym) {
      if (config->localSymbolPatterns.match(sym->getName()))
        addSymbol(localSymbols, sym);
    };
    break;
  case SymtabPresence::SelectivelyExcluded:
    localSymbolsHandler = [&](Symbol *sym) {
      if (!config->localSymbolPatterns.match(sym->getName()))
        addSymbol(localSymbols, sym);
    };
    break;
  }

  // Local symbols aren't in the SymbolTable, so we walk the list of object
  // files to gather them. But if `-x` is set, then we don't need to.
  // localSymbolsHandler() will do the right thing regardless, but this check
  // is a perf optimization because iterating through all the input files and
  // their symbols is expensive.
  if (config->localSymbolsPresence != SymtabPresence::None) {
    for (const InputFile *file : inputFiles) {
      if (auto *objFile = dyn_cast<ObjFile>(file)) {
        for (Symbol *sym : objFile->symbols) {
          if (auto *defined = dyn_cast_or_null<Defined>(sym)) {
            if (defined->isExternal() || !defined->isLive() ||
                !defined->includeInSymtab)
              continue;
            localSymbolsHandler(sym);
          }
        }
      }
    }
  }

  // __dyld_private is a local symbol too. It's linker-created and doesn't
  // exist in any object file.
  if (Defined *dyldPrivate = in.stubHelper->dyldPrivate)
    localSymbolsHandler(dyldPrivate);

  for (Symbol *sym : symtab->getSymbols()) {
    if (!sym->isLive())
      continue;
    if (auto *defined = dyn_cast<Defined>(sym)) {
      if (!defined->includeInSymtab)
        continue;
      assert(defined->isExternal());
      if (defined->privateExtern)
        localSymbolsHandler(defined);
      else
        addSymbol(externalSymbols, defined);
    } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) {
      if (dysym->isReferenced())
        addSymbol(undefinedSymbols, sym);
    }
  }

  emitStabs();
  uint32_t symtabIndex = stabs.size();
  for (const SymtabEntry &entry :
       concat<SymtabEntry>(localSymbols, externalSymbols, undefinedSymbols)) {
    entry.sym->symtabIndex = symtabIndex++;
  }
}

uint32_t SymtabSection::getNumSymbols() const {
  return stabs.size() + localSymbols.size() + externalSymbols.size() +
         undefinedSymbols.size();
}

// This serves to hide (type-erase) the template parameter from SymtabSection.
template <class LP> class SymtabSectionImpl final : public SymtabSection {
public:
  SymtabSectionImpl(StringTableSection &stringTableSection)
      : SymtabSection(stringTableSection) {}
  uint64_t getRawSize() const override;
  void writeTo(uint8_t *buf) const override;
};

template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const {
  return getNumSymbols() * sizeof(typename LP::nlist);
}

template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const {
  auto *nList = reinterpret_cast<typename LP::nlist *>(buf);
  // Emit the stabs entries before the "real" symbols. We cannot emit them
  // after as that would render Symbol::symtabIndex inaccurate.
  for (const StabsEntry &entry : stabs) {
    nList->n_strx = entry.strx;
    nList->n_type = entry.type;
    nList->n_sect = entry.sect;
    nList->n_desc = entry.desc;
    nList->n_value = entry.value;
    ++nList;
  }

  for (const SymtabEntry &entry : concat<const SymtabEntry>(
           localSymbols, externalSymbols, undefinedSymbols)) {
    nList->n_strx = entry.strx;
    // TODO populate n_desc with more flags
    if (auto *defined = dyn_cast<Defined>(entry.sym)) {
      uint8_t scope = 0;
      if (defined->privateExtern) {
        // Private external -- dylib scoped symbol.
        // Promote to non-external at link time.
        scope = N_PEXT;
      } else if (defined->isExternal()) {
        // Normal global symbol.
        scope = N_EXT;
      } else {
        // TU-local symbol from localSymbols.
        scope = 0;
      }

      if (defined->isAbsolute()) {
        nList->n_type = scope | N_ABS;
        nList->n_sect = NO_SECT;
        nList->n_value = defined->value;
      } else {
        nList->n_type = scope | N_SECT;
        nList->n_sect = defined->isec->parent->index;
        // For the N_SECT symbol type, n_value is the address of the symbol
        nList->n_value = defined->getVA();
      }
      nList->n_desc |= defined->thumb ? N_ARM_THUMB_DEF : 0;
      nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0;
      nList->n_desc |=
          defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0;
    } else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) {
      uint16_t n_desc = nList->n_desc;
      int16_t ordinal = ordinalForDylibSymbol(*dysym);
      if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP)
        SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL);
      else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE)
        SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL);
      else {
        assert(ordinal > 0);
        SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal));
      }

      nList->n_type = N_EXT;
      n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0;
      n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0;
      nList->n_desc = n_desc;
    }
    ++nList;
  }
}

template <class LP>
SymtabSection *
macho::makeSymtabSection(StringTableSection &stringTableSection) {
  return make<SymtabSectionImpl<LP>>(stringTableSection);
}

IndirectSymtabSection::IndirectSymtabSection()
    : LinkEditSection(segment_names::linkEdit,
                      section_names::indirectSymbolTable) {}

uint32_t IndirectSymtabSection::getNumSymbols() const {
  return in.got->getEntries().size() + in.tlvPointers->getEntries().size() +
         2 * in.stubs->getEntries().size();
}

bool IndirectSymtabSection::isNeeded() const {
  return in.got->isNeeded() || in.tlvPointers->isNeeded() ||
         in.stubs->isNeeded();
}

void IndirectSymtabSection::finalizeContents() {
  uint32_t off = 0;
  in.got->reserved1 = off;
  off += in.got->getEntries().size();
  in.tlvPointers->reserved1 = off;
  off += in.tlvPointers->getEntries().size();
  in.stubs->reserved1 = off;
  off += in.stubs->getEntries().size();
  in.lazyPointers->reserved1 = off;
}

static uint32_t indirectValue(const Symbol *sym) {
  if (sym->symtabIndex == UINT32_MAX)
    return INDIRECT_SYMBOL_LOCAL;
  if (auto *defined = dyn_cast<Defined>(sym))
    if (defined->privateExtern)
      return INDIRECT_SYMBOL_LOCAL;
  return sym->symtabIndex;
}

void IndirectSymtabSection::writeTo(uint8_t *buf) const {
  uint32_t off = 0;
  for (const Symbol *sym : in.got->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  for (const Symbol *sym : in.tlvPointers->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  for (const Symbol *sym : in.stubs->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
  // There is a 1:1 correspondence between stubs and LazyPointerSection
  // entries. But giving __stubs and __la_symbol_ptr the same reserved1
  // (the offset into the indirect symbol table) so that they both refer
  // to the same range of offsets confuses `strip`, so write the stubs
  // symbol table offsets a second time.
  for (const Symbol *sym : in.stubs->getEntries()) {
    write32le(buf + off * sizeof(uint32_t), indirectValue(sym));
    ++off;
  }
}

StringTableSection::StringTableSection()
    : LinkEditSection(segment_names::linkEdit, section_names::stringTable) {}

uint32_t StringTableSection::addString(StringRef str) {
  uint32_t strx = size;
  strings.push_back(str); // TODO: consider deduplicating strings
  size += str.size() + 1; // account for null terminator
  return strx;
}

void StringTableSection::writeTo(uint8_t *buf) const {
  uint32_t off = 0;
  for (StringRef str : strings) {
    memcpy(buf + off, str.data(), str.size());
    off += str.size() + 1; // account for null terminator
  }
}

static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0, "");
static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0, "");

CodeSignatureSection::CodeSignatureSection()
    : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) {
  align = 16; // required by libstuff
  // FIXME: Consider using finalOutput instead of outputFile.
  fileName = config->outputFile;
  size_t slashIndex = fileName.rfind("/");
  if (slashIndex != std::string::npos)
    fileName = fileName.drop_front(slashIndex + 1);

  // NOTE: Any changes to these calculations should be repeated
  // in llvm-objcopy's MachOLayoutBuilder::layoutTail.
  allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1);
  fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size();
}

uint32_t CodeSignatureSection::getBlockCount() const {
  return (fileOff + blockSize - 1) / blockSize;
}

uint64_t CodeSignatureSection::getRawSize() const {
  return allHeadersSize + getBlockCount() * hashSize;
}

void CodeSignatureSection::writeHashes(uint8_t *buf) const {
  // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
  // MachOWriter::writeSignatureData.
  uint8_t *hashes = buf + fileOff + allHeadersSize;
  parallelFor(0, getBlockCount(), [&](size_t i) {
    sha256(buf + i * blockSize,
           std::min(static_cast<size_t>(fileOff - i * blockSize),
                    static_cast<size_t>(blockSize)),
           hashes + i * hashSize);
  });
#if defined(__APPLE__)
  // This is a macOS-specific work-around and makes no sense for any
  // other host OS. See https://openradar.appspot.com/FB8914231
  //
  // The macOS kernel maintains a signature-verification cache to
  // quickly validate applications at time of execve(2). The trouble
  // is that the kernel creates the cache entry at the time of the
  // mmap(2) call, before we have a chance to write either the code to
  // sign or the signature header+hashes. The fix is to invalidate
  // all cached data associated with the output file, thus discarding
  // the bogus prematurely-cached signature.
  msync(buf, fileOff + getSize(), MS_INVALIDATE);
#endif
}

void CodeSignatureSection::writeTo(uint8_t *buf) const {
  // NOTE: Changes to this functionality should be repeated in llvm-objcopy's
  // MachOWriter::writeSignatureData.
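  //
  // Layout, as written below: a CS_SuperBlob header with a single
  // CS_BlobIndex, followed by the CS_CodeDirectory, the identifier string
  // (fileName plus padding), and finally the per-page hashes emitted by
  // writeHashes().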
  uint32_t signatureSize = static_cast<uint32_t>(getSize());
  auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf);
  write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE);
  write32be(&superBlob->length, signatureSize);
  write32be(&superBlob->count, 1);
  auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]);
  write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY);
  write32be(&blobIndex->offset, blobHeadersSize);
  auto *codeDirectory =
      reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize);
  write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY);
  write32be(&codeDirectory->length, signatureSize - blobHeadersSize);
  write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG);
  write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED);
  write32be(&codeDirectory->hashOffset,
            sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad);
  write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory));
  codeDirectory->nSpecialSlots = 0;
  write32be(&codeDirectory->nCodeSlots, getBlockCount());
  write32be(&codeDirectory->codeLimit, fileOff);
  codeDirectory->hashSize = static_cast<uint8_t>(hashSize);
  codeDirectory->hashType = kSecCodeSignatureHashSHA256;
  codeDirectory->platform = 0;
  codeDirectory->pageSize = blockSizeShift;
  codeDirectory->spare2 = 0;
  codeDirectory->scatterOffset = 0;
  codeDirectory->teamOffset = 0;
  codeDirectory->spare3 = 0;
  codeDirectory->codeLimit64 = 0;
  OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text);
  write64be(&codeDirectory->execSegBase, textSeg->fileOff);
  write64be(&codeDirectory->execSegLimit, textSeg->fileSize);
  write64be(&codeDirectory->execSegFlags,
            config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0);
  auto *id = reinterpret_cast<char *>(&codeDirectory[1]);
  memcpy(id, fileName.begin(), fileName.size());
  memset(id + fileName.size(), 0, fileNamePad);
}

BitcodeBundleSection::BitcodeBundleSection()
    : SyntheticSection(segment_names::llvm, section_names::bitcodeBundle) {}

class ErrorCodeWrapper {
public:
  explicit ErrorCodeWrapper(std::error_code ec) : errorCode(ec.value()) {}
  explicit ErrorCodeWrapper(int ec) : errorCode(ec) {}
  operator int() const { return errorCode; }

private:
  int errorCode;
};

#define CHECK_EC(exp)                                                          \
  do {                                                                         \
    ErrorCodeWrapper ec(exp);                                                  \
    if (ec)                                                                    \
      fatal(Twine("operation failed with error code ") + Twine(ec) + ": " +    \
            #exp);                                                             \
  } while (0);

void BitcodeBundleSection::finalize() {
#ifdef LLVM_HAVE_LIBXAR
  using namespace llvm::sys::fs;
  CHECK_EC(createTemporaryFile("bitcode-bundle", "xar", xarPath));

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  xar_t xar(xar_open(xarPath.data(), O_RDWR));
#pragma clang diagnostic pop
  if (!xar)
    fatal("failed to open XAR temporary file at " + xarPath);
  CHECK_EC(xar_opt_set(xar, XAR_OPT_COMPRESSION, XAR_OPT_VAL_NONE));
  // FIXME: add more data to XAR
  CHECK_EC(xar_close(xar));

  file_size(xarPath, xarSize);
#endif // defined(LLVM_HAVE_LIBXAR)
}

void BitcodeBundleSection::writeTo(uint8_t *buf) const {
  using namespace llvm::sys::fs;
  file_t handle =
      CHECK(openNativeFile(xarPath, CD_OpenExisting, FA_Read, OF_None),
            "failed to open XAR file");
  std::error_code ec;
  mapped_file_region xarMap(handle, mapped_file_region::mapmode::readonly,
                            xarSize, 0, ec);
  if (ec)
    fatal("failed to map XAR file");
  memcpy(buf, xarMap.const_data(), xarSize);

  closeFile(handle);
  remove(xarPath);
}

CStringSection::CStringSection()
    : SyntheticSection(segment_names::text, section_names::cString) {
  flags = S_CSTRING_LITERALS;
}

void CStringSection::addInput(CStringInputSection *isec) {
  isec->parent = this;
  inputs.push_back(isec);
  if (isec->align > align)
    align = isec->align;
}

void CStringSection::writeTo(uint8_t *buf) const {
  for (const CStringInputSection *isec : inputs) {
    for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
      if (!isec->pieces[i].live)
        continue;
      StringRef string = isec->getStringRef(i);
      memcpy(buf + isec->pieces[i].outSecOff, string.data(), string.size());
    }
  }
}

void CStringSection::finalizeContents() {
  uint64_t offset = 0;
  for (CStringInputSection *isec : inputs) {
    for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
      if (!isec->pieces[i].live)
        continue;
      // See comment above DeduplicatedCStringSection for how alignment is
      // handled.
      uint32_t pieceAlign =
          1 << countTrailingZeros(isec->align | isec->pieces[i].inSecOff);
      offset = alignTo(offset, pieceAlign);
      isec->pieces[i].outSecOff = offset;
      isec->isFinal = true;
      StringRef string = isec->getStringRef(i);
      offset += string.size();
    }
  }
  size = offset;
}

// Mergeable cstring literals are found under the __TEXT,__cstring section.
// In contrast to ELF, which puts strings that need different alignments into
// different sections, clang's Mach-O backend puts them all in one section.
// Strings that need to be aligned have the .p2align directive emitted before
// them, which simply translates into zero padding in the object file. In other
// words, we have to infer the desired alignment of these cstrings from their
// addresses.
//
// We differ slightly from ld64 in how we've chosen to align these cstrings.
// Both LLD and ld64 preserve the number of trailing zeros in each cstring's
// address in the input object files. When deduplicating identical cstrings,
// both linkers pick the cstring whose address has more trailing zeros, and
// preserve the alignment of that address in the final binary. However, ld64
// goes a step further and also preserves the offset of the cstring from the
// last section-aligned address. I.e. if a cstring is at offset 18 in the
// input, with a section alignment of 16, then both LLD and ld64 will ensure
// the final address is 2-byte aligned (since 18 == 16 + 2). But ld64 will also
// ensure that the final address is of the form 16 * k + 2 for some k.
//
// Note that ld64's heuristic means that a dedup'ed cstring's final address is
// dependent on the order of the input object files. E.g. if in addition to the
// cstring at offset 18 above, we have a duplicate one in another file with a
// `.cstring` section alignment of 2 and an offset of zero, then ld64 will pick
// the cstring from the object file earlier on the command line (since both
// have the same number of trailing zeros in their address). So the final
// cstring may either be at some address `16 * k + 2` or at some address
// `2 * k`.
//
// I've opted not to follow this behavior primarily for implementation
// simplicity, and secondarily to save a few more bytes. It's not clear to me
// that preserving the section alignment + offset is ever necessary, and there
// are many cases that are clearly redundant. In particular, if an x86_64
// object file contains some strings that are accessed via SIMD instructions,
// then the .cstring section in the object file will be 16-byte-aligned (since
// SIMD requires its operand addresses to be 16-byte aligned). However, there
// will typically also be other cstrings in the same file that aren't used via
// SIMD and don't need this alignment. They will be emitted at some arbitrary
// address `A`, but ld64 will treat them as being 16-byte aligned with an
// offset of `A % 16`.
void DeduplicatedCStringSection::finalizeContents() {
  // Find the largest alignment required for each string.
  for (const CStringInputSection *isec : inputs) {
    for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
      const StringPiece &piece = isec->pieces[i];
      if (!piece.live)
        continue;
      auto s = isec->getCachedHashStringRef(i);
      assert(isec->align != 0);
      uint8_t trailingZeros = countTrailingZeros(isec->align | piece.inSecOff);
      auto it = stringOffsetMap.insert(
          std::make_pair(s, StringOffset(trailingZeros)));
      if (!it.second && it.first->second.trailingZeros < trailingZeros)
        it.first->second.trailingZeros = trailingZeros;
    }
  }

  // Assign an offset for each string and save it to the corresponding
  // StringPieces for easy access.
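  // For example (illustrative): a string whose strongest requirement across
  // all inputs is trailingZeros == 3 is placed at the next 8-byte-aligned
  // offset the first time it is seen; later duplicates simply reuse that
  // offset.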
  for (CStringInputSection *isec : inputs) {
    for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) {
      if (!isec->pieces[i].live)
        continue;
      auto s = isec->getCachedHashStringRef(i);
      auto it = stringOffsetMap.find(s);
      assert(it != stringOffsetMap.end());
      StringOffset &offsetInfo = it->second;
      if (offsetInfo.outSecOff == UINT64_MAX) {
        offsetInfo.outSecOff = alignTo(size, 1ULL << offsetInfo.trailingZeros);
        size = offsetInfo.outSecOff + s.size();
      }
      isec->pieces[i].outSecOff = offsetInfo.outSecOff;
    }
    isec->isFinal = true;
  }
}

void DeduplicatedCStringSection::writeTo(uint8_t *buf) const {
  for (const auto &p : stringOffsetMap) {
    StringRef data = p.first.val();
    uint64_t off = p.second.outSecOff;
    if (!data.empty())
      memcpy(buf + off, data.data(), data.size());
  }
}

// This section is actually emitted as __TEXT,__const by ld64, but clang may
// emit input sections of that name, and LLD doesn't currently support mixing
// synthetic and concat-type OutputSections. To work around this, I've given
// our merged-literals section a different name.
WordLiteralSection::WordLiteralSection()
    : SyntheticSection(segment_names::text, section_names::literals) {
  align = 16;
}

void WordLiteralSection::addInput(WordLiteralInputSection *isec) {
  isec->parent = this;
  inputs.push_back(isec);
}

void WordLiteralSection::finalizeContents() {
  for (WordLiteralInputSection *isec : inputs) {
    // We do all processing of the InputSection here, so it will be effectively
    // finalized.
    isec->isFinal = true;
    const uint8_t *buf = isec->data.data();
    switch (sectionType(isec->getFlags())) {
    case S_4BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 4) {
        if (!isec->isLive(off))
          continue;
        uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off);
        literal4Map.emplace(value, literal4Map.size());
      }
      break;
    }
    case S_8BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 8) {
        if (!isec->isLive(off))
          continue;
        uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off);
        literal8Map.emplace(value, literal8Map.size());
      }
      break;
    }
    case S_16BYTE_LITERALS: {
      for (size_t off = 0, e = isec->data.size(); off < e; off += 16) {
        if (!isec->isLive(off))
          continue;
        UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off);
        literal16Map.emplace(value, literal16Map.size());
      }
      break;
    }
    default:
      llvm_unreachable("invalid literal section type");
    }
  }
}

void WordLiteralSection::writeTo(uint8_t *buf) const {
  // Note that we don't attempt to do any endianness conversion in addInput(),
  // so we don't do it here either -- just write out the original value,
  // byte-for-byte.
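  //
  // Layout, as written below: all 16-byte literals first, then the 8-byte
  // ones, then the 4-byte ones; each map value is the literal's index within
  // its size class.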
  for (const auto &p : literal16Map)
    memcpy(buf + p.second * 16, &p.first, 16);
  buf += literal16Map.size() * 16;

  for (const auto &p : literal8Map)
    memcpy(buf + p.second * 8, &p.first, 8);
  buf += literal8Map.size() * 8;

  for (const auto &p : literal4Map)
    memcpy(buf + p.second * 4, &p.first, 4);
}

ObjCImageInfoSection::ObjCImageInfoSection()
    : SyntheticSection(segment_names::data, section_names::objCImageInfo) {}

ObjCImageInfoSection::ImageInfo
ObjCImageInfoSection::parseImageInfo(const InputFile *file) {
  ImageInfo info;
  ArrayRef<uint8_t> data = file->objCImageInfo;
  // The image info struct has the following layout:
  // struct {
  //   uint32_t version;
  //   uint32_t flags;
  // };
  if (data.size() < 8) {
    warn(toString(file) + ": invalid __objc_imageinfo size");
    return info;
  }

  auto *buf = reinterpret_cast<const uint32_t *>(data.data());
  if (read32le(buf) != 0) {
    warn(toString(file) + ": invalid __objc_imageinfo version");
    return info;
  }

  uint32_t flags = read32le(buf + 1);
  info.swiftVersion = (flags >> 8) & 0xff;
  info.hasCategoryClassProperties = flags & 0x40;
  return info;
}

static std::string swiftVersionString(uint8_t version) {
  switch (version) {
  case 1:
    return "1.0";
  case 2:
    return "1.1";
  case 3:
    return "2.0";
  case 4:
    return "3.0";
  case 5:
    return "4.0";
  default:
    return ("0x" + Twine::utohexstr(version)).str();
  }
}

// Validate each object file's __objc_imageinfo and use them to generate the
// image info for the output binary. Only two pieces of info are relevant:
// 1. The Swift version (should be identical across inputs)
// 2. `bool hasCategoryClassProperties` (true only if true for all inputs)
void ObjCImageInfoSection::finalizeContents() {
  assert(files.size() != 0); // should have already been checked via isNeeded()

  info.hasCategoryClassProperties = true;
  const InputFile *firstFile;
  for (auto file : files) {
    ImageInfo inputInfo = parseImageInfo(file);
    info.hasCategoryClassProperties &= inputInfo.hasCategoryClassProperties;

    // swiftVersion 0 means no Swift is present, so no version checking
    // required
    if (inputInfo.swiftVersion == 0)
      continue;

    if (info.swiftVersion != 0 && info.swiftVersion != inputInfo.swiftVersion) {
      error("Swift version mismatch: " + toString(firstFile) +
            " has version " + swiftVersionString(info.swiftVersion) + " but " +
            toString(file) + " has version " +
            swiftVersionString(inputInfo.swiftVersion));
    } else {
      info.swiftVersion = inputInfo.swiftVersion;
      firstFile = file;
    }
  }
}

void ObjCImageInfoSection::writeTo(uint8_t *buf) const {
  uint32_t flags = info.hasCategoryClassProperties ? 0x40 : 0x0;
  flags |= info.swiftVersion << 8;
  write32le(buf + 4, flags);
}
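
// Worked example of the encoding above (illustrative values, not taken from
// any particular input): a file whose __objc_imageinfo flags word is 0x0540
// has the 0x40 "category class properties" bit set and a Swift version field
// of (0x0540 >> 8) & 0xff == 5, which swiftVersionString() renders as "4.0".
// If every input agrees, writeTo() re-encodes that same flags value into the
// output's flags word.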

void macho::createSyntheticSymbols() {
  auto addHeaderSymbol = [](const char *name) {
    symtab->addSynthetic(name, in.header->isec, /*value=*/0,
                         /*isPrivateExtern=*/true, /*includeInSymtab=*/false,
                         /*referencedDynamically=*/false);
  };

  switch (config->outputType) {
  // FIXME: Assign the right address value for these symbols
  // (rather than 0). But we need to do that after assignAddresses().
  case MH_EXECUTE:
    // If linking PIE, __mh_execute_header is a defined symbol in
    // __TEXT,__text. Otherwise, it's an absolute symbol.
    if (config->isPic)
      symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0,
                           /*isPrivateExtern=*/false, /*includeInSymtab=*/true,
                           /*referencedDynamically=*/true);
    else
      symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr,
                           /*value=*/0, /*isPrivateExtern=*/false,
                           /*includeInSymtab=*/true,
                           /*referencedDynamically=*/true);
    break;

  // The following symbols are N_SECT symbols, even though the header is not
  // part of any section, and they are private to the bundle/dylib/object
  // they belong to.
  case MH_BUNDLE:
    addHeaderSymbol("__mh_bundle_header");
    break;
  case MH_DYLIB:
    addHeaderSymbol("__mh_dylib_header");
    break;
  case MH_DYLINKER:
    addHeaderSymbol("__mh_dylinker_header");
    break;
  case MH_OBJECT:
    addHeaderSymbol("__mh_object_header");
    break;
  default:
    llvm_unreachable("unexpected outputType");
    break;
  }

  // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit
  // which does e.g. cleanup of static global variables. The ABI document
  // says that the pointer can point to any address in one of the dylib's
  // segments, but in practice ld64 seems to set it to point to the header,
  // so that's what's implemented here.
  addHeaderSymbol("___dso_handle");
}

template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &);
template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &);
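
// A brief, hedged sketch of how the ___dso_handle symbol added in
// createSyntheticSymbols() above is consumed (compiler/runtime behavior, not
// something the linker emits): the Itanium C++ ABI registration routine is
// roughly
//   extern "C" int __cxa_atexit(void (*dtor)(void *), void *obj, void *dso);
// and compilers pass the address of __dso_handle as the third argument when
// registering destructors of static objects, so the runtime can later run
// them for the correct image. Per the ABI document referenced above, any
// address within one of the image's segments works for that purpose;
// pointing it at the Mach-O header is simply the conventional choice.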