//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedOrgFragments,
          "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
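// For example, applyFixup ultimately narrows the 64-bit fixup value computed
// in evaluateFixup into a field that may be only one, two, or four bytes wide
// (FK_Data_1 through FK_Data_4), so a value that no longer fits can be
// silently truncated. (Illustrative; which narrowings are invalid is
// target-specific.)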
/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
}

MCAssembler::~MCAssembler() = default;

void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  IncrementalLinkerCompatible = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionInfo.Major = 0;
  VersionInfo.SDKVersion = VersionTuple();

  // Reset objects owned by us.
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
  getLOHContainer().reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
  // Non-temporary labels should always be visible to the linker.
  if (!Symbol.isTemporary())
    return true;

  if (Symbol.isUsedInReloc())
    return true;

  return false;
}

const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.isInSection())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}

bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value,
                                bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.
  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
                                            Value, WasForced);

  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else if (auto *Writer = getWriterPtr()) {
        IsResolved = Writer->isSymbolRefDifferenceFullyResolvedImpl(
            *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
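  // (Backends use this, for instance, to keep a relocation for symbols whose
  // final value the linker may still change, such as Thumb function symbols
  // whose low bit encodes the instruction set; WasForced then tells
  // relaxation that the fixup was deliberately left unresolved.)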
  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
    IsResolved = false;
    WasForced = true;
  }

  return IsResolved;
}

uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));

    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign hook.
    if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
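    // (Illustrative numbers, not a real target: with a minimum nop size of 4
    // and an alignment of 2, a 2-byte pad grows to 4 bytes so that it can be
    // covered by whole nops.)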
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                  F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments.
  // We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding =
        computeBundlePadding(Assembler, EF, EF->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    EF->Offset += RequiredBundlePadding;
  }
}

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //
      //        v--------------v   <- BundleAlignSize
      //        v---------v        <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCAsmLayout &Layout, const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  support::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
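  // (The (void) cast below exists because the assert compiles away under
  // NDEBUG, which would otherwise leave Start unused and trigger a warning.)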
  uint64_t Start = OS.tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill, use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as a byte vector to reduce the number of writes
    // done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == support::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set NumPerChunk to the number of whole values that fit in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
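    // (Illustrative numbers: with VSize == 2, NumPerChunk is 8 and ChunkSize
    // is 16, so a 36-byte fill emits two full chunks above and then 4
    // trailing bytes below.)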
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    if (!Asm.getBackend().writeNopData(OS, FragmentSize))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          report_fatal_error("cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec))
              report_fatal_error("non-zero initializer found in section '" +
                                 ELFSec->getSectionName() + "'");
            else
              report_fatal_error("non-zero initializer found in virtual section");
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a
        // virtual section.
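        // (For instance, a '.balign 4, 0x90' in a BSS-style section would
        // trip this assert: padding in a virtual section must be zero.)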
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, Layout, F);

  assert(OS.tell() - Start == Layout.getSectionAddressSize(Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
                         const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue,
                                  WasForced);
  if (!IsResolved) {
    // The fixup was unresolved; we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    if (Target.getSymA() && Target.getSymB() &&
        getBackend().requiresDiffExpressionRelocations()) {
      // The fixup represents the difference between two symbols, which the
      // backend has indicated must be resolved at link time. Split up the
      // fixup into two relocations, one for the add, and one for the sub,
      // and emit both of these. The constant will be associated with the add
      // half of the expression.
      MCFixup FixupAdd = MCFixup::createAddFor(Fixup);
      MCValue TargetAdd =
          MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
      getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd,
                                   FixedValue);
      MCFixup FixupSub = MCFixup::createSubFor(Fixup);
      MCValue TargetSub = MCValue::get(Target.getSymB());
      getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub,
                                   FixedValue);
    } else {
      getWriter().recordRelocation(*this, Layout, &F, Fixup, Target,
                                   FixedValue);
    }
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections; this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout))
    if (getContext().hadError())
      return;

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      // Data and relaxable fragments both have fixups. So only process
      // those here.
      // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups
      // being templated makes this tricky.
      if (isa<MCEncodedFragment>(&Frag) &&
          isa<MCCompactEncodedInstFragment>(&Frag))
        continue;
      if (!isa<MCEncodedFragment>(&Frag) && !isa<MCCVDefRangeFragment>(&Frag) &&
          !isa<MCAlignFragment>(&Frag))
        continue;
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;
      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
        STI = FragWithFixups->getSubtargetInfo();
        assert(!FragWithFixups->hasInstructions() || STI != nullptr);
      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
        STI = FragWithFixups->getSubtargetInfo();
        assert(!FragWithFixups->hasInstructions() || STI != nullptr);
      } else if (auto *FragWithFixups = dyn_cast<MCCVDefRangeFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups =
                     dyn_cast<MCDwarfLineAddrFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *AF = dyn_cast<MCAlignFragment>(&Frag)) {
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign hook.
        if (Sec.UseCodeAlign() && AF->hasEmitNops()) {
          getBackend().shouldInsertFixupForCodeAlign(*this, Layout, *AF);
        }
        continue;
      } else if (auto *FragWithFixups =
                     dyn_cast<MCDwarfCallFrameFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else
        llvm_unreachable("Unknown fragment with fixups!");
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  // Create the layout object.
  MCAsmLayout Layout(*this);
  layout(Layout);

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this, Layout);
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed;
  getBackend().relaxInstruction(F.getInst(), *F.getSubtargetInfo(), Relaxed);

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo());

  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}

bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, OldSize);
  else
    encodeULEB128(Value, OSE, OldSize);
  return OldSize != LF.getContents().size();
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
                                     MCBoundaryAlignFragment &BF) {
  // An MCBoundaryAlignFragment that doesn't emit nops should not be relaxed.
  if (!BF.canEmitNops())
    return false;

  uint64_t AlignedOffset = Layout.getFragmentOffset(BF.getNextNode());
  uint64_t AlignedSize = 0;
  const MCFragment *F = BF.getNextNode();
  // If the branch is unfused, it is emitted into one fragment; otherwise it
  // is emitted into at most two fragments. The next MCBoundaryAlignFragment
  // (if it exists) also marks the end of the branch.
  for (auto i = 0, N = BF.isFused() ? 2 : 1;
       i != N && !isa<MCBoundaryAlignFragment>(F); ++i, F = F->getNextNode()) {
    AlignedSize += computeFragmentSize(Layout, *F);
  }
  uint64_t OldSize = BF.getSize();
  AlignedOffset -= OldSize;
  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == OldSize)
    return false;
  BF.setSize(NewSize);
  Layout.invalidateFragmentsFrom(&BF);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
                                     MCDwarfLineAddrFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  if (!getBackend().requiresDiffExpressionRelocations()) {
    MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta,
                            AddrDelta, OSE);
  } else {
    uint32_t Offset;
    uint32_t Size;
    bool SetDelta = MCDwarfLineAddr::FixedEncode(Context,
                                                 getDWARFLinetableParams(),
                                                 LineDelta, AddrDelta,
                                                 OSE, &Offset, &Size);
    // Add fixups for the address delta or the new address.
    const MCExpr *FixupExpr;
    if (SetDelta) {
      FixupExpr = &DF.getAddrDelta();
    } else {
      const MCBinaryExpr *ABE = cast<MCBinaryExpr>(&DF.getAddrDelta());
      FixupExpr = ABE->getLHS();
    }
    DF.getFixups().push_back(
        MCFixup::create(Offset, FixupExpr,
                        MCFixup::getKindForSize(Size, false /*isPCRel*/)));
  }

  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                              MCDwarfCallFrameFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a call frame with an invalid expression");
  (void) Abs;
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  if (getBackend().requiresDiffExpressionRelocations()) {
    uint32_t Offset;
    uint32_t Size;
    MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE, &Offset,
                                          &Size);
    if (Size) {
      DF.getFixups().push_back(MCFixup::create(
          Offset, &DF.getAddrDelta(),
          MCFixup::getKindForSizeInBits(Size /*In bits.*/, false /*isPCRel*/)));
    }
  } else {
    MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
  }

  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
                                         MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
                                  MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
  // Holds the first fragment which needed relaxing during this layout. It
  // will remain null if none were relaxed.
  // When a fragment is relaxed, all the fragments following it should get
  // invalidated because their offset is going to change.
  MCFragment *FirstRelaxedFragment = nullptr;

  // Attempt to relax all the fragments in the section.
  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
    // Check if this is a fragment that needs relaxation.
    bool RelaxedFrag = false;
    switch (I->getKind()) {
    default:
      break;
    case MCFragment::FT_Relaxable:
      assert(!getRelaxAll() &&
             "Did not expect a MCRelaxableFragment in RelaxAll mode");
      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
      break;
    case MCFragment::FT_Dwarf:
      RelaxedFrag = relaxDwarfLineAddr(Layout,
                                       *cast<MCDwarfLineAddrFragment>(I));
      break;
    case MCFragment::FT_DwarfFrame:
      RelaxedFrag =
          relaxDwarfCallFrameFragment(Layout,
                                      *cast<MCDwarfCallFrameFragment>(I));
      break;
    case MCFragment::FT_LEB:
      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
      break;
    case MCFragment::FT_BoundaryAlign:
      RelaxedFrag =
          relaxBoundaryAlign(Layout, *cast<MCBoundaryAlignFragment>(I));
      break;
    case MCFragment::FT_CVInlineLines:
      RelaxedFrag =
          relaxCVInlineLineTable(Layout, *cast<MCCVInlineLineTableFragment>(I));
      break;
    case MCFragment::FT_CVDefRange:
      RelaxedFrag = relaxCVDefRange(Layout, *cast<MCCVDefRangeFragment>(I));
      break;
    }
    if (RelaxedFrag && !FirstRelaxedFragment)
      FirstRelaxedFragment = &*I;
  }
  if (FirstRelaxedFragment) {
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
    return true;
  }
  return false;
}

bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  bool WasRelaxed = false;
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSection &Sec = *it;
    while (layoutSectionOnce(Layout, Sec))
      WasRelaxed = true;
  }

  return WasRelaxed;
}

void MCAssembler::finishLayout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
    computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
  }
  getBackend().finishLayout(*this, Layout);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n    ";
    it->dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  for (const_symbol_iterator it = symbol_begin(), ie = symbol_end();
       it != ie; ++it) {
    if (it != symbol_begin()) OS << ",\n           ";
    OS << "(";
    it->dump();
    OS << ", Index:" << it->getIndex() << ", ";
    OS << ")";
  }
  OS << "]>\n";
}
#endif
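// A minimal sketch of how a client drives this file end-to-end (illustrative
// only: real setup goes through an MCObjectStreamer and is target-specific):
//
//   MCAssembler Asm(Ctx, std::move(MAB), std::move(MCE), std::move(OW));
//   // ... the streamer registers sections and symbols, appends fragments ...
//   Asm.Finish(); // layout and relaxation, then fixups, then writeObject()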