//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedOrgFragments,
          "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
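// (For example, a 64-bit expression value of 0x100000001 applied through a
// 4-byte fixup field would silently be emitted as 0x00000001 today.)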
/* *** */

MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
}

MCAssembler::~MCAssembler() = default;

void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  IncrementalLinkerCompatible = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionInfo.Major = 0;
  VersionInfo.SDKVersion = VersionTuple();

  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
  getLOHContainer().reset();
}

bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}

bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
  // Non-temporary labels should always be visible to the linker.
  if (!Symbol.isTemporary())
    return true;

  if (Symbol.isUsedInReloc())
    return true;

  return false;
}

const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(S))
    return &S;

  // Absolute and undefined symbols have no defining atom.
  if (!S.isInSection())
    return nullptr;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
          *S.getFragment()->getParent()))
    return nullptr;

  // Otherwise, return the atom for the containing fragment.
  return S.getFragment()->getAtom();
}

bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value,
                                bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.
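  // (Sketch of the evaluation below, for orientation: a relocatable Target of
  // the form SymA - SymB + C yields Value = C + offset(SymA) - offset(SymB),
  // and PC-relative kinds additionally subtract the fixup's own location.)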
  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
                                            Value, WasForced);

  unsigned FixupFlags =
      getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = FixupFlags & MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else if (auto *Writer = getWriterPtr()) {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     Writer->isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC)
      Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
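  // (Some targets return true here even for values that are otherwise known,
  // e.g. for fields a linker may later rewrite, such as TLS references or
  // linker-relaxation candidates.)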
  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
    IsResolved = false;
    WasForced = true;
  }

  return IsResolved;
}

uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateAsAbsolute(NumValues, Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));

    // Insert extra nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //            BundlePadding
  //                 |||
  // -------------------------------------
  //   Prev         |##########|    F    |
  // -------------------------------------
  //                            ^
  //                            |
  //                            F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments.
  // We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding =
        computeBundlePadding(Assembler, EF, EF->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    EF->Offset += RequiredBundlePadding;
  }
}

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}

void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}

/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCAsmLayout &Layout, const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  support::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill, use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count))
        report_fatal_error("unable to write nop sequence of " + Twine(Count) +
                           " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as a byte vector to reduce the number of writes
    // done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == support::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
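    // (Worked example: VSize = 2 and FragmentSize = 38 give NumPerChunk = 8
    // and ChunkSize = 16, so the loop above writes two full 16-byte chunks
    // and the write below emits the remaining 6 bytes.)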
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    if (!Asm.getBackend().writeNopData(OS, FragmentSize))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          report_fatal_error("cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec))
              report_fatal_error("non-zero initializer found in section '" +
                                 ELFSec->getSectionName() + "'");
            else
              report_fatal_error(
                  "non-zero initializer found in virtual section");
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a
        // virtual section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, Layout, F);

  assert(OS.tell() - Start == Layout.getSectionAddressSize(Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
                         const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Layout, Fixup, &F, Target, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    if (Target.getSymA() && Target.getSymB() &&
        getBackend().requiresDiffExpressionRelocations()) {
      // The fixup represents the difference between two symbols, which the
      // backend has indicated must be resolved at link time. Split up the
      // fixup into two relocations, one for the add, and one for the sub, and
      // emit both of these. The constant will be associated with the add half
      // of the expression.
      MCFixup FixupAdd = MCFixup::createAddFor(Fixup);
      MCValue TargetAdd =
          MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
      getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd,
                                   FixedValue);
      MCFixup FixupSub = MCFixup::createSubFor(Fixup);
      MCValue TargetSub = MCValue::get(Target.getSymB());
      getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub,
                                   FixedValue);
    } else {
      getWriter().recordRelocation(*this, Layout, &F, Fixup, Target,
                                   FixedValue);
    }
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections; this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout))
    if (getContext().hadError())
      return;

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      // Data and relaxable fragments both have fixups. So only process
      // those here.
      // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups
      // being templated makes this tricky.
      if (isa<MCEncodedFragment>(&Frag) &&
          isa<MCCompactEncodedInstFragment>(&Frag))
        continue;
      if (!isa<MCEncodedFragment>(&Frag) && !isa<MCCVDefRangeFragment>(&Frag) &&
          !isa<MCAlignFragment>(&Frag))
        continue;
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;
      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
        STI = FragWithFixups->getSubtargetInfo();
        assert(!FragWithFixups->hasInstructions() || STI != nullptr);
      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
        STI = FragWithFixups->getSubtargetInfo();
        assert(!FragWithFixups->hasInstructions() || STI != nullptr);
      } else if (auto *FragWithFixups = dyn_cast<MCCVDefRangeFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups =
                     dyn_cast<MCDwarfLineAddrFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *AF = dyn_cast<MCAlignFragment>(&Frag)) {
        // Insert a fixup for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign target hook.
        if (Sec.UseCodeAlign() && AF->hasEmitNops()) {
          getBackend().shouldInsertFixupForCodeAlign(*this, Layout, *AF);
        }
        continue;
      } else if (auto *FragWithFixups =
                     dyn_cast<MCDwarfCallFrameFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else
        llvm_unreachable("Unknown fragment with fixups!");
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}

void MCAssembler::Finish() {
  // Create the layout object.
  MCAsmLayout Layout(*this);
  layout(Layout);

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this, Layout);
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout, WasForced);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed;
  getBackend().relaxInstruction(F.getInst(), *F.getSubtargetInfo(), Relaxed);

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, *F.getSubtargetInfo());

  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}

bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to
  // assemble without either adding padding to an LEB fragment or adding extra
  // padding to a later alignment fragment. To accommodate such tables,
  // relaxation can only increase an LEB fragment size here, not decrease it.
  // See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, OldSize);
  else
    encodeULEB128(Value, OSE, OldSize);
  return OldSize != LF.getContents().size();
}

/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
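/// For example, with a 32-byte boundary, a 5-byte branch starting at address
/// 0x1E occupies bytes 0x1E through 0x22 and therefore crosses the boundary
/// at 0x20.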
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}

bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
                                     MCBoundaryAlignFragment &BF) {
  // An MCBoundaryAlignFragment that doesn't emit NOPs should not be relaxed.
  if (!BF.canEmitNops())
    return false;

  uint64_t AlignedOffset = Layout.getFragmentOffset(BF.getNextNode());
  uint64_t AlignedSize = 0;
  const MCFragment *F = BF.getNextNode();
  // If the branch is unfused, it is emitted into one fragment; otherwise it
  // is emitted into at most two fragments, and the next
  // MCBoundaryAlignFragment (if it exists) also marks the end of the branch.
  for (auto i = 0, N = BF.isFused() ? 2 : 1;
       i != N && !isa<MCBoundaryAlignFragment>(F); ++i, F = F->getNextNode()) {
    AlignedSize += computeFragmentSize(Layout, *F);
  }
  uint64_t OldSize = BF.getSize();
  AlignedOffset -= OldSize;
  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == OldSize)
    return false;
  BF.setSize(NewSize);
  Layout.invalidateFragmentsFrom(&BF);
  return true;
}

bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
                                     MCDwarfLineAddrFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  if (!getBackend().requiresDiffExpressionRelocations()) {
    MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta,
                            AddrDelta, OSE);
  } else {
    uint32_t Offset;
    uint32_t Size;
    bool SetDelta =
        MCDwarfLineAddr::FixedEncode(Context, getDWARFLinetableParams(),
                                     LineDelta, AddrDelta, OSE, &Offset, &Size);
    // Add Fixups for address delta or new address.
    const MCExpr *FixupExpr;
    if (SetDelta) {
      FixupExpr = &DF.getAddrDelta();
    } else {
      const MCBinaryExpr *ABE = cast<MCBinaryExpr>(&DF.getAddrDelta());
      FixupExpr = ABE->getLHS();
    }
    DF.getFixups().push_back(
        MCFixup::create(Offset, FixupExpr,
                        MCFixup::getKindForSize(Size, false /*isPCRel*/)));
  }

  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                              MCDwarfCallFrameFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created call frame with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  DF.getFixups().clear();

  if (getBackend().requiresDiffExpressionRelocations()) {
    uint32_t Offset;
    uint32_t Size;
    MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE, &Offset,
                                          &Size);
    if (Size) {
      DF.getFixups().push_back(MCFixup::create(
          Offset, &DF.getAddrDelta(),
          MCFixup::getKindForSizeInBits(Size /*In bits.*/, false /*isPCRel*/)));
    }
  } else {
    MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
  }

  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
                                         MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
                                  MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
  // Holds the first fragment which needed relaxing during this layout. It
  // will remain NULL if none were relaxed.
  // When a fragment is relaxed, all the fragments following it should get
  // invalidated because their offset is going to change.
  MCFragment *FirstRelaxedFragment = nullptr;

  // Attempt to relax all the fragments in the section.
  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
    // Check if this is a fragment that needs relaxation.
    bool RelaxedFrag = false;
    switch (I->getKind()) {
    default:
      break;
    case MCFragment::FT_Relaxable:
      assert(!getRelaxAll() &&
             "Did not expect a MCRelaxableFragment in RelaxAll mode");
      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
      break;
    case MCFragment::FT_Dwarf:
      RelaxedFrag = relaxDwarfLineAddr(Layout,
                                       *cast<MCDwarfLineAddrFragment>(I));
      break;
    case MCFragment::FT_DwarfFrame:
      RelaxedFrag =
          relaxDwarfCallFrameFragment(Layout,
                                      *cast<MCDwarfCallFrameFragment>(I));
      break;
    case MCFragment::FT_LEB:
      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
      break;
    case MCFragment::FT_BoundaryAlign:
      RelaxedFrag =
          relaxBoundaryAlign(Layout, *cast<MCBoundaryAlignFragment>(I));
      break;
    case MCFragment::FT_CVInlineLines:
      RelaxedFrag =
          relaxCVInlineLineTable(Layout, *cast<MCCVInlineLineTableFragment>(I));
      break;
    case MCFragment::FT_CVDefRange:
      RelaxedFrag = relaxCVDefRange(Layout, *cast<MCCVDefRangeFragment>(I));
      break;
    }
    if (RelaxedFrag && !FirstRelaxedFragment)
      FirstRelaxedFragment = &*I;
  }
  if (FirstRelaxedFragment) {
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
    return true;
  }
  return false;
}

bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  bool WasRelaxed = false;
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSection &Sec = *it;
    while (layoutSectionOnce(Layout, Sec))
      WasRelaxed = true;
  }

  return WasRelaxed;
}

void MCAssembler::finishLayout(MCAsmLayout &Layout) {
  assert(getBackendPtr() && "Expected assembler backend");
  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
    computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
  }
  getBackend().finishLayout(*this, Layout);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin())
      OS << ",\n    ";
    it->dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  for (const_symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie;
       ++it) {
    if (it != symbol_begin())
      OS << ",\n           ";
    OS << "(";
    it->dump();
    OS << ", Index:" << it->getIndex() << ", ";
    OS << ")";
  }
  OS << "]>\n";
}
#endif