1 //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/MC/MCAssembler.h"
10 #include "llvm/ADT/ArrayRef.h"
11 #include "llvm/ADT/SmallString.h"
12 #include "llvm/ADT/SmallVector.h"
13 #include "llvm/ADT/Statistic.h"
14 #include "llvm/ADT/StringRef.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/MC/MCAsmBackend.h"
17 #include "llvm/MC/MCAsmInfo.h"
18 #include "llvm/MC/MCCodeEmitter.h"
19 #include "llvm/MC/MCCodeView.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCDwarf.h"
22 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCFixup.h"
24 #include "llvm/MC/MCInst.h"
25 #include "llvm/MC/MCObjectWriter.h"
26 #include "llvm/MC/MCSection.h"
27 #include "llvm/MC/MCSymbol.h"
28 #include "llvm/MC/MCValue.h"
29 #include "llvm/Support/Alignment.h"
30 #include "llvm/Support/Casting.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/EndianStream.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/LEB128.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include <cassert>
37 #include <cstdint>
38 #include <tuple>
39 #include <utility>
40
41 using namespace llvm;
42
43 namespace llvm {
44 class MCSubtargetInfo;
45 }
46
47 #define DEBUG_TYPE "assembler"
48
49 namespace {
50 namespace stats {
51
52 STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
53 STATISTIC(EmittedRelaxableFragments,
54 "Number of emitted assembler fragments - relaxable");
55 STATISTIC(EmittedDataFragments,
56 "Number of emitted assembler fragments - data");
57 STATISTIC(EmittedAlignFragments,
58 "Number of emitted assembler fragments - align");
59 STATISTIC(EmittedFillFragments,
60 "Number of emitted assembler fragments - fill");
61 STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
62 STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
63 STATISTIC(evaluateFixup, "Number of evaluated fixups");
64 STATISTIC(ObjectBytes, "Number of emitted object file bytes");
65 STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
66 STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
67
68 } // end namespace stats
69 } // end anonymous namespace
70
71 // FIXME FIXME FIXME: There are a number of places in this file where we convert
72 // what is a 64-bit assembler value used for computation into a value in the
73 // object file, which may truncate it. We should detect that truncation where
74 // invalid and report errors back.
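// For example, a 64-bit difference such as (end - start) can be applied to a
// 4-byte fixup field and may be silently truncated if it does not fit.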
75
76 /* *** */
77
78 MCAssembler::MCAssembler(MCContext &Context,
79 std::unique_ptr<MCAsmBackend> Backend,
80 std::unique_ptr<MCCodeEmitter> Emitter,
81 std::unique_ptr<MCObjectWriter> Writer)
82 : Context(Context), Backend(std::move(Backend)),
83 Emitter(std::move(Emitter)), Writer(std::move(Writer)) {
84 if (this->Backend)
85 this->Backend->setAssembler(this);
86 if (this->Writer)
87 this->Writer->setAssembler(this);
88 }
89
90 void MCAssembler::reset() {
91 HasLayout = false;
92 HasFinalLayout = false;
93 RelaxAll = false;
94 Sections.clear();
95 Symbols.clear();
96 ThumbFuncs.clear();
97 BundleAlignSize = 0;
98
99 // reset objects owned by us
100 if (getBackendPtr())
101 getBackendPtr()->reset();
102 if (getEmitterPtr())
103 getEmitterPtr()->reset();
104 if (Writer)
105 Writer->reset();
106 }
107
108 bool MCAssembler::registerSection(MCSection &Section) {
109 if (Section.isRegistered())
110 return false;
111 assert(Section.curFragList()->Head && "allocInitialFragment not called");
112 Sections.push_back(&Section);
113 Section.setIsRegistered(true);
114 return true;
115 }
116
117 bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
118 if (ThumbFuncs.count(Symbol))
119 return true;
120
121 if (!Symbol->isVariable())
122 return false;
123
124 const MCExpr *Expr = Symbol->getVariableValue();
125
126 MCValue V;
127 if (!Expr->evaluateAsRelocatable(V, nullptr))
128 return false;
129
130 if (V.getSubSym() || V.getSpecifier())
131 return false;
132
133 auto *Sym = V.getAddSym();
134 if (!Sym || V.getSpecifier())
135 return false;
136
137 if (!isThumbFunc(Sym))
138 return false;
139
140 ThumbFuncs.insert(Symbol); // Cache it.
141 return true;
142 }
143
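// Evaluate a fixup's value. When RecordReloc is false, this only computes
// Target/Value and returns whether the fixup is fully resolved. When
// RecordReloc is true, the result is handed to the backend's applyFixup,
// which patches Contents and, if the value is not fully resolved, typically
// records a relocation; true is returned in that case.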
144 bool MCAssembler::evaluateFixup(const MCFragment &F, MCFixup &Fixup,
145 MCValue &Target, uint64_t &Value,
146 bool RecordReloc,
147 MutableArrayRef<char> Contents) const {
148 ++stats::evaluateFixup;
149
150 // FIXME: This code has some duplication with recordRelocation. We should
151 // probably merge the two into a single callback that tries to evaluate a
152 // fixup and records a relocation if one is needed.
153
154 // On error claim to have completely evaluated the fixup, to prevent any
155 // further processing from being done.
156 const MCExpr *Expr = Fixup.getValue();
157 Value = 0;
158 if (!Expr->evaluateAsRelocatable(Target, this)) {
159 reportError(Fixup.getLoc(), "expected relocatable expression");
160 return true;
161 }
162
163 bool IsResolved = false;
164 if (auto State = getBackend().evaluateFixup(F, Fixup, Target, Value)) {
165 IsResolved = *State;
166 } else {
167 const MCSymbol *Add = Target.getAddSym();
168 const MCSymbol *Sub = Target.getSubSym();
169 Value += Target.getConstant();
170 if (Add && Add->isDefined())
171 Value += getSymbolOffset(*Add);
172 if (Sub && Sub->isDefined())
173 Value -= getSymbolOffset(*Sub);
174
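// PC-relative fixups are relative to the fixup's own location, so subtract
// the fixup address (fragment offset plus offset within the fragment).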
175 if (Fixup.isPCRel()) {
176 Value -= getFragmentOffset(F) + Fixup.getOffset();
177 if (Add && !Sub && !Add->isUndefined() && !Add->isAbsolute()) {
178 IsResolved = getWriter().isSymbolRefDifferenceFullyResolvedImpl(
179 *Add, F, false, true);
180 }
181 } else {
182 IsResolved = Target.isAbsolute();
183 }
184 }
185
186 if (!RecordReloc)
187 return IsResolved;
188
189 if (IsResolved && mc::isRelocRelocation(Fixup.getKind()))
190 IsResolved = false;
191 getBackend().applyFixup(F, Fixup, Target, Contents, Value, IsResolved);
192 return true;
193 }
194
195 uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
196 assert(getBackendPtr() && "Requires assembler backend");
197 switch (F.getKind()) {
198 case MCFragment::FT_Data:
199 case MCFragment::FT_Relaxable:
200 case MCFragment::FT_LEB:
201 case MCFragment::FT_Dwarf:
202 case MCFragment::FT_DwarfFrame:
203 case MCFragment::FT_CVInlineLines:
204 case MCFragment::FT_CVDefRange:
205 case MCFragment::FT_PseudoProbe:
206 return cast<MCEncodedFragment>(F).getContents().size();
207 case MCFragment::FT_Fill: {
208 auto &FF = cast<MCFillFragment>(F);
209 int64_t NumValues = 0;
210 if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
211 recordError(FF.getLoc(), "expected assembly-time absolute expression");
212 return 0;
213 }
214 int64_t Size = NumValues * FF.getValueSize();
215 if (Size < 0) {
216 recordError(FF.getLoc(), "invalid number of bytes");
217 return 0;
218 }
219 return Size;
220 }
221
222 case MCFragment::FT_Nops:
223 return cast<MCNopsFragment>(F).getNumBytes();
224
225 case MCFragment::FT_BoundaryAlign:
226 return cast<MCBoundaryAlignFragment>(F).getSize();
227
228 case MCFragment::FT_SymbolId:
229 return 4;
230
231 case MCFragment::FT_Align: {
232 const MCAlignFragment &AF = cast<MCAlignFragment>(F);
233 unsigned Offset = getFragmentOffset(AF);
234 unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
235
236 // Insert extra Nops for code alignment if the target defines the
237 // shouldInsertExtraNopBytesForCodeAlign target hook.
238 if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
239 getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
240 return Size;
241
242 // If we are padding with nops, force the padding to be larger than the
243 // minimum nop size.
244 if (Size > 0 && AF.hasEmitNops()) {
245 while (Size % getBackend().getMinimumNopSize())
246 Size += AF.getAlignment().value();
247 }
248 if (Size > AF.getMaxBytesToEmit())
249 return 0;
250 return Size;
251 }
252
253 case MCFragment::FT_Org: {
254 const MCOrgFragment &OF = cast<MCOrgFragment>(F);
255 MCValue Value;
256 if (!OF.getOffset().evaluateAsValue(Value, *this)) {
257 recordError(OF.getLoc(), "expected assembly-time absolute expression");
258 return 0;
259 }
260
261 uint64_t FragmentOffset = getFragmentOffset(OF);
262 int64_t TargetLocation = Value.getConstant();
263 if (const auto *SA = Value.getAddSym()) {
264 uint64_t Val;
265 if (!getSymbolOffset(*SA, Val)) {
266 recordError(OF.getLoc(), "expected absolute expression");
267 return 0;
268 }
269 TargetLocation += Val;
270 }
271 int64_t Size = TargetLocation - FragmentOffset;
272 if (Size < 0 || Size >= 0x40000000) {
273 recordError(OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
274 "' (at offset '" + Twine(FragmentOffset) +
275 "')");
276 return 0;
277 }
278 return Size;
279 }
280 }
281
282 llvm_unreachable("invalid fragment kind");
283 }
284
285 // Compute the amount of padding required before the fragment \p F to
286 // obey bundling restrictions, where \p FOffset is the fragment's offset in
287 // its section and \p FSize is the fragment's size.
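// For example, with a 16-byte bundle, a 6-byte fragment at offset 12 would
// cross the boundary at 16 and gets 4 bytes of padding; if it must instead
// align to the bundle end, it gets 14 bytes so that it ends exactly at 32.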
288 static uint64_t computeBundlePadding(unsigned BundleSize,
289 const MCEncodedFragment *F,
290 uint64_t FOffset, uint64_t FSize) {
291 uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
292 uint64_t EndOfFragment = OffsetInBundle + FSize;
293
294 // There are two kinds of bundling restrictions:
295 //
296 // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
297 // *end* on a bundle boundary.
298 // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
299 // would, add padding until the end of the bundle so that the fragment
300 // will start in a new one.
301 if (F->alignToBundleEnd()) {
302 // Three possibilities here:
303 //
304 // A) The fragment just happens to end at a bundle boundary, so we're good.
305 // B) The fragment ends before the current bundle boundary: pad it just
306 // enough to reach the boundary.
307 // C) The fragment ends after the current bundle boundary: pad it until it
308 // reaches the end of the next bundle boundary.
309 //
310 // Note: this code could be made shorter with some modulo trickery, but it's
311 // intentionally kept in its more explicit form for simplicity.
312 if (EndOfFragment == BundleSize)
313 return 0;
314 else if (EndOfFragment < BundleSize)
315 return BundleSize - EndOfFragment;
316 else { // EndOfFragment > BundleSize
317 return 2 * BundleSize - EndOfFragment;
318 }
319 } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
320 return BundleSize - OffsetInBundle;
321 else
322 return 0;
323 }
324
325 void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
326 // If bundling is enabled and this fragment has instructions in it, it has to
327 // obey the bundling restrictions. With padding, we'll have:
328 //
329 //
330 // BundlePadding
331 // |||
332 // -------------------------------------
333 // Prev |##########| F |
334 // -------------------------------------
335 // ^
336 // |
337 // F->Offset
338 //
339 // The fragment's offset will point to after the padding, and its computed
340 // size won't include the padding.
341 //
342 // ".align N" is an example of a directive that introduces multiple
343 // fragments. We could add a special case to handle ".align N" by emitting
344 // within-fragment padding (which would produce less padding when N is less
345 // than the bundle size), but for now we don't.
346 //
347 assert(isa<MCEncodedFragment>(F) &&
348 "Only MCEncodedFragment implementations have instructions");
349 MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
350 uint64_t FSize = computeFragmentSize(*EF);
351
352 if (FSize > getBundleAlignSize())
353 report_fatal_error("Fragment can't be larger than a bundle size");
354
355 uint64_t RequiredBundlePadding =
356 computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
357 if (RequiredBundlePadding > UINT8_MAX)
358 report_fatal_error("Padding cannot exceed 255 bytes");
359 EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
360 EF->Offset += RequiredBundlePadding;
361 if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
362 if (DF->getContents().empty())
363 DF->Offset = EF->Offset;
364 }
365
366 // Simple getSymbolOffset helper for the non-variable case.
367 static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
368 bool ReportError, uint64_t &Val) {
369 if (!S.getFragment()) {
370 if (ReportError)
371 reportFatalUsageError("cannot evaluate undefined symbol '" + S.getName() +
372 "'");
373 return false;
374 }
375 Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
376 return true;
377 }
378
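// For a variable symbol such as ".set x, a - b + 4", the offset is
// offset(a) - offset(b) + 4, computed recursively from the evaluated value.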
379 static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
380 bool ReportError, uint64_t &Val) {
381 if (!S.isVariable())
382 return getLabelOffset(Asm, S, ReportError, Val);
383
384 // If SD is a variable, evaluate it.
385 MCValue Target;
386 if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
387 reportFatalUsageError("cannot evaluate equated symbol '" + S.getName() +
388 "'");
389
390 uint64_t Offset = Target.getConstant();
391
392 const MCSymbol *A = Target.getAddSym();
393 if (A) {
394 uint64_t ValA;
395 // FIXME: On most platforms, `Target`'s component symbols are labels from
396 // having been simplified during evaluation, but on Mach-O they can be
397 // variables due to PR19203. This, and the line below for `B` can be
398 // restored to call `getLabelOffset` when PR19203 is fixed.
399 if (!getSymbolOffsetImpl(Asm, *A, ReportError, ValA))
400 return false;
401 Offset += ValA;
402 }
403
404 const MCSymbol *B = Target.getSubSym();
405 if (B) {
406 uint64_t ValB;
407 if (!getSymbolOffsetImpl(Asm, *B, ReportError, ValB))
408 return false;
409 Offset -= ValB;
410 }
411
412 Val = Offset;
413 return true;
414 }
415
416 bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
417 return getSymbolOffsetImpl(*this, S, false, Val);
418 }
419
420 uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
421 uint64_t Val;
422 getSymbolOffsetImpl(*this, S, true, Val);
423 return Val;
424 }
425
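// Resolve a (possibly variable) symbol to the underlying symbol it is defined
// in terms of. Subtraction expressions and common symbols cannot be
// represented here and are reported as errors.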
426 const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
427 assert(HasLayout);
428 if (!Symbol.isVariable())
429 return &Symbol;
430
431 const MCExpr *Expr = Symbol.getVariableValue();
432 MCValue Value;
433 if (!Expr->evaluateAsValue(Value, *this)) {
434 reportError(Expr->getLoc(), "expression could not be evaluated");
435 return nullptr;
436 }
437
438 const MCSymbol *SymB = Value.getSubSym();
439 if (SymB) {
440 reportError(Expr->getLoc(),
441 Twine("symbol '") + SymB->getName() +
442 "' could not be evaluated in a subtraction expression");
443 return nullptr;
444 }
445
446 const MCSymbol *A = Value.getAddSym();
447 if (!A)
448 return nullptr;
449
450 const MCSymbol &ASym = *A;
451 if (ASym.isCommon()) {
452 reportError(Expr->getLoc(), "Common symbol '" + ASym.getName() +
453 "' cannot be used in assignment expr");
454 return nullptr;
455 }
456
457 return &ASym;
458 }
459
460 uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
461 assert(HasLayout);
462 // The size is the last fragment's end offset.
463 const MCFragment &F = *Sec.curFragList()->Tail;
464 return getFragmentOffset(F) + computeFragmentSize(F);
465 }
466
467 uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
468 // Virtual sections have no file size.
469 if (Sec.isVirtualSection())
470 return 0;
471 return getSectionAddressSize(Sec);
472 }
473
474 bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
475 bool Changed = !Symbol.isRegistered();
476 if (Changed) {
477 Symbol.setIsRegistered(true);
478 Symbols.push_back(&Symbol);
479 }
480 return Changed;
481 }
482
483 void MCAssembler::writeFragmentPadding(raw_ostream &OS,
484 const MCEncodedFragment &EF,
485 uint64_t FSize) const {
486 assert(getBackendPtr() && "Expected assembler backend");
487 // Should NOP padding be written out before this fragment?
488 unsigned BundlePadding = EF.getBundlePadding();
489 if (BundlePadding > 0) {
490 assert(isBundlingEnabled() &&
491 "Writing bundle padding with disabled bundling");
492 assert(EF.hasInstructions() &&
493 "Writing bundle padding for a fragment without instructions");
494
495 unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
496 const MCSubtargetInfo *STI = EF.getSubtargetInfo();
497 if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
498 // If the padding itself crosses a bundle boundary, it must be emitted
499 // in 2 pieces, since even nop instructions must not cross boundaries.
500 // v--------------v <- BundleAlignSize
501 // v---------v <- BundlePadding
502 // ----------------------------
503 // | Prev |####|####| F |
504 // ----------------------------
505 // ^-------------------^ <- TotalLength
506 unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
507 if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
508 report_fatal_error("unable to write NOP sequence of " +
509 Twine(DistanceToBoundary) + " bytes");
510 BundlePadding -= DistanceToBoundary;
511 }
512 if (!getBackend().writeNopData(OS, BundlePadding, STI))
513 report_fatal_error("unable to write NOP sequence of " +
514 Twine(BundlePadding) + " bytes");
515 }
516 }
517
518 /// Write the fragment \p F to the output file.
519 static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
520 const MCFragment &F) {
521 // FIXME: Embed in fragments instead?
522 uint64_t FragmentSize = Asm.computeFragmentSize(F);
523
524 llvm::endianness Endian = Asm.getBackend().Endian;
525
526 if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
527 Asm.writeFragmentPadding(OS, *EF, FragmentSize);
528
529 // This variable (and its dummy usage) is to participate in the assert at
530 // the end of the function.
531 uint64_t Start = OS.tell();
532 (void) Start;
533
534 ++stats::EmittedFragments;
535
536 switch (F.getKind()) {
537 case MCFragment::FT_Data:
538 case MCFragment::FT_Relaxable:
539 case MCFragment::FT_LEB:
540 case MCFragment::FT_Dwarf:
541 case MCFragment::FT_DwarfFrame:
542 case MCFragment::FT_CVInlineLines:
543 case MCFragment::FT_CVDefRange:
544 case MCFragment::FT_PseudoProbe: {
545 if (F.getKind() == MCFragment::FT_Data)
546 ++stats::EmittedDataFragments;
547 else if (F.getKind() == MCFragment::FT_Relaxable)
548 ++stats::EmittedRelaxableFragments;
549 const auto &EF = cast<MCEncodedFragment>(F);
550 OS << StringRef(EF.getContents().data(), EF.getContents().size());
551 break;
552 }
553 case MCFragment::FT_Align: {
554 ++stats::EmittedAlignFragments;
555 const MCAlignFragment &AF = cast<MCAlignFragment>(F);
556 assert(AF.getFillLen() && "Invalid virtual align in concrete fragment!");
557
558 uint64_t Count = FragmentSize / AF.getFillLen();
559 assert(FragmentSize % AF.getFillLen() == 0 &&
560 "computeFragmentSize computed size is incorrect");
561
562 // If we are aligning with nops, ask the target to emit the right data to
563 // fill all Count bytes; the fill value below is only used when nops are
564 // not requested.
566 if (AF.hasEmitNops()) {
567 if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
568 report_fatal_error("unable to write nop sequence of " +
569 Twine(Count) + " bytes");
570 break;
571 }
572
573 // Otherwise, write out in multiples of the value size.
574 for (uint64_t i = 0; i != Count; ++i) {
575 switch (AF.getFillLen()) {
576 default: llvm_unreachable("Invalid size!");
577 case 1:
578 OS << char(AF.getFill());
579 break;
580 case 2:
581 support::endian::write<uint16_t>(OS, AF.getFill(), Endian);
582 break;
583 case 4:
584 support::endian::write<uint32_t>(OS, AF.getFill(), Endian);
585 break;
586 case 8:
587 support::endian::write<uint64_t>(OS, AF.getFill(), Endian);
588 break;
589 }
590 }
591 break;
592 }
593
594 case MCFragment::FT_Fill: {
595 ++stats::EmittedFillFragments;
596 const MCFillFragment &FF = cast<MCFillFragment>(F);
597 uint64_t V = FF.getValue();
598 unsigned VSize = FF.getValueSize();
599 const unsigned MaxChunkSize = 16;
600 char Data[MaxChunkSize];
601 assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
602 // Duplicate V into Data as a byte vector to reduce the number of writes
603 // done. As such, do the endian conversion here.
604 for (unsigned I = 0; I != VSize; ++I) {
605 unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1);
606 Data[I] = uint8_t(V >> (index * 8));
607 }
608 for (unsigned I = VSize; I < MaxChunkSize; ++I)
609 Data[I] = Data[I - VSize];
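// For example, with VSize == 2 and V == 0xABCD on a little-endian target,
// Data now holds the repeating byte pattern CD AB CD AB ... for the chunk.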
610
611 // NumPerChunk is the number of VSize-byte values that fit in a chunk.
612 const unsigned NumPerChunk = MaxChunkSize / VSize;
613 // Set ChunkSize to the largest multiple of VSize that fits in Data.
614 const unsigned ChunkSize = VSize * NumPerChunk;
615
616 // Do copies by chunk.
617 StringRef Ref(Data, ChunkSize);
618 for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
619 OS << Ref;
620
621 // do remainder if needed.
622 unsigned TrailingCount = FragmentSize % ChunkSize;
623 if (TrailingCount)
624 OS.write(Data, TrailingCount);
625 break;
626 }
627
628 case MCFragment::FT_Nops: {
629 ++stats::EmittedNopsFragments;
630 const MCNopsFragment &NF = cast<MCNopsFragment>(F);
631
632 int64_t NumBytes = NF.getNumBytes();
633 int64_t ControlledNopLength = NF.getControlledNopLength();
634 int64_t MaximumNopLength =
635 Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());
636
637 assert(NumBytes > 0 && "Expected positive NOPs fragment size");
638 assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");
639
640 if (ControlledNopLength > MaximumNopLength) {
641 Asm.reportError(NF.getLoc(), "illegal NOP size " +
642 std::to_string(ControlledNopLength) +
643 ". (expected within [0, " +
644 std::to_string(MaximumNopLength) + "])");
645 // Clamp the NOP length as reportError does not stop the execution
646 // immediately.
647 ControlledNopLength = MaximumNopLength;
648 }
649
650 // Use maximum value if the size of each NOP is not specified
651 if (!ControlledNopLength)
652 ControlledNopLength = MaximumNopLength;
653
654 while (NumBytes) {
655 uint64_t NumBytesToEmit =
656 (uint64_t)std::min(NumBytes, ControlledNopLength);
657 assert(NumBytesToEmit && "try to emit empty NOP instruction");
658 if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
659 NF.getSubtargetInfo())) {
660 report_fatal_error("unable to write nop sequence of the remaining " +
661 Twine(NumBytesToEmit) + " bytes");
662 break;
663 }
664 NumBytes -= NumBytesToEmit;
665 }
666 break;
667 }
668
669 case MCFragment::FT_BoundaryAlign: {
670 const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
671 if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
672 report_fatal_error("unable to write nop sequence of " +
673 Twine(FragmentSize) + " bytes");
674 break;
675 }
676
677 case MCFragment::FT_SymbolId: {
678 const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
679 support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
680 break;
681 }
682
683 case MCFragment::FT_Org: {
684 ++stats::EmittedOrgFragments;
685 const MCOrgFragment &OF = cast<MCOrgFragment>(F);
686
687 for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
688 OS << char(OF.getValue());
689
690 break;
691 }
692
693 }
694
695 assert(OS.tell() - Start == FragmentSize &&
696 "The stream should advance by fragment size");
697 }
698
699 void MCAssembler::writeSectionData(raw_ostream &OS,
700 const MCSection *Sec) const {
701 assert(getBackendPtr() && "Expected assembler backend");
702
703 // Ignore virtual sections.
704 if (Sec->isVirtualSection()) {
705 assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");
706
707 // Check that contents are only things legal inside a virtual section.
708 for (const MCFragment &F : *Sec) {
709 switch (F.getKind()) {
710 default: llvm_unreachable("Invalid fragment in virtual section!");
711 case MCFragment::FT_Data: {
712 // Check that we aren't trying to write non-zero contents (or fixups)
713 // into a virtual section. This is to support clients which use standard
714 // directives to fill the contents of virtual sections.
715 const MCDataFragment &DF = cast<MCDataFragment>(F);
716 if (DF.getFixups().size())
717 reportError(SMLoc(), Sec->getVirtualSectionKind() + " section '" +
718 Sec->getName() + "' cannot have fixups");
719 for (char C : DF.getContents())
720 if (C) {
721 reportError(SMLoc(), Sec->getVirtualSectionKind() + " section '" +
722 Sec->getName() +
723 "' cannot have non-zero initializers");
724 break;
725 }
726 break;
727 }
728 case MCFragment::FT_Align:
729 // Check that we aren't trying to write a non-zero value into a virtual
730 // section.
731 assert((cast<MCAlignFragment>(F).getFillLen() == 0 ||
732 cast<MCAlignFragment>(F).getFill() == 0) &&
733 "Invalid align in virtual section!");
734 break;
735 case MCFragment::FT_Fill:
736 assert((cast<MCFillFragment>(F).getValue() == 0) &&
737 "Invalid fill in virtual section!");
738 break;
739 case MCFragment::FT_Org:
740 break;
741 }
742 }
743
744 return;
745 }
746
747 uint64_t Start = OS.tell();
748 (void)Start;
749
750 for (const MCFragment &F : *Sec)
751 writeFragment(OS, *this, F);
752
753 flushPendingErrors();
754 assert(getContext().hadError() ||
755 OS.tell() - Start == getSectionAddressSize(*Sec));
756 }
757
758 void MCAssembler::layout() {
759 assert(getBackendPtr() && "Expected assembler backend");
760 DEBUG_WITH_TYPE("mc-dump-pre", {
761 errs() << "assembler backend - pre-layout\n--\n";
762 dump();
763 });
764
765 // Assign section ordinals.
766 unsigned SectionIndex = 0;
767 for (MCSection &Sec : *this) {
768 Sec.setOrdinal(SectionIndex++);
769
770 // Chain together fragments from all subsections.
771 if (Sec.Subsections.size() > 1) {
772 MCDataFragment Dummy;
773 MCFragment *Tail = &Dummy;
774 for (auto &[_, List] : Sec.Subsections) {
775 assert(List.Head);
776 Tail->Next = List.Head;
777 Tail = List.Tail;
778 }
779 Sec.Subsections.clear();
780 Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
781 Sec.CurFragList = &Sec.Subsections[0].second;
782
783 unsigned FragmentIndex = 0;
784 for (MCFragment &Frag : Sec)
785 Frag.setLayoutOrder(FragmentIndex++);
786 }
787 }
788
789 // Layout until everything fits.
790 this->HasLayout = true;
791 for (MCSection &Sec : *this)
792 layoutSection(Sec);
793 unsigned FirstStable = Sections.size();
794 while ((FirstStable = relaxOnce(FirstStable)) > 0)
795 if (getContext().hadError())
796 return;
797
798 // Some targets might want to adjust fragment offsets. If so, perform another
799 // layout iteration.
800 if (getBackend().finishLayout(*this))
801 for (MCSection &Sec : *this)
802 layoutSection(Sec);
803
804 flushPendingErrors();
805
806 DEBUG_WITH_TYPE("mc-dump", {
807 errs() << "assembler backend - final-layout\n--\n";
808 dump(); });
809
810 // Allow the object writer a chance to perform post-layout binding (for
811 // example, to set the index fields in the symbol data).
812 getWriter().executePostLayoutBinding();
813
814 // Fragment sizes are finalized. For RISC-V linker relaxation, this flag
815 // helps check whether a PC-relative fixup is fully resolved.
816 this->HasFinalLayout = true;
817
818 // Evaluate and apply the fixups, generating relocation entries as necessary.
819 for (MCSection &Sec : *this) {
820 for (MCFragment &Frag : Sec) {
821 // Process fragments with fixups here.
822 if (auto *F = dyn_cast<MCEncodedFragment>(&Frag)) {
823 auto Contents = F->getContents();
824 for (MCFixup &Fixup : F->getFixups()) {
825 uint64_t FixedValue;
826 MCValue Target;
827 evaluateFixup(Frag, Fixup, Target, FixedValue,
828 /*RecordReloc=*/true, Contents);
829 }
830 } else if (auto *AF = dyn_cast<MCAlignFragment>(&Frag)) {
831 // For RISC-V linker relaxation, an alignment relocation might be
832 // needed.
833 if (AF->hasEmitNops())
834 getBackend().shouldInsertFixupForCodeAlign(*this, *AF);
835 }
836 }
837 }
838 }
839
840 void MCAssembler::Finish() {
841 layout();
842
843 // Write the object file.
844 stats::ObjectBytes += getWriter().writeObject();
845
846 HasLayout = false;
847 assert(PendingErrors.empty());
848 }
849
850 bool MCAssembler::fixupNeedsRelaxation(const MCRelaxableFragment &F,
851 const MCFixup &Fixup) const {
852 assert(getBackendPtr() && "Expected assembler backend");
853 MCValue Target;
854 uint64_t Value;
855 bool Resolved = evaluateFixup(F, const_cast<MCFixup &>(Fixup), Target, Value,
856 /*RecordReloc=*/false, {});
857 return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Target, Value,
858 Resolved);
859 }
860
861 bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
862 assert(getEmitterPtr() &&
863 "Expected CodeEmitter defined for relaxInstruction");
864 // If this inst doesn't ever need relaxation, ignore it. This occurs when we
865 // are intentionally pushing out inst fragments, or because we relaxed a
866 // previous instruction to one that doesn't need relaxation.
867 if (!getBackend().mayNeedRelaxation(F.getOpcode(), F.getOperands(),
868 *F.getSubtargetInfo()))
869 return false;
870
871 bool DoRelax = false;
872 for (const MCFixup &Fixup : F.getFixups())
873 if ((DoRelax = fixupNeedsRelaxation(F, Fixup)))
874 break;
875 if (!DoRelax)
876 return false;
877
878 ++stats::RelaxedInstructions;
879
880 // TODO Refactor relaxInstruction to accept MCRelaxableFragment and remove
881 // `setInst`.
882 MCInst Relaxed = F.getInst();
883 getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());
884
885 // Encode the new instruction.
886 F.setInst(Relaxed);
887 SmallVector<char, 16> Data;
888 SmallVector<MCFixup, 1> Fixups;
889 getEmitter().encodeInstruction(Relaxed, Data, Fixups, *F.getSubtargetInfo());
890 F.setContents(Data);
891 F.setFixups(Fixups);
892 return true;
893 }
894
895 bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
896 const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
897 unsigned PadTo = OldSize;
898 int64_t Value;
899 LF.clearFixups();
900 // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
901 // requires that .uleb128 A-B is foldable where A and B reside in different
902 // fragments. This is used by __gcc_except_table.
903 bool Abs = getWriter().getSubsectionsViaSymbols()
904 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
905 : LF.getValue().evaluateAsAbsolute(Value, *this);
906 if (!Abs) {
907 bool Relaxed, UseZeroPad;
908 std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(LF, Value);
909 if (!Relaxed) {
910 reportError(LF.getValue().getLoc(),
911 Twine(LF.isSigned() ? ".s" : ".u") +
912 "leb128 expression is not absolute");
913 LF.setValue(MCConstantExpr::create(0, Context));
914 }
915 uint8_t Tmp[10]; // maximum size: ceil(64/7)
916 PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
917 if (UseZeroPad)
918 Value = 0;
919 }
920 uint8_t Data[16];
921 size_t Size = 0;
922 // The compiler can generate EH table assembly that is impossible to assemble
923 // without either adding padding to an LEB fragment or adding extra padding
924 // to a later alignment fragment. To accommodate such tables, relaxation can
925 // only increase an LEB fragment size here, not decrease it. See PR35809.
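// (A padded LEB encoding is still valid: e.g. 0x83 0x00 encodes the value 3
// in two bytes instead of one.)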
926 if (LF.isSigned())
927 Size = encodeSLEB128(Value, Data, PadTo);
928 else
929 Size = encodeULEB128(Value, Data, PadTo);
930 LF.setContents({reinterpret_cast<char *>(Data), Size});
931 return OldSize != Size;
932 }
933
934 /// Check if the branch crosses the boundary.
935 ///
936 /// \param StartAddr start address of the fused/unfused branch.
937 /// \param Size size of the fused/unfused branch.
938 /// \param BoundaryAlignment alignment requirement of the branch.
939 /// \returns true if the branch crosses the boundary.
940 static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
941 Align BoundaryAlignment) {
942 uint64_t EndAddr = StartAddr + Size;
943 return (StartAddr >> Log2(BoundaryAlignment)) !=
944 ((EndAddr - 1) >> Log2(BoundaryAlignment));
945 }
946
947 /// Check if the branch is against the boundary.
948 ///
949 /// \param StartAddr start address of the fused/unfused branch.
950 /// \param Size size of the fused/unfused branch.
951 /// \param BoundaryAlignment alignment requirement of the branch.
952 /// \returns true if the branch is against the boundary.
953 static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
954 Align BoundaryAlignment) {
955 uint64_t EndAddr = StartAddr + Size;
956 return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
957 }
958
959 /// Check if the branch needs padding.
960 ///
961 /// \param StartAddr start address of the fused/unfused branch.
962 /// \param Size size of the fused/unfused branch.
963 /// \param BoundaryAlignment alignment requirement of the branch.
964 /// \returns true if the branch needs padding.
965 static bool needPadding(uint64_t StartAddr, uint64_t Size,
966 Align BoundaryAlignment) {
967 return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
968 isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
969 }
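// For example, with a 32-byte boundary, a 4-byte branch starting at 0x3e
// crosses the boundary at 0x40, and one starting at 0x3c ends exactly against
// it; needPadding reports that padding is required in both cases.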
970
971 bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
972 // A BoundaryAlignFragment that doesn't need to align any fragment should
973 // not be relaxed.
974 if (!BF.getLastFragment())
975 return false;
976
977 uint64_t AlignedOffset = getFragmentOffset(BF);
978 uint64_t AlignedSize = 0;
979 for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
980 AlignedSize += computeFragmentSize(*F);
981 if (F == BF.getLastFragment())
982 break;
983 }
984
985 Align BoundaryAlignment = BF.getAlignment();
986 uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
987 ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
988 : 0U;
989 if (NewSize == BF.getSize())
990 return false;
991 BF.setSize(NewSize);
992 return true;
993 }
994
995 bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
996 bool WasRelaxed;
997 if (getBackend().relaxDwarfLineAddr(DF, WasRelaxed))
998 return WasRelaxed;
999
1000 MCContext &Context = getContext();
1001 auto OldSize = DF.getContents().size();
1002 int64_t AddrDelta;
1003 bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
1004 assert(Abs && "We created a line delta with an invalid expression");
1005 (void)Abs;
1006 int64_t LineDelta;
1007 LineDelta = DF.getLineDelta();
1008 SmallVector<char, 8> Data;
1009
1010 MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
1011 AddrDelta, Data);
1012 DF.setContents(Data);
1013 DF.clearFixups();
1014 return OldSize != Data.size();
1015 }
1016
1017 bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
1018 bool WasRelaxed;
1019 if (getBackend().relaxDwarfCFA(DF, WasRelaxed))
1020 return WasRelaxed;
1021
1022 MCContext &Context = getContext();
1023 int64_t Value;
1024 bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
1025 if (!Abs) {
1026 reportError(DF.getAddrDelta().getLoc(),
1027 "invalid CFI advance_loc expression");
1028 DF.setAddrDelta(MCConstantExpr::create(0, Context));
1029 return false;
1030 }
1031
1032 auto OldSize = DF.getContents().size();
1033 SmallVector<char, 8> Data;
1034 MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
1035 DF.setContents(Data);
1036 DF.clearFixups();
1037 return OldSize != Data.size();
1038 }
1039
1040 bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
1041 unsigned OldSize = F.getContents().size();
1042 getContext().getCVContext().encodeInlineLineTable(*this, F);
1043 return OldSize != F.getContents().size();
1044 }
1045
1046 bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
1047 unsigned OldSize = F.getContents().size();
1048 getContext().getCVContext().encodeDefRange(*this, F);
1049 return OldSize != F.getContents().size();
1050 }
1051
1052 bool MCAssembler::relaxFill(MCFillFragment &F) {
1053 uint64_t Size = computeFragmentSize(F);
1054 if (F.getSize() == Size)
1055 return false;
1056 F.setSize(Size);
1057 return true;
1058 }
1059
1060 bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
1061 uint64_t OldSize = PF.getContents().size();
1062 int64_t AddrDelta;
1063 bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
1064 assert(Abs && "We created a pseudo probe with an invalid expression");
1065 (void)Abs;
1066 SmallVector<char, 8> Data;
1067 raw_svector_ostream OSE(Data);
1068
1069 // AddrDelta is a signed integer
1070 encodeSLEB128(AddrDelta, OSE, OldSize);
1071 PF.setContents(Data);
1072 PF.clearFixups();
1073 return OldSize != Data.size();
1074 }
1075
1076 bool MCAssembler::relaxFragment(MCFragment &F) {
1077 switch(F.getKind()) {
1078 default:
1079 return false;
1080 case MCFragment::FT_Relaxable:
1081 assert(!getRelaxAll() &&
1082 "Did not expect a MCRelaxableFragment in RelaxAll mode");
1083 return relaxInstruction(cast<MCRelaxableFragment>(F));
1084 case MCFragment::FT_Dwarf:
1085 return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
1086 case MCFragment::FT_DwarfFrame:
1087 return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
1088 case MCFragment::FT_LEB:
1089 return relaxLEB(cast<MCLEBFragment>(F));
1090 case MCFragment::FT_BoundaryAlign:
1091 return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
1092 case MCFragment::FT_CVInlineLines:
1093 return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
1094 case MCFragment::FT_CVDefRange:
1095 return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
1096 case MCFragment::FT_Fill:
1097 return relaxFill(cast<MCFillFragment>(F));
1098 case MCFragment::FT_PseudoProbe:
1099 return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
1100 }
1101 }
1102
1103 void MCAssembler::layoutSection(MCSection &Sec) {
1104 MCFragment *Prev = nullptr;
1105 uint64_t Offset = 0;
1106 for (MCFragment &F : Sec) {
1107 F.Offset = Offset;
1108 if (LLVM_UNLIKELY(isBundlingEnabled())) {
1109 if (F.hasInstructions()) {
1110 layoutBundle(Prev, &F);
1111 Offset = F.Offset;
1112 }
1113 Prev = &F;
1114 }
1115 Offset += computeFragmentSize(F);
1116 }
1117 }
1118
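// Run one relaxation pass over Sections [0, FirstStable). The return value is
// the number of leading sections that may still need another pass; 0 means
// the layout has converged.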
1119 unsigned MCAssembler::relaxOnce(unsigned FirstStable) {
1120 ++stats::RelaxationSteps;
1121 PendingErrors.clear();
1122
1123 unsigned Res = 0;
1124 for (unsigned I = 0; I != FirstStable; ++I) {
1125 // Assume each iteration finalizes at least one extra fragment. If the
1126 // layout does not converge after N+1 iterations, bail out.
1127 auto &Sec = *Sections[I];
1128 auto MaxIter = Sec.curFragList()->Tail->getLayoutOrder() + 1;
1129 for (;;) {
1130 bool Changed = false;
1131 for (MCFragment &F : Sec)
1132 if (relaxFragment(F))
1133 Changed = true;
1134
1135 if (!Changed)
1136 break;
1137 // If any fragment changed size, it might impact the layout of subsequent
1138 // sections. Therefore, we must re-evaluate all sections.
1139 FirstStable = Sections.size();
1140 Res = I;
1141 if (--MaxIter == 0)
1142 break;
1143 layoutSection(Sec);
1144 }
1145 }
1146 // The subsequent relaxOnce call only needs to visit Sections [0,Res);
1147 // sections at or beyond Res remain stable unless an earlier section changes.
1148 return Res;
1149 }
1150
1151 void MCAssembler::reportError(SMLoc L, const Twine &Msg) const {
1152 getContext().reportError(L, Msg);
1153 }
1154
1155 void MCAssembler::recordError(SMLoc Loc, const Twine &Msg) const {
1156 PendingErrors.emplace_back(Loc, Msg.str());
1157 }
1158
1159 void MCAssembler::flushPendingErrors() const {
1160 for (auto &Err : PendingErrors)
1161 reportError(Err.first, Err.second);
1162 PendingErrors.clear();
1163 }
1164
1165 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1166 LLVM_DUMP_METHOD void MCAssembler::dump() const {
1167 raw_ostream &OS = errs();
1168 DenseMap<const MCFragment *, SmallVector<const MCSymbol *, 0>> FragToSyms;
1169 // Scan symbols and build a map of fragments to their corresponding symbols.
1170 // For variable symbols, we don't want to call their getFragment, which might
1171 // modify `Fragment`.
1172 for (const MCSymbol &Sym : symbols())
1173 if (!Sym.isVariable())
1174 if (auto *F = Sym.getFragment())
1175 FragToSyms.try_emplace(F).first->second.push_back(&Sym);
1176
1177 OS << "Sections:[";
1178 for (const MCSection &Sec : *this) {
1179 OS << '\n';
1180 Sec.dump(&FragToSyms);
1181 }
1182 OS << "\n]\n";
1183 }
1184 #endif
1185
1186 SMLoc MCFixup::getLoc() const {
1187 if (auto *E = getValue())
1188 return E->getLoc();
1189 return {};
1190 }
1191