//===- MachineFunction.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "LiveDebugValues/LiveDebugValues.h"

using namespace llvm;

#define DEBUG_TYPE "codegen"

static cl::opt<unsigned> AlignAllFunctions(
    "align-all-functions",
    cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
4 " 88 "means align on 16B boundaries)."), 89 cl::init(0), cl::Hidden); 90 91 static const char *getPropertyName(MachineFunctionProperties::Property Prop) { 92 using P = MachineFunctionProperties::Property; 93 94 // clang-format off 95 switch(Prop) { 96 case P::FailedISel: return "FailedISel"; 97 case P::IsSSA: return "IsSSA"; 98 case P::Legalized: return "Legalized"; 99 case P::NoPHIs: return "NoPHIs"; 100 case P::NoVRegs: return "NoVRegs"; 101 case P::RegBankSelected: return "RegBankSelected"; 102 case P::Selected: return "Selected"; 103 case P::TracksLiveness: return "TracksLiveness"; 104 case P::TiedOpsRewritten: return "TiedOpsRewritten"; 105 case P::FailsVerification: return "FailsVerification"; 106 case P::TracksDebugUserValues: return "TracksDebugUserValues"; 107 } 108 // clang-format on 109 llvm_unreachable("Invalid machine function property"); 110 } 111 112 // Pin the vtable to this file. 113 void MachineFunction::Delegate::anchor() {} 114 115 void MachineFunctionProperties::print(raw_ostream &OS) const { 116 const char *Separator = ""; 117 for (BitVector::size_type I = 0; I < Properties.size(); ++I) { 118 if (!Properties[I]) 119 continue; 120 OS << Separator << getPropertyName(static_cast<Property>(I)); 121 Separator = ", "; 122 } 123 } 124 125 //===----------------------------------------------------------------------===// 126 // MachineFunction implementation 127 //===----------------------------------------------------------------------===// 128 129 // Out-of-line virtual method. 130 MachineFunctionInfo::~MachineFunctionInfo() = default; 131 132 void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) { 133 MBB->getParent()->deleteMachineBasicBlock(MBB); 134 } 135 136 static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI, 137 const Function &F) { 138 if (auto MA = F.getFnStackAlign()) 139 return MA->value(); 140 return STI->getFrameLowering()->getStackAlign().value(); 141 } 142 143 MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target, 144 const TargetSubtargetInfo &STI, 145 unsigned FunctionNum, MachineModuleInfo &mmi) 146 : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) { 147 FunctionNumber = FunctionNum; 148 init(); 149 } 150 151 void MachineFunction::handleInsertion(MachineInstr &MI) { 152 if (TheDelegate) 153 TheDelegate->MF_HandleInsertion(MI); 154 } 155 156 void MachineFunction::handleRemoval(MachineInstr &MI) { 157 if (TheDelegate) 158 TheDelegate->MF_HandleRemoval(MI); 159 } 160 161 void MachineFunction::init() { 162 // Assume the function starts in SSA form with correct liveness. 163 Properties.set(MachineFunctionProperties::Property::IsSSA); 164 Properties.set(MachineFunctionProperties::Property::TracksLiveness); 165 if (STI->getRegisterInfo()) 166 RegInfo = new (Allocator) MachineRegisterInfo(this); 167 else 168 RegInfo = nullptr; 169 170 MFInfo = nullptr; 171 // We can realign the stack if the target supports it and the user hasn't 172 // explicitly asked us not to. 
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F.hasFnAttribute("no-realign-stack");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
      /*ForcedRealign=*/CanRealignSP &&
          F.hasFnAttribute(Attribute::StackAlignment));

  if (F.hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
  // FIXME: Use Function::hasOptSize().
  if (!F.hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  if (AlignAllFunctions)
    Alignment = Align(1ULL << AlignAllFunctions);

  JumpTableInfo = nullptr;

  if (isFuncletEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  if (isScopedEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WasmEHInfo = new (Allocator) WasmEHFuncInfo();
  }

  assert(Target.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager =
      std::make_unique<PseudoSourceValueManager>(*(getSubtarget().
                                                   getInstrInfo()));
}

MachineFunction::~MachineFunction() {
  clear();
}

void MachineFunction::clear() {
  Properties.reset();
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, they contain std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();
  MBBNumbering.clear();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  CodeViewAnnotations.clear();
  VariableDbgInfos.clear();
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }

  if (WasmEHInfo) {
    WasmEHInfo->~WasmEHFuncInfo();
    Allocator.Deallocate(WasmEHInfo);
  }
}

const DataLayout &MachineFunction::getDataLayout() const {
  return F.getParent()->getDataLayout();
}

/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
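/// The EntryKind is interpreted as a MachineJumpTableInfo::JTEntryKind (e.g.
/// EK_BlockAddress or EK_LabelDifference32); it determines the size and
/// alignment of each emitted entry (see getEntrySize/getEntryAlignment below).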
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
  if (JumpTableInfo) return JumpTableInfo;

  JumpTableInfo = new (Allocator)
      MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
  return JumpTableInfo;
}

DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
  return F.getDenormalMode(FPType);
}

/// Should we be emitting segmented stack stuff for the function?
bool MachineFunction::shouldSplitStack() const {
  return getFunction().hasFnAttribute("split-stack");
}

LLVM_NODISCARD unsigned
MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
  FrameInstructions.push_back(Inst);
  return FrameInstructions.size() - 1;
}

/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function. If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered. If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}

/// This method iterates over the basic blocks and assigns their IsBeginSection
/// and IsEndSection fields. This must be called after MBB layout is finalized
/// and the SectionIDs are assigned to MBBs.
void MachineFunction::assignBeginEndSections() {
  front().setIsBeginSection();
  auto CurrentSectionID = front().getSectionID();
  for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
    if (MBBI->getSectionID() == CurrentSectionID)
      continue;
    MBBI->setIsBeginSection();
    std::prev(MBBI)->setIsEndSection();
    CurrentSectionID = MBBI->getSectionID();
  }
  back().setIsEndSection();
}

/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  DebugLoc DL,
                                                  bool NoImplicit) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, MCID, std::move(DL), NoImplicit);
}

/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
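/// Call site information is not cloned here; callers duplicating a call
/// instruction should copy it themselves (cloneMachineInstrBundle below does
/// so via copyCallSiteInfo).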
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
             MachineInstr(*this, *Orig);
}

MachineInstr &MachineFunction::cloneMachineInstrBundle(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    const MachineInstr &Orig) {
  MachineInstr *FirstClone = nullptr;
  MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
  while (true) {
    MachineInstr *Cloned = CloneMachineInstr(&*I);
    MBB.insert(InsertBefore, Cloned);
    if (FirstClone == nullptr) {
      FirstClone = Cloned;
    } else {
      Cloned->bundleWithPred();
    }

    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  // Copy over call site info to the cloned instruction if needed. If Orig is
  // in a bundle, copyCallSiteInfo takes care of finding the call instruction
  // in the bundle.
  if (Orig.shouldUpdateCallSiteInfo())
    copyCallSiteInfo(&Orig, FirstClone);
  return *FirstClone;
}

/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
  // Verify that the call site info is in a valid state. This assertion should
  // be triggered during the implementation of support for the
  // call site info of a new architecture. If the assertion is triggered,
  // the backtrace will tell where to insert a call to updateCallSiteInfo().
  assert((!MI->isCandidateForCallSiteEntry() ||
          CallSitesInfo.find(MI) == CallSitesInfo.end()) &&
         "Call site info was not updated!");
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}

/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
  return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
             MachineBasicBlock(*this, bb);
}

/// Delete the given MachineBasicBlock.
void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  // Clean up any references to MBB in jump tables before deleting it.
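  // (Otherwise later printing or emission could follow a dangling block
  // pointer left behind in a jump-table entry.)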
  if (JumpTableInfo)
    JumpTableInfo->RemoveMBBFromJumpTables(MBB);
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
                        SSID, Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
                        Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo,
    uint64_t Size) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, LLT Ty) {
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  // If there is no pointer value, the offset isn't tracked so we need to
  // adjust the base alignment.
  Align Alignment = PtrInfo.V.isNull()
                        ? commonAlignment(MMO->getBaseAlign(), Offset)
                        : MMO->getBaseAlign();

  // Do not preserve ranges, since we don't necessarily know what the high bits
  // are anymore.
  return new (Allocator) MachineMemOperand(
      PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
      MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      const AAMDNodes &AAInfo) {
  MachinePointerInfo MPI = MMO->getValue() ?
          MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
          MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());

  return new (Allocator) MachineMemOperand(
      MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
      MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      MachineMemOperand::Flags Flags) {
  return new (Allocator) MachineMemOperand(
      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
    ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
    MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker) {
  return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
                                         PostInstrSymbol, HeapAllocMarker);
}

const char *MachineFunction::createExternalSymbolName(StringRef Name) {
  char *Dest = Allocator.Allocate<char>(Name.size() + 1);
  llvm::copy(Name, Dest);
  Dest[Name.size()] = 0;
  return Dest;
}

uint32_t *MachineFunction::allocateRegMask() {
  unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
  unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
  uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
  memset(Mask, 0, Size * sizeof(Mask[0]));
  return Mask;
}

ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
  int *AllocMask = Allocator.Allocate<int>(Mask.size());
  copy(Mask, AllocMask);
  return {AllocMask, Mask.size()};
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif

StringRef MachineFunction::getName() const {
  return getFunction().getName();
}

void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  getProperties().print(OS);
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << printReg(I->first, TRI);
      if (I->second)
        OS << " in " << printReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  ModuleSlotTracker MST(getFunction().getParent());
  MST.incorporateFunction(getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    // If we print the whole function, print it at its most verbose level.
    BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}

/// True if this function needs frame moves for debug or exceptions.
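/// That is, when CFI must describe the frame: debug info is present,
/// TargetOptions::ForceDwarfFrameSection is set, or the function needs an
/// unwind table entry.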
bool MachineFunction::needsFrameMoves() const {
  return getMMI().hasDebugInfo() ||
         getTarget().Options.ForceDwarfFrameSection ||
         F.needsUnwindTableEntry();
}

namespace llvm {

template<>
struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const MachineFunction *F) {
    return ("CFG for '" + F->getName() + "' function").str();
  }

  std::string getNodeLabel(const MachineBasicBlock *Node,
                           const MachineFunction *Graph) {
    std::string OutStr;
    {
      raw_string_ostream OSS(OutStr);

      if (isSimple()) {
        OSS << printMBBReference(*Node);
        if (const BasicBlock *BB = Node->getBasicBlock())
          OSS << ": " << BB->getName();
      } else
        Node->print(OSS);
    }

    if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

    // Process string output to make it nicer...
    for (unsigned i = 0; i != OutStr.length(); ++i)
      if (OutStr[i] == '\n') { // Left justify
        OutStr[i] = '\\';
        OutStr.insert(OutStr.begin()+i+1, 'l');
      }
    return OutStr;
  }
};

} // end namespace llvm

void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
Register MachineFunction::addLiveIn(MCRegister PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  Register VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
           "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}

/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  const DataLayout &DL = getDataLayout();
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");

  StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
                                     : DL.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
      << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.getOrCreateSymbol(Name);
}

/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}

/// \name Exception Handling
/// \{

LandingPadInfo &
MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
  unsigned N = LandingPads.size();
  for (unsigned i = 0; i < N; ++i) {
    LandingPadInfo &LP = LandingPads[i];
    if (LP.LandingPadBlock == LandingPad)
      return LP;
  }

  LandingPads.push_back(LandingPadInfo(LandingPad));
  return LandingPads[N];
}

void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
                                MCSymbol *BeginLabel, MCSymbol *EndLabel) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.BeginLabels.push_back(BeginLabel);
  LP.EndLabels.push_back(EndLabel);
}

MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
  MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.LandingPadLabel = LandingPadLabel;

  const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
  if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
    if (const auto *PF =
            dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()))
      getMMI().addPersonality(PF);

    if (LPI->isCleanup())
      addCleanup(LandingPad);

    // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
    // correct, but we need to do it this way because of how the DWARF EH
    // emitter processes the clauses.
    for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
      Value *Val = LPI->getClause(I - 1);
      if (LPI->isCatch(I - 1)) {
        addCatchTypeInfo(LandingPad,
                         dyn_cast<GlobalValue>(Val->stripPointerCasts()));
      } else {
        // Add filters in a list.
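        // (A filter clause is a constant array of typeinfo globals; the whole
        // list is registered as a single negative filter id through
        // addFilterTypeInfo/getFilterIDFor.)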
        auto *CVal = cast<Constant>(Val);
        SmallVector<const GlobalValue *, 4> FilterList;
        for (const Use &U : CVal->operands())
          FilterList.push_back(cast<GlobalValue>(U->stripPointerCasts()));

        addFilterTypeInfo(LandingPad, FilterList);
      }
    }

  } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
    for (unsigned I = CPI->getNumArgOperands(); I != 0; --I) {
      Value *TypeInfo = CPI->getArgOperand(I - 1)->stripPointerCasts();
      addCatchTypeInfo(LandingPad, dyn_cast<GlobalValue>(TypeInfo));
    }

  } else {
    assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
  }

  return LandingPadLabel;
}

void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
                                       ArrayRef<const GlobalValue *> TyInfo) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  for (const GlobalValue *GV : llvm::reverse(TyInfo))
    LP.TypeIds.push_back(getTypeIDFor(GV));
}

void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
                                        ArrayRef<const GlobalValue *> TyInfo) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  std::vector<unsigned> IdsInFilter(TyInfo.size());
  for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
    IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
  LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
}

void MachineFunction::tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap,
                                      bool TidyIfNoBeginLabels) {
  for (unsigned i = 0; i != LandingPads.size(); ) {
    LandingPadInfo &LandingPad = LandingPads[i];
    if (LandingPad.LandingPadLabel &&
        !LandingPad.LandingPadLabel->isDefined() &&
        (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
      LandingPad.LandingPadLabel = nullptr;

    // Special case: we *should* emit LPs with null LP MBB. This indicates
    // the "nounwind" case.
    if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
      LandingPads.erase(LandingPads.begin() + i);
      continue;
    }

    if (TidyIfNoBeginLabels) {
      for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
        MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
        MCSymbol *EndLabel = LandingPad.EndLabels[j];
        if ((BeginLabel->isDefined() || (LPMap && (*LPMap)[BeginLabel] != 0)) &&
            (EndLabel->isDefined() || (LPMap && (*LPMap)[EndLabel] != 0)))
          continue;

        LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
        LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
        --j;
        --e;
      }

      // Remove landing pads with no try-ranges.
      if (LandingPads[i].BeginLabels.empty()) {
        LandingPads.erase(LandingPads.begin() + i);
        continue;
      }
    }

    // If there is no landing pad, ensure that the list of typeids is empty.
    // If the only typeid is a cleanup, this is the same as having no typeids.
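    // (Cleanups are recorded as type id 0; see addCleanup below.)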
    if (!LandingPad.LandingPadBlock ||
        (LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
      LandingPad.TypeIds.clear();
    ++i;
  }
}

void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.TypeIds.push_back(0);
}

void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
                                         const Function *Filter,
                                         const BlockAddress *RecoverBA) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  SEHHandler Handler;
  Handler.FilterOrFinally = Filter;
  Handler.RecoverBA = RecoverBA;
  LP.SEHHandlers.push_back(Handler);
}

void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
                                           const Function *Cleanup) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  SEHHandler Handler;
  Handler.FilterOrFinally = Cleanup;
  Handler.RecoverBA = nullptr;
  LP.SEHHandlers.push_back(Handler);
}

void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
                                            ArrayRef<unsigned> Sites) {
  LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}

unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
  for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
    if (TypeInfos[i] == TI) return i + 1;

  TypeInfos.push_back(TI);
  return TypeInfos.size();
}

int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
  // If the new filter coincides with the tail of an existing filter, then
  // re-use the existing filter. Folding filters more than this requires
  // re-ordering filters and/or their elements - probably not worth it.
  for (unsigned i : FilterEnds) {
    unsigned j = TyIds.size();

    while (i && j)
      if (FilterIds[--i] != TyIds[--j])
        goto try_next;

    if (!j)
      // The new filter coincides with range [i, end) of the existing filter.
      return -(1 + i);

  try_next:;
  }

  // Add the new filter.
  int FilterID = -(1 + FilterIds.size());
  FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
  llvm::append_range(FilterIds, TyIds);
  FilterEnds.push_back(FilterIds.size());
  FilterIds.push_back(0); // terminator
  return FilterID;
}

MachineFunction::CallSiteInfoMap::iterator
MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
  assert(MI->isCandidateForCallSiteEntry() &&
         "Call site info refers only to call (MI) candidates");

  if (!Target.Options.EmitCallSiteInfo)
    return CallSitesInfo.end();
  return CallSitesInfo.find(MI);
}

/// Return the call machine instruction or find a call within a bundle.
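/// (After bundling, call site info stays keyed by the call instruction
/// itself, which may be nested inside the bundle headed by MI.)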
static const MachineInstr *getCallInstr(const MachineInstr *MI) {
  if (!MI->isBundle())
    return MI;

  for (auto &BMI : make_range(getBundleStart(MI->getIterator()),
                              getBundleEnd(MI->getIterator())))
    if (BMI.isCandidateForCallSiteEntry())
      return &BMI;

  llvm_unreachable("Unexpected bundle without a call site candidate");
}

void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
  assert(MI->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  const MachineInstr *CallMI = getCallInstr(MI);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
  if (CSIt == CallSitesInfo.end())
    return;
  CallSitesInfo.erase(CSIt);
}

void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = CSIt->second;
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = std::move(CSIt->second);
  CallSitesInfo.erase(CSIt);
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
  DebugInstrNumberingCount = Num;
}

void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
                                                 DebugInstrOperandPair B,
                                                 unsigned Subreg) {
  // Catch any accidental self-loops.
  assert(A.first != B.first);
  // Don't allow any substitutions _from_ the memory operand number.
  assert(A.second != DebugOperandMemNumber);

  DebugValueSubstitutions.push_back({A, B, Subreg});
}

void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
                                                   MachineInstr &New,
                                                   unsigned MaxOperand) {
  // If the Old instruction wasn't tracked at all, there is no work to do.
  unsigned OldInstrNum = Old.peekDebugInstrNum();
  if (!OldInstrNum)
    return;

  // Iterate over all operands looking for defs to create substitutions for.
  // Avoid creating new instr numbers unless we create a new substitution.
  // While this has no functional effect, it risks confusing someone reading
  // MIR output.
  // Examine all the operands, or the first N specified by the caller.
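  // (MaxOperand is clamped below, so callers can pass a large value to mean
  // "all operands".)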
  MaxOperand = std::min(MaxOperand, Old.getNumOperands());
  for (unsigned int I = 0; I < MaxOperand; ++I) {
    const auto &OldMO = Old.getOperand(I);
    auto &NewMO = New.getOperand(I);
    (void)NewMO;

    if (!OldMO.isReg() || !OldMO.isDef())
      continue;
    assert(NewMO.isDef());

    unsigned NewInstrNum = New.getDebugInstrNum();
    makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
                               std::make_pair(NewInstrNum, I));
  }
}

auto MachineFunction::salvageCopySSA(MachineInstr &MI)
    -> DebugInstrOperandPair {
  MachineRegisterInfo &MRI = getRegInfo();
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Chase the value read by a copy-like instruction back to the instruction
  // that ultimately _defines_ that value. This may pass:
  //  * Through multiple intermediate copies, including subregister moves /
  //    copies,
  //  * Copies from physical registers that must then be traced back to the
  //    defining instruction,
  //  * Or, physical registers may be live-in to (only) the entry block, which
  //    requires a DBG_PHI to be created.
  // We can pursue this problem in that order: trace back through copies,
  // optionally through a physical register, to a defining instruction. We
  // should never move from physreg to vreg. As we're still in SSA form, no
  // need to worry about partial definitions of registers.

  // Helper lambda to interpret a copy-like instruction. Takes instruction,
  // returns the register read and any subregister identifying which part is
  // read.
  auto GetRegAndSubreg =
      [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
    Register NewReg, OldReg;
    unsigned SubReg;
    if (Cpy.isCopy()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(1).getReg();
      SubReg = Cpy.getOperand(1).getSubReg();
    } else if (Cpy.isSubregToReg()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(2).getReg();
      SubReg = Cpy.getOperand(3).getImm();
    } else {
      auto CopyDetails = *TII.isCopyInstr(Cpy);
      const MachineOperand &Src = *CopyDetails.Source;
      const MachineOperand &Dest = *CopyDetails.Destination;
      OldReg = Dest.getReg();
      NewReg = Src.getReg();
      SubReg = Src.getSubReg();
    }

    return {NewReg, SubReg};
  };

  // First seek either the defining instruction, or a copy from a physreg.
  // During search, the current state is the current copy instruction, and
  // which register we've read. Accumulate qualifying subregisters into
  // SubregsSeen; deal with those later.
  auto State = GetRegAndSubreg(MI);
  auto CurInst = MI.getIterator();
  SmallVector<unsigned, 4> SubregsSeen;
  while (true) {
    // If we've found a copy from a physreg, first portion of search is over.
    if (!State.first.isVirtual())
      break;

    // Record any subregister qualifier.
    if (State.second)
      SubregsSeen.push_back(State.second);

    assert(MRI.hasOneDef(State.first));
    MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
    CurInst = Inst.getIterator();

    // Any non-copy instruction is the defining instruction we're seeking.
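    // (e.g. if the value was copied from a register that is itself a
    // subregister copy of another vreg defined by a non-copy instruction, the
    // chase stops at that instruction, with the subregister recorded in
    // SubregsSeen.)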
    if (!Inst.isCopyLike() && !TII.isCopyInstr(Inst))
      break;
    State = GetRegAndSubreg(Inst);
  }

  // Helper lambda to apply additional subregister substitutions to a known
  // instruction/operand pair. Adds new (fake) substitutions so that we can
  // record the subregister. FIXME: this isn't very space efficient if multiple
  // values are tracked back through the same copies; cache something later.
  auto ApplySubregisters =
      [&](DebugInstrOperandPair P) -> DebugInstrOperandPair {
    for (unsigned Subreg : reverse(SubregsSeen)) {
      // Fetch a new instruction number, not attached to an actual instruction.
      unsigned NewInstrNumber = getNewDebugInstrNum();
      // Add a substitution from the "new" number to the known one, with a
      // qualifying subreg.
      makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
      // Return the new number; to find the underlying value, consumers need
      // to deal with the qualifying subreg.
      P = {NewInstrNumber, 0};
    }
    return P;
  };

  // If we managed to find the defining instruction after COPYs, return an
  // instruction / operand pair after adding subregister qualifiers.
  if (State.first.isVirtual()) {
    // Virtual register def -- we can just look up where this happens.
    MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
    for (auto &MO : Inst->operands()) {
      if (!MO.isReg() || !MO.isDef() || MO.getReg() != State.first)
        continue;
      return ApplySubregisters(
          {Inst->getDebugInstrNum(), Inst->getOperandNo(&MO)});
    }

    llvm_unreachable("Vreg def with no corresponding operand?");
  }

  // Our search ended in a copy from a physreg: walk back up the function
  // looking for whatever defines the physreg.
  assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
  State = GetRegAndSubreg(*CurInst);
  Register RegToSeek = State.first;

  auto RMII = CurInst->getReverseIterator();
  auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
  for (auto &ToExamine : PrevInstrs) {
    for (auto &MO : ToExamine.operands()) {
      // Test for operand that defines something aliasing RegToSeek.
      if (!MO.isReg() || !MO.isDef() ||
          !TRI.regsOverlap(RegToSeek, MO.getReg()))
        continue;

      return ApplySubregisters(
          {ToExamine.getDebugInstrNum(), ToExamine.getOperandNo(&MO)});
    }
  }

  MachineBasicBlock &InsertBB = *CurInst->getParent();

  // We reached the start of the block before finding a defining instruction.
  // It could be from a constant register, otherwise it must be an argument.
  if (TRI.isConstantPhysReg(State.first)) {
    // We can produce a DBG_PHI that identifies the constant physreg. Doesn't
    // matter where we put it, as it's constant valued.
    assert(CurInst->isCopy());
  } else if (State.first == TRI.getFrameRegister(*this)) {
    // LLVM IR is allowed to read the framepointer by calling a
    // llvm.frameaddress.* intrinsic. We can support this by emitting a
    // DBG_PHI $fp. This isn't ideal, because it extends the behaviours /
    // positions that DBG_PHIs appear at, limiting what can be done later.
    // TODO: see if there's a better way of expressing these variable
    // locations.
    ;
  } else {
    // Assert that this is the entry block, or an EH pad. If it isn't, then
    // there is some code construct we don't recognise that deals with physregs
    // across blocks.
    assert(!State.first.isVirtual());
    assert(&*InsertBB.getParent()->begin() == &InsertBB || InsertBB.isEHPad());
  }

  // Create DBG_PHI for specified physreg.
  auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
                         TII.get(TargetOpcode::DBG_PHI));
  Builder.addReg(State.first);
  unsigned NewNum = getNewDebugInstrNum();
  Builder.addImm(NewNum);
  return ApplySubregisters({NewNum, 0u});
}

void MachineFunction::finalizeDebugInstrRefs() {
  auto *TII = getSubtarget().getInstrInfo();

  auto MakeUndefDbgValue = [&](MachineInstr &MI) {
    const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE);
    MI.setDesc(RefII);
    MI.getOperand(0).setReg(0);
    MI.getOperand(1).ChangeToRegister(0, false);
  };

  for (auto &MBB : *this) {
    for (auto &MI : MBB) {
      if (!MI.isDebugRef() || !MI.getOperand(0).isReg())
        continue;

      Register Reg = MI.getOperand(0).getReg();

      // Some vregs can be deleted as redundant in the meantime. Mark those
      // as DBG_VALUE $noreg. Additionally, some normal instructions are
      // quickly deleted, leaving dangling references to vregs with no def.
      if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
        MakeUndefDbgValue(MI);
        continue;
      }

      assert(Reg.isVirtual());
      MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);

      // If we've found a copy-like instruction, follow it back to the
      // instruction that defines the source value, see salvageCopySSA docs
      // for why this is important.
      if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
        auto Result = salvageCopySSA(DefMI);
        MI.getOperand(0).ChangeToImmediate(Result.first);
        MI.getOperand(1).setImm(Result.second);
      } else {
        // Otherwise, identify the operand number that the VReg refers to.
        unsigned OperandIdx = 0;
        for (const auto &MO : DefMI.operands()) {
          if (MO.isReg() && MO.isDef() && MO.getReg() == Reg)
            break;
          ++OperandIdx;
        }
        assert(OperandIdx < DefMI.getNumOperands());

        // Morph this instr ref to point at the given instruction and operand.
        unsigned ID = DefMI.getDebugInstrNum();
        MI.getOperand(0).ChangeToImmediate(ID);
        MI.getOperand(1).setImm(OperandIdx);
      }
    }
  }
}

bool MachineFunction::useDebugInstrRef() const {
  // Disable instr-ref at -O0: it's very slow (in compile time). We can still
  // have optimized code inlined into this unoptimized code, however with
  // fewer and less aggressive optimizations happening, coverage and accuracy
  // should not suffer.
  if (getTarget().getOptLevel() == CodeGenOpt::None)
    return false;

  // Don't use instr-ref if this function is marked optnone.
  if (F.hasFnAttribute(Attribute::OptimizeNone))
    return false;

  if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
    return true;

  return false;
}

// Use one million as a high / reserved number.
const unsigned MachineFunction::DebugOperandMemNumber = 1000000;

/// \}

//===----------------------------------------------------------------------===//
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//

/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment(0).value();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return TD.getABIIntegerTypeAlignment(64).value();
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32).value();
  case MachineJumpTableInfo::EK_Inline:
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
    const std::vector<MachineBasicBlock*> &DestBBs) {
  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
  return JumpTables.size()-1;
}

/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
                                                  MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
  return MadeChange;
}

/// If MBB is present in any jump tables, remove it.
bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
  bool MadeChange = false;
  for (MachineJumpTableEntry &JTE : JumpTables) {
    auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
    MadeChange |= (removeBeginItr != JTE.MBBs.end());
    JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
  }
  return MadeChange;
}

/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
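/// Returns true if entry Idx was changed.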
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (MachineBasicBlock *&MBB : JTE.MBBs)
    if (MBB == Old) {
      MBB = New;
      MadeChange = true;
    }
  return MadeChange;
}

void MachineJumpTableInfo::print(raw_ostream &OS) const {
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << printJumpTableEntryReference(i) << ':';
    for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
      OS << ' ' << printMBBReference(*MBB);
    if (i != e)
      OS << '\n';
  }

  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif

Printable llvm::printJumpTableEntryReference(unsigned Idx) {
  return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
}

//===----------------------------------------------------------------------===//
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//

void MachineConstantPoolValue::anchor() {}

unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
  return DL.getTypeAllocSize(Ty);
}

unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getSizeInBytes(DL);
  return DL.getTypeAllocSize(Val.ConstVal->getType());
}

bool MachineConstantPoolEntry::needsRelocation() const {
  if (isMachineConstantPoolEntry())
    return true;
  return Val.ConstVal->needsDynamicRelocation();
}

SectionKind
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
  if (needsRelocation())
    return SectionKind::getReadOnlyWithRel();
  switch (getSizeInBytes(*DL)) {
  case 4:
    return SectionKind::getMergeableConst4();
  case 8:
    return SectionKind::getMergeableConst8();
  case 16:
    return SectionKind::getMergeableConst16();
  case 32:
    return SectionKind::getMergeableConst32();
  default:
    return SectionKind::getReadOnly();
  }
}

MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (const MachineConstantPoolEntry &C : Constants)
    if (C.isMachineConstantPoolEntry()) {
      Deleted.insert(C.Val.MachineCPVal);
      delete C.Val.MachineCPVal;
    }
  for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
    if (Deleted.count(CPV) == 0)
      delete CPV;
  }
}

/// Test whether the given two constants can be allocated the same constant
/// pool entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout &DL) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them.
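  // (The interesting sharing cases are cross-type, e.g. an i64 and a
  // <2 x i32> with identical bytes; those are handled below by constant
  // folding both sides to the same integer type and comparing the results.)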
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
    return false;

  // For now, only support constants with the same size.
  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
    return false;

  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer. If we
  // get two identical ConstantInt's, then we are good to share them. We use
  // the constant folding APIs to do this so that we get the benefit of
  // DataLayout.
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(A), IntTy, DL);
  else if (A->getType() != IntTy)
    A = ConstantFoldCastOperand(Instruction::BitCast,
                                const_cast<Constant *>(A), IntTy, DL);
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(B), IntTy, DL);
  else if (B->getType() != IntTy)
    B = ConstantFoldCastOperand(Instruction::BitCast,
                                const_cast<Constant *>(B), IntTy, DL);

  return A == B;
}

/// Create a new entry in the constant pool or return an existing one.
/// User must specify the minimum required alignment for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (!Constants[i].isMachineConstantPoolEntry() &&
        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
      if (Constants[i].getAlign() < Alignment)
        Constants[i].Alignment = Alignment;
      return i;
    }

  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
  return Constants.size()-1;
}

unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  int Idx = V->getExistingMachineCPValue(this, Alignment);
  if (Idx != -1) {
    MachineCPVsSharingEntries.insert(V);
    return (unsigned)Idx;
  }

  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
  return Constants.size()-1;
}

void MachineConstantPool::print(raw_ostream &OS) const {
  if (Constants.empty()) return;

  OS << "Constant Pool:\n";
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    OS << "  cp#" << i << ": ";
    if (Constants[i].isMachineConstantPoolEntry())
      Constants[i].Val.MachineCPVal->print(OS);
    else
      Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
    OS << ", align=" << Constants[i].getAlign().value();
    OS << "\n";
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif