//===- MachineFunction.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "LiveDebugValues/LiveDebugValues.h"

using namespace llvm;

#define DEBUG_TYPE "codegen"

static cl::opt<unsigned> AlignAllFunctions(
    "align-all-functions",
    cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
             "means align on 16B boundaries)."),
    cl::init(0), cl::Hidden);
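// For example, `llc -align-all-functions=4` forces every function onto a
// 16-byte (2^4) boundary; the option is applied last in init() below, so it
// overrides the target's minimum and preferred function alignment.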
static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
  using P = MachineFunctionProperties::Property;

  // clang-format off
  switch(Prop) {
  case P::FailedISel: return "FailedISel";
  case P::IsSSA: return "IsSSA";
  case P::Legalized: return "Legalized";
  case P::NoPHIs: return "NoPHIs";
  case P::NoVRegs: return "NoVRegs";
  case P::RegBankSelected: return "RegBankSelected";
  case P::Selected: return "Selected";
  case P::TracksLiveness: return "TracksLiveness";
  case P::TiedOpsRewritten: return "TiedOpsRewritten";
  case P::FailsVerification: return "FailsVerification";
  case P::TracksDebugUserValues: return "TracksDebugUserValues";
  }
  // clang-format on
  llvm_unreachable("Invalid machine function property");
}

void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo) {
  if (!F.hasFnAttribute(Attribute::SafeStack))
    return;

  auto *Existing =
      dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));

  if (!Existing || Existing->getNumOperands() != 2)
    return;

  auto *MetadataName = "unsafe-stack-size";
  if (auto &N = Existing->getOperand(0)) {
    if (N.equalsStr(MetadataName)) {
      if (auto &Op = Existing->getOperand(1)) {
        auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
        FrameInfo.setUnsafeStackSize(Val);
      }
    }
  }
}
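// Illustrative IR shape the checks above expect (a sketch, not normative):
// a safestack function carrying a two-operand !annotation tuple, e.g.
//
//   define void @f() safestack !annotation !0 { ... }
//   !0 = !{!"unsafe-stack-size", i64 4096}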
// Pin the vtable to this file.
void MachineFunction::Delegate::anchor() {}

void MachineFunctionProperties::print(raw_ostream &OS) const {
  const char *Separator = "";
  for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
    if (!Properties[I])
      continue;
    OS << Separator << getPropertyName(static_cast<Property>(I));
    Separator = ", ";
  }
}

//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//

// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() = default;

void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->deleteMachineBasicBlock(MBB);
}

static inline Align getFnStackAlignment(const TargetSubtargetInfo *STI,
                                        const Function &F) {
  if (auto MA = F.getFnStackAlign())
    return *MA;
  return STI->getFrameLowering()->getStackAlign();
}

MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target,
                                 const TargetSubtargetInfo &STI,
                                 unsigned FunctionNum, MachineModuleInfo &mmi)
    : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
  FunctionNumber = FunctionNum;
  init();
}

void MachineFunction::handleInsertion(MachineInstr &MI) {
  if (TheDelegate)
    TheDelegate->MF_HandleInsertion(MI);
}

void MachineFunction::handleRemoval(MachineInstr &MI) {
  if (TheDelegate)
    TheDelegate->MF_HandleRemoval(MI);
}

void MachineFunction::init() {
  // Assume the function starts in SSA form with correct liveness.
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  Properties.set(MachineFunctionProperties::Property::TracksLiveness);
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  MFInfo = nullptr;

  // We can realign the stack if the target supports it and the user hasn't
  // explicitly asked us not to.
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F.hasFnAttribute("no-realign-stack");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
      /*ForcedRealign=*/CanRealignSP &&
          F.hasFnAttribute(Attribute::StackAlignment));

  setUnsafeStackSize(F, *FrameInfo);

  if (F.hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
  // FIXME: Use Function::hasOptSize().
  if (!F.hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
  // to load a type hash before the function label. Ensure functions are aligned
  // by at least 4 to avoid unaligned access, which is especially important for
  // -mno-unaligned-access.
  if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
      F.getMetadata(LLVMContext::MD_kcfi_type))
    Alignment = std::max(Alignment, Align(4));

  if (AlignAllFunctions)
    Alignment = Align(1ULL << AlignAllFunctions);

  JumpTableInfo = nullptr;

  if (isFuncletEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  if (isScopedEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WasmEHInfo = new (Allocator) WasmEHFuncInfo();
  }

  assert(Target.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
}

void MachineFunction::initTargetMachineFunctionInfo(
    const TargetSubtargetInfo &STI) {
  assert(!MFInfo && "MachineFunctionInfo already set");
  MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
}

MachineFunction::~MachineFunction() {
  clear();
}

void MachineFunction::clear() {
  Properties.reset();
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors; they contain std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();
  MBBNumbering.clear();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  CodeViewAnnotations.clear();
  VariableDbgInfos.clear();
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }

  if (WasmEHInfo) {
    WasmEHInfo->~WasmEHFuncInfo();
    Allocator.Deallocate(WasmEHInfo);
  }
}

const DataLayout &MachineFunction::getDataLayout() const {
  return F.getParent()->getDataLayout();
}

/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
  if (JumpTableInfo) return JumpTableInfo;

  JumpTableInfo = new (Allocator)
    MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
  return JumpTableInfo;
}

DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
  return F.getDenormalMode(FPType);
}

/// Should we be emitting segmented stack stuff for the function
bool MachineFunction::shouldSplitStack() const {
  return getFunction().hasFnAttribute("split-stack");
}

[[nodiscard]] unsigned
MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
  FrameInstructions.push_back(Inst);
  return FrameInstructions.size() - 1;
}

/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function. If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered. If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}

/// This method iterates over the basic blocks and assigns their IsBeginSection
/// and IsEndSection fields. This must be called after MBB layout is finalized
/// and the SectionID's are assigned to MBBs.
void MachineFunction::assignBeginEndSections() {
  front().setIsBeginSection();
  auto CurrentSectionID = front().getSectionID();
  for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
    if (MBBI->getSectionID() == CurrentSectionID)
      continue;
    MBBI->setIsBeginSection();
    std::prev(MBBI)->setIsEndSection();
    CurrentSectionID = MBBI->getSectionID();
  }
  back().setIsEndSection();
}
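// For example, for blocks laid out with section IDs [0, 0, 1, 2, 2], blocks
// 0, 2, and 3 get IsBeginSection and blocks 1, 2, and 4 get IsEndSection;
// block 2 is alone in its section, so it is marked as both.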
/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  DebugLoc DL,
                                                  bool NoImplicit) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, MCID, std::move(DL), NoImplicit);
}

/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, *Orig);
}

MachineInstr &MachineFunction::cloneMachineInstrBundle(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    const MachineInstr &Orig) {
  MachineInstr *FirstClone = nullptr;
  MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
  while (true) {
    MachineInstr *Cloned = CloneMachineInstr(&*I);
    MBB.insert(InsertBefore, Cloned);
    if (FirstClone == nullptr) {
      FirstClone = Cloned;
    } else {
      Cloned->bundleWithPred();
    }

    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  // Copy over call site info to the cloned instruction if needed. If Orig is
  // in a bundle, copyCallSiteInfo takes care of finding the call instruction
  // in the bundle.
  if (Orig.shouldUpdateCallSiteInfo())
    copyCallSiteInfo(&Orig, FirstClone);
  return *FirstClone;
}

/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
  // Verify that a call site info is at valid state. This assertion should
  // be triggered during the implementation of support for the
  // call site info of a new architecture. If the assertion is triggered,
  // back trace will tell where to insert a call to updateCallSiteInfo().
  assert((!MI->isCandidateForCallSiteEntry() || !CallSitesInfo.contains(MI)) &&
         "Call site info was not updated!");
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}

/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
  MachineBasicBlock *MBB =
      new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
          MachineBasicBlock(*this, bb);
  // Set BBID for `-basic-block-sections=labels` and
  // `-basic-block-sections=list` to allow robust mapping of profiles to basic
  // blocks.
  if (Target.getBBSectionsType() == BasicBlockSection::Labels ||
      Target.getBBSectionsType() == BasicBlockSection::List)
    MBB->setBBID(NextBBID++);
  return MBB;
}

/// Delete the given MachineBasicBlock.
void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  // Clean up any references to MBB in jump tables before deleting it.
  if (JumpTableInfo)
    JumpTableInfo->RemoveMBBFromJumpTables(MBB);
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
                        SSID, Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
                        Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo,
    uint64_t Size) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, LLT Ty) {
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  // If there is no pointer value, the offset isn't tracked so we need to
  // adjust the base alignment.
  Align Alignment = PtrInfo.V.isNull()
                        ? commonAlignment(MMO->getBaseAlign(), Offset)
                        : MMO->getBaseAlign();

  // Do not preserve ranges, since we don't necessarily know what the high bits
  // are anymore.
  return new (Allocator) MachineMemOperand(
      PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
      MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}
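// Note on the Offset-based overload above: when the pointer value is unknown,
// commonAlignment clamps the base alignment to the largest power of two that
// also divides Offset, e.g. a base alignment of 16 with Offset == 4 yields
// Align(4).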
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      const AAMDNodes &AAInfo) {
  MachinePointerInfo MPI = MMO->getValue() ?
             MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
             MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());

  return new (Allocator) MachineMemOperand(
      MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
      MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      MachineMemOperand::Flags Flags) {
  return new (Allocator) MachineMemOperand(
      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
    ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
    MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
    uint32_t CFIType) {
  return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
                                         PostInstrSymbol, HeapAllocMarker,
                                         PCSections, CFIType);
}

const char *MachineFunction::createExternalSymbolName(StringRef Name) {
  char *Dest = Allocator.Allocate<char>(Name.size() + 1);
  llvm::copy(Name, Dest);
  Dest[Name.size()] = 0;
  return Dest;
}

uint32_t *MachineFunction::allocateRegMask() {
  unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
  unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
  uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
  memset(Mask, 0, Size * sizeof(Mask[0]));
  return Mask;
}

ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
  int *AllocMask = Allocator.Allocate<int>(Mask.size());
  copy(Mask, AllocMask);
  return {AllocMask, Mask.size()};
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif

StringRef MachineFunction::getName() const {
  return getFunction().getName();
}

void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  getProperties().print(OS);
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << printReg(I->first, TRI);
      if (I->second)
        OS << " in " << printReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  ModuleSlotTracker MST(getFunction().getParent());
  MST.incorporateFunction(getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    // If we print the whole function, print it at its most verbose level.
    BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}

/// True if this function needs frame moves for debug or exceptions.
bool MachineFunction::needsFrameMoves() const {
  return getMMI().hasDebugInfo() ||
         getTarget().Options.ForceDwarfFrameSection ||
         F.needsUnwindTableEntry();
}

namespace llvm {

template<>
struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const MachineFunction *F) {
    return ("CFG for '" + F->getName() + "' function").str();
  }

  std::string getNodeLabel(const MachineBasicBlock *Node,
                           const MachineFunction *Graph) {
    std::string OutStr;
    {
      raw_string_ostream OSS(OutStr);

      if (isSimple()) {
        OSS << printMBBReference(*Node);
        if (const BasicBlock *BB = Node->getBasicBlock())
          OSS << ": " << BB->getName();
      } else
        Node->print(OSS);
    }

    if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

    // Process string output to make it nicer...
    for (unsigned i = 0; i != OutStr.length(); ++i)
      if (OutStr[i] == '\n') { // Left justify
        OutStr[i] = '\\';
        OutStr.insert(OutStr.begin()+i+1, 'l');
      }
    return OutStr;
  }
};

} // end namespace llvm

void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
Register MachineFunction::addLiveIn(MCRegister PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  Register VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
           "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}

/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  const DataLayout &DL = getDataLayout();
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");

  StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
                                     : DL.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
      << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.getOrCreateSymbol(Name);
}
/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}
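// As an illustration, on a typical ELF target (private global prefix ".L"),
// jump table 2 of function 7 is named ".LJTI7_2", and that function's PIC
// base symbol is ".L7$pb"; the exact prefix comes from the DataLayout.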
/// \name Exception Handling
/// \{

LandingPadInfo &
MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
  unsigned N = LandingPads.size();
  for (unsigned i = 0; i < N; ++i) {
    LandingPadInfo &LP = LandingPads[i];
    if (LP.LandingPadBlock == LandingPad)
      return LP;
  }

  LandingPads.push_back(LandingPadInfo(LandingPad));
  return LandingPads[N];
}

void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
                                MCSymbol *BeginLabel, MCSymbol *EndLabel) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.BeginLabels.push_back(BeginLabel);
  LP.EndLabels.push_back(EndLabel);
}

MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
  MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.LandingPadLabel = LandingPadLabel;

  const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
  if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
    // If there's no typeid list specified, then "cleanup" is implicit.
    // Otherwise, id 0 is reserved for the cleanup action.
    if (LPI->isCleanup() && LPI->getNumClauses() != 0)
      LP.TypeIds.push_back(0);

    // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
    // correct, but we need to do it this way because of how the DWARF EH
    // emitter processes the clauses.
    for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
      Value *Val = LPI->getClause(I - 1);
      if (LPI->isCatch(I - 1)) {
        LP.TypeIds.push_back(
            getTypeIDFor(dyn_cast<GlobalValue>(Val->stripPointerCasts())));
      } else {
        // Add filters in a list.
        auto *CVal = cast<Constant>(Val);
        SmallVector<unsigned, 4> FilterList;
        for (const Use &U : CVal->operands())
          FilterList.push_back(
              getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));

        LP.TypeIds.push_back(getFilterIDFor(FilterList));
      }
    }

  } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
    for (unsigned I = CPI->arg_size(); I != 0; --I) {
      auto *TypeInfo =
          dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
      LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
    }

  } else {
    assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
  }

  return LandingPadLabel;
}

void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
                                            ArrayRef<unsigned> Sites) {
  LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}

unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
  for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
    if (TypeInfos[i] == TI) return i + 1;

  TypeInfos.push_back(TI);
  return TypeInfos.size();
}

int MachineFunction::getFilterIDFor(ArrayRef<unsigned> TyIds) {
  // If the new filter coincides with the tail of an existing filter, then
  // re-use the existing filter. Folding filters more than this requires
  // re-ordering filters and/or their elements - probably not worth it.
  for (unsigned i : FilterEnds) {
    unsigned j = TyIds.size();

    while (i && j)
      if (FilterIds[--i] != TyIds[--j])
        goto try_next;

    if (!j)
      // The new filter coincides with range [i, end) of the existing filter.
      return -(1 + i);

  try_next:;
  }

  // Add the new filter.
  int FilterID = -(1 + FilterIds.size());
  FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
  llvm::append_range(FilterIds, TyIds);
  FilterEnds.push_back(FilterIds.size());
  FilterIds.push_back(0); // terminator
  return FilterID;
}
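// Worked example of the encoding above: starting from empty tables, adding a
// filter with TyIds = {2, 5} stores FilterIds = [2, 5, 0] (zero-terminated),
// records FilterEnds = [2], and returns -(1 + 0) = -1. A later filter {5}
// matches the tail of the existing one, so it is given ID -(1 + 1) = -2
// without growing the tables.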
MachineFunction::CallSiteInfoMap::iterator
MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
  assert(MI->isCandidateForCallSiteEntry() &&
         "Call site info refers only to call (MI) candidates");

  if (!Target.Options.EmitCallSiteInfo)
    return CallSitesInfo.end();
  return CallSitesInfo.find(MI);
}

/// Return the call machine instruction or find a call within bundle.
static const MachineInstr *getCallInstr(const MachineInstr *MI) {
  if (!MI->isBundle())
    return MI;

  for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
                                    getBundleEnd(MI->getIterator())))
    if (BMI.isCandidateForCallSiteEntry())
      return &BMI;

  llvm_unreachable("Unexpected bundle without a call site candidate");
}

void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
  assert(MI->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  const MachineInstr *CallMI = getCallInstr(MI);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
  if (CSIt == CallSitesInfo.end())
    return;
  CallSitesInfo.erase(CSIt);
}

void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = CSIt->second;
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = std::move(CSIt->second);
  CallSitesInfo.erase(CSIt);
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
  DebugInstrNumberingCount = Num;
}

void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
                                                 DebugInstrOperandPair B,
                                                 unsigned Subreg) {
  // Catch any accidental self-loops.
  assert(A.first != B.first);
  // Don't allow any substitutions _from_ the memory operand number.
  assert(A.second != DebugOperandMemNumber);

  DebugValueSubstitutions.push_back({A, B, Subreg});
}

void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
                                                   MachineInstr &New,
                                                   unsigned MaxOperand) {
  // If the Old instruction wasn't tracked at all, there is no work to do.
  unsigned OldInstrNum = Old.peekDebugInstrNum();
  if (!OldInstrNum)
    return;

  // Iterate over all operands looking for defs to create substitutions for.
  // Avoid creating new instr numbers unless we create a new substitution.
  // While this has no functional effect, it risks confusing someone reading
  // MIR output.
  // Examine all the operands, or the first N specified by the caller.
  MaxOperand = std::min(MaxOperand, Old.getNumOperands());
  for (unsigned int I = 0; I < MaxOperand; ++I) {
    const auto &OldMO = Old.getOperand(I);
    auto &NewMO = New.getOperand(I);
    (void)NewMO;

    if (!OldMO.isReg() || !OldMO.isDef())
      continue;
    assert(NewMO.isDef());

    unsigned NewInstrNum = New.getDebugInstrNum();
    makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
                               std::make_pair(NewInstrNum, I));
  }
}

auto MachineFunction::salvageCopySSA(
    MachineInstr &MI, DenseMap<Register, DebugInstrOperandPair> &DbgPHICache)
    -> DebugInstrOperandPair {
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Check whether this copy-like instruction has already been salvaged into
  // an operand pair.
  Register Dest;
  if (auto CopyDstSrc = TII.isCopyInstr(MI)) {
    Dest = CopyDstSrc->Destination->getReg();
  } else {
    assert(MI.isSubregToReg());
    Dest = MI.getOperand(0).getReg();
  }

  auto CacheIt = DbgPHICache.find(Dest);
  if (CacheIt != DbgPHICache.end())
    return CacheIt->second;

  // Calculate the instruction number to use, or install a DBG_PHI.
  auto OperandPair = salvageCopySSAImpl(MI);
  DbgPHICache.insert({Dest, OperandPair});
  return OperandPair;
}

auto MachineFunction::salvageCopySSAImpl(MachineInstr &MI)
    -> DebugInstrOperandPair {
  MachineRegisterInfo &MRI = getRegInfo();
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Chase the value read by a copy-like instruction back to the instruction
  // that ultimately _defines_ that value. This may pass:
  //  * Through multiple intermediate copies, including subregister moves /
  //    copies,
  //  * Copies from physical registers that must then be traced back to the
  //    defining instruction,
  //  * Or, physical registers may be live-in to (only) the entry block, which
  //    requires a DBG_PHI to be created.
  // We can pursue this problem in that order: trace back through copies,
  // optionally through a physical register, to a defining instruction. We
  // should never move from physreg to vreg. As we're still in SSA form, no
  // need to worry about partial definitions of registers.

  // Helper lambda to interpret a copy-like instruction. Takes instruction,
  // returns the register read and any subregister identifying which part is
  // read.
  auto GetRegAndSubreg =
      [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
    Register NewReg, OldReg;
    unsigned SubReg;
    if (Cpy.isCopy()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(1).getReg();
      SubReg = Cpy.getOperand(1).getSubReg();
    } else if (Cpy.isSubregToReg()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(2).getReg();
      SubReg = Cpy.getOperand(3).getImm();
    } else {
      auto CopyDetails = *TII.isCopyInstr(Cpy);
      const MachineOperand &Src = *CopyDetails.Source;
      const MachineOperand &Dest = *CopyDetails.Destination;
      OldReg = Dest.getReg();
      NewReg = Src.getReg();
      SubReg = Src.getSubReg();
    }

    return {NewReg, SubReg};
  };

  // First seek either the defining instruction, or a copy from a physreg.
  // During search, the current state is the current copy instruction, and
  // which register we've read. Accumulate qualifying subregisters into
  // SubregsSeen; deal with those later.
  auto State = GetRegAndSubreg(MI);
  auto CurInst = MI.getIterator();
  SmallVector<unsigned, 4> SubregsSeen;
  while (true) {
    // If we've found a copy from a physreg, first portion of search is over.
    if (!State.first.isVirtual())
      break;

    // Record any subregister qualifier.
    if (State.second)
      SubregsSeen.push_back(State.second);

    assert(MRI.hasOneDef(State.first));
    MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
    CurInst = Inst.getIterator();

    // Any non-copy instruction is the defining instruction we're seeking.
    if (!Inst.isCopyLike() && !TII.isCopyInstr(Inst))
      break;
    State = GetRegAndSubreg(Inst);
  }

  // Helper lambda to apply additional subregister substitutions to a known
  // instruction/operand pair. Adds new (fake) substitutions so that we can
  // record the subregister. FIXME: this isn't very space efficient if multiple
  // values are tracked back through the same copies; cache something later.
  auto ApplySubregisters =
      [&](DebugInstrOperandPair P) -> DebugInstrOperandPair {
    for (unsigned Subreg : reverse(SubregsSeen)) {
      // Fetch a new instruction number, not attached to an actual instruction.
      unsigned NewInstrNumber = getNewDebugInstrNum();
      // Add a substitution from the "new" number to the known one, with a
      // qualifying subreg.
      makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
      // Return the new number; to find the underlying value, consumers need
      // to deal with the qualifying subreg.
      P = {NewInstrNumber, 0};
    }
    return P;
  };

  // If we managed to find the defining instruction after COPYs, return an
  // instruction / operand pair after adding subregister qualifiers.
  if (State.first.isVirtual()) {
    // Virtual register def -- we can just look up where this happens.
    MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
    for (auto &MO : Inst->all_defs()) {
      if (MO.getReg() != State.first)
        continue;
      return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
    }

    llvm_unreachable("Vreg def with no corresponding operand?");
  }

  // Our search ended in a copy from a physreg: walk back up the function
  // looking for whatever defines the physreg.
  assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
  State = GetRegAndSubreg(*CurInst);
  Register RegToSeek = State.first;

  auto RMII = CurInst->getReverseIterator();
  auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
  for (auto &ToExamine : PrevInstrs) {
    for (auto &MO : ToExamine.all_defs()) {
      // Test for operand that defines something aliasing RegToSeek.
      if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
        continue;

      return ApplySubregisters(
          {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
    }
  }

  MachineBasicBlock &InsertBB = *CurInst->getParent();

  // We reached the start of the block before finding a defining instruction.
  // There are numerous scenarios where this can happen:
  //  * Constant physical registers,
  //  * Several intrinsics that allow LLVM-IR to read arbitrary registers,
  //  * Arguments in the entry block,
  //  * Exception handling landing pads.
  // Validating all of them is too difficult, so just insert a DBG_PHI reading
  // the variable value at this position, rather than checking it makes sense.

  // Create DBG_PHI for specified physreg.
  auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
                         TII.get(TargetOpcode::DBG_PHI));
  Builder.addReg(State.first);
  unsigned NewNum = getNewDebugInstrNum();
  Builder.addImm(NewNum);
  return ApplySubregisters({NewNum, 0u});
}

void MachineFunction::finalizeDebugInstrRefs() {
  auto *TII = getSubtarget().getInstrInfo();

  auto MakeUndefDbgValue = [&](MachineInstr &MI) {
    const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
    MI.setDesc(RefII);
    MI.setDebugValueUndef();
  };

  DenseMap<Register, DebugInstrOperandPair> ArgDbgPHIs;
  for (auto &MBB : *this) {
    for (auto &MI : MBB) {
      if (!MI.isDebugRef())
        continue;

      bool IsValidRef = true;

      for (MachineOperand &MO : MI.debug_operands()) {
        if (!MO.isReg())
          continue;

        Register Reg = MO.getReg();

        // Some vregs can be deleted as redundant in the meantime. Mark those
        // as DBG_VALUE $noreg. Additionally, some normal instructions are
        // quickly deleted, leaving dangling references to vregs with no def.
        if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
          IsValidRef = false;
          break;
        }

        assert(Reg.isVirtual());
        MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);

        // If we've found a copy-like instruction, follow it back to the
        // instruction that defines the source value, see salvageCopySSA docs
        // for why this is important.
        if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
          auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
          MO.ChangeToDbgInstrRef(Result.first, Result.second);
        } else {
          // Otherwise, identify the operand number that the VReg refers to.
          unsigned OperandIdx = 0;
          for (const auto &DefMO : DefMI.operands()) {
            if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
              break;
            ++OperandIdx;
          }
          assert(OperandIdx < DefMI.getNumOperands());

          // Morph this instr ref to point at the given instruction and
          // operand.
          unsigned ID = DefMI.getDebugInstrNum();
          MO.ChangeToDbgInstrRef(ID, OperandIdx);
        }
      }

      if (!IsValidRef)
        MakeUndefDbgValue(MI);
    }
  }
}

bool MachineFunction::shouldUseDebugInstrRef() const {
  // Disable instr-ref at -O0: it's very slow (in compile time). We can still
  // have optimized code inlined into this unoptimized code, however with
  // fewer and less aggressive optimizations happening, coverage and accuracy
  // should not suffer.
  if (getTarget().getOptLevel() == CodeGenOpt::None)
    return false;

  // Don't use instr-ref if this function is marked optnone.
  if (F.hasFnAttribute(Attribute::OptimizeNone))
    return false;

  if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
    return true;

  return false;
}

bool MachineFunction::useDebugInstrRef() const {
  return UseDebugInstrRef;
}

void MachineFunction::setUseDebugInstrRef(bool Use) {
  UseDebugInstrRef = Use;
}

// Use one million as a high / reserved number.
const unsigned MachineFunction::DebugOperandMemNumber = 1000000;

/// \}

//===----------------------------------------------------------------------===//
//  MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//

/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment(0).value();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return TD.getABIIntegerTypeAlignment(64).value();
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32).value();
  case MachineJumpTableInfo::EK_Inline:
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
                               const std::vector<MachineBasicBlock*> &DestBBs) {
  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
  return JumpTables.size()-1;
}

/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
                                                  MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
  return MadeChange;
}

/// If MBB is present in any jump tables, remove it.
bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
  bool MadeChange = false;
  for (MachineJumpTableEntry &JTE : JumpTables) {
    auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
    MadeChange |= (removeBeginItr != JTE.MBBs.end());
    JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
  }
  return MadeChange;
}

/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (MachineBasicBlock *&MBB : JTE.MBBs)
    if (MBB == Old) {
      MBB = New;
      MadeChange = true;
    }
  return MadeChange;
}

void MachineJumpTableInfo::print(raw_ostream &OS) const {
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << printJumpTableEntryReference(i) << ':';
    for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
      OS << ' ' << printMBBReference(*MBB);
    if (i != e)
      OS << '\n';
  }

  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif

Printable llvm::printJumpTableEntryReference(unsigned Idx) {
  return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
}
//===----------------------------------------------------------------------===//
//  MachineConstantPool implementation
//===----------------------------------------------------------------------===//

void MachineConstantPoolValue::anchor() {}

unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
  return DL.getTypeAllocSize(Ty);
}

unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getSizeInBytes(DL);
  return DL.getTypeAllocSize(Val.ConstVal->getType());
}

bool MachineConstantPoolEntry::needsRelocation() const {
  if (isMachineConstantPoolEntry())
    return true;
  return Val.ConstVal->needsDynamicRelocation();
}

SectionKind
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
  if (needsRelocation())
    return SectionKind::getReadOnlyWithRel();
  switch (getSizeInBytes(*DL)) {
  case 4:
    return SectionKind::getMergeableConst4();
  case 8:
    return SectionKind::getMergeableConst8();
  case 16:
    return SectionKind::getMergeableConst16();
  case 32:
    return SectionKind::getMergeableConst32();
  default:
    return SectionKind::getReadOnly();
  }
}

MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (const MachineConstantPoolEntry &C : Constants)
    if (C.isMachineConstantPoolEntry()) {
      Deleted.insert(C.Val.MachineCPVal);
      delete C.Val.MachineCPVal;
    }
  for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
    if (Deleted.count(CPV) == 0)
      delete CPV;
  }
}

/// Test whether the given two constants can be allocated the same constant
/// pool entry referenced by \p A.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout &DL) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them.
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
    return false;

  // For now, only support constants with the same size.
  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
    return false;

  bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();

  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer. If we
  // get two identical ConstantInt's, then we are good to share them. We use
  // the constant folding APIs to do this so that we get the benefit of
  // DataLayout.
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(A), IntTy, DL);
  else if (A->getType() != IntTy)
    A = ConstantFoldCastOperand(Instruction::BitCast,
                                const_cast<Constant *>(A), IntTy, DL);
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(B), IntTy, DL);
  else if (B->getType() != IntTy)
    B = ConstantFoldCastOperand(Instruction::BitCast,
                                const_cast<Constant *>(B), IntTy, DL);

  if (A != B)
    return false;

  // Constants only safely match if A doesn't contain undef/poison.
  // As we'll be reusing A, it doesn't matter if B contains undef/poison.
  // TODO: Handle cases where A and B have the same undef/poison elements.
  // TODO: Merge A and B with mismatching undef/poison elements.
  return !ContainsUndefOrPoisonA;
}
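// For example, an i32 constant holding 0x3F800000 and the float constant 1.0
// have the same 4-byte store size and fold to the same integer bit pattern,
// so the function above lets them share a single pool entry.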
/// Create a new entry in the constant pool or return an existing one.
/// User must specify the minimum required alignment for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (!Constants[i].isMachineConstantPoolEntry() &&
        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
      if (Constants[i].getAlign() < Alignment)
        Constants[i].Alignment = Alignment;
      return i;
    }

  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
  return Constants.size()-1;
}

unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  int Idx = V->getExistingMachineCPValue(this, Alignment);
  if (Idx != -1) {
    MachineCPVsSharingEntries.insert(V);
    return (unsigned)Idx;
  }

  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
  return Constants.size()-1;
}

void MachineConstantPool::print(raw_ostream &OS) const {
  if (Constants.empty()) return;

  OS << "Constant Pool:\n";
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    OS << "  cp#" << i << ": ";
    if (Constants[i].isMachineConstantPoolEntry())
      Constants[i].Val.MachineCPVal->print(OS);
    else
      Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
    OS << ", align=" << Constants[i].getAlign().value();
    OS << "\n";
  }
}

//===----------------------------------------------------------------------===//
//  Template specialization for MachineFunction implementation of
//  ProfileSummaryInfo::getEntryCount().
1511 //===----------------------------------------------------------------------===// 1512 template <> 1513 std::optional<Function::ProfileCount> 1514 ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>( 1515 const llvm::MachineFunction *F) const { 1516 return F->getFunction().getEntryCount(); 1517 } 1518 1519 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1520 LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); } 1521 #endif 1522