//===- MachineFunction.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/PseudoSourceValueManager.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "LiveDebugValues/LiveDebugValues.h"

using namespace llvm;

#define DEBUG_TYPE "codegen"

static cl::opt<unsigned> AlignAllFunctions(
    "align-all-functions",
4 " 88 "means align on 16B boundaries)."), 89 cl::init(0), cl::Hidden); 90 91 static const char *getPropertyName(MachineFunctionProperties::Property Prop) { 92 using P = MachineFunctionProperties::Property; 93 94 // clang-format off 95 switch(Prop) { 96 case P::FailedISel: return "FailedISel"; 97 case P::IsSSA: return "IsSSA"; 98 case P::Legalized: return "Legalized"; 99 case P::NoPHIs: return "NoPHIs"; 100 case P::NoVRegs: return "NoVRegs"; 101 case P::RegBankSelected: return "RegBankSelected"; 102 case P::Selected: return "Selected"; 103 case P::TracksLiveness: return "TracksLiveness"; 104 case P::TiedOpsRewritten: return "TiedOpsRewritten"; 105 case P::FailsVerification: return "FailsVerification"; 106 case P::TracksDebugUserValues: return "TracksDebugUserValues"; 107 } 108 // clang-format on 109 llvm_unreachable("Invalid machine function property"); 110 } 111 112 void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo) { 113 if (!F.hasFnAttribute(Attribute::SafeStack)) 114 return; 115 116 auto *Existing = 117 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation)); 118 119 if (!Existing || Existing->getNumOperands() != 2) 120 return; 121 122 auto *MetadataName = "unsafe-stack-size"; 123 if (auto &N = Existing->getOperand(0)) { 124 if (N.equalsStr(MetadataName)) { 125 if (auto &Op = Existing->getOperand(1)) { 126 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue(); 127 FrameInfo.setUnsafeStackSize(Val); 128 } 129 } 130 } 131 } 132 133 // Pin the vtable to this file. 134 void MachineFunction::Delegate::anchor() {} 135 136 void MachineFunctionProperties::print(raw_ostream &OS) const { 137 const char *Separator = ""; 138 for (BitVector::size_type I = 0; I < Properties.size(); ++I) { 139 if (!Properties[I]) 140 continue; 141 OS << Separator << getPropertyName(static_cast<Property>(I)); 142 Separator = ", "; 143 } 144 } 145 146 //===----------------------------------------------------------------------===// 147 // MachineFunction implementation 148 //===----------------------------------------------------------------------===// 149 150 // Out-of-line virtual method. 151 MachineFunctionInfo::~MachineFunctionInfo() = default; 152 153 void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) { 154 MBB->getParent()->deleteMachineBasicBlock(MBB); 155 } 156 157 static inline Align getFnStackAlignment(const TargetSubtargetInfo *STI, 158 const Function &F) { 159 if (auto MA = F.getFnStackAlign()) 160 return *MA; 161 return STI->getFrameLowering()->getStackAlign(); 162 } 163 164 MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target, 165 const TargetSubtargetInfo &STI, 166 unsigned FunctionNum, MachineModuleInfo &mmi) 167 : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) { 168 FunctionNumber = FunctionNum; 169 init(); 170 } 171 172 void MachineFunction::handleInsertion(MachineInstr &MI) { 173 if (TheDelegate) 174 TheDelegate->MF_HandleInsertion(MI); 175 } 176 177 void MachineFunction::handleRemoval(MachineInstr &MI) { 178 if (TheDelegate) 179 TheDelegate->MF_HandleRemoval(MI); 180 } 181 182 void MachineFunction::handleChangeDesc(MachineInstr &MI, 183 const MCInstrDesc &TID) { 184 if (TheDelegate) 185 TheDelegate->MF_HandleChangeDesc(MI, TID); 186 } 187 188 void MachineFunction::init() { 189 // Assume the function starts in SSA form with correct liveness. 
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  Properties.set(MachineFunctionProperties::Property::TracksLiveness);
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  MFInfo = nullptr;

  // We can realign the stack if the target supports it and the user hasn't
  // explicitly asked us not to.
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F.hasFnAttribute("no-realign-stack");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
      /*ForcedRealign=*/CanRealignSP &&
          F.hasFnAttribute(Attribute::StackAlignment));

  setUnsafeStackSize(F, *FrameInfo);

  if (F.hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
  // FIXME: Use Function::hasOptSize().
  if (!F.hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
  // to load a type hash before the function label. Ensure functions are
  // aligned by at least 4 to avoid unaligned access, which is especially
  // important for -mno-unaligned-access.
  if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
      F.getMetadata(LLVMContext::MD_kcfi_type))
    Alignment = std::max(Alignment, Align(4));

  if (AlignAllFunctions)
    Alignment = Align(1ULL << AlignAllFunctions);

  JumpTableInfo = nullptr;

  if (isFuncletEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  if (isScopedEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WasmEHInfo = new (Allocator) WasmEHFuncInfo();
  }

  assert(Target.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
}

void MachineFunction::initTargetMachineFunctionInfo(
    const TargetSubtargetInfo &STI) {
  assert(!MFInfo && "MachineFunctionInfo already set");
  MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
}

MachineFunction::~MachineFunction() {
  clear();
}

void MachineFunction::clear() {
  Properties.reset();
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, it contains std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();
  MBBNumbering.clear();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  CodeViewAnnotations.clear();
  VariableDbgInfos.clear();
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }

  if (WasmEHInfo) {
    WasmEHInfo->~WasmEHFuncInfo();
    Allocator.Deallocate(WasmEHInfo);
  }
}

const DataLayout &MachineFunction::getDataLayout() const {
  return F.getParent()->getDataLayout();
}

/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
  if (JumpTableInfo) return JumpTableInfo;

  JumpTableInfo = new (Allocator)
    MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
  return JumpTableInfo;
}

DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
  return F.getDenormalMode(FPType);
}

/// Should we be emitting segmented stack stuff for the function
bool MachineFunction::shouldSplitStack() const {
  return getFunction().hasFnAttribute("split-stack");
}

[[nodiscard]] unsigned
MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
  FrameInstructions.push_back(Inst);
  return FrameInstructions.size() - 1;
}

/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function. If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered. If we have compactified the block
  // numbering, shrink MBBNumbering now.
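  // (Illustrative) e.g. blocks left numbered {0, 1, 3, 5} after deletions are
  // renumbered {0, 1, 2, 3}, and MBBNumbering shrinks to four entries below.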
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}

/// This method iterates over the basic blocks and assigns their IsBeginSection
/// and IsEndSection fields. This must be called after MBB layout is finalized
/// and the SectionID's are assigned to MBBs.
void MachineFunction::assignBeginEndSections() {
  front().setIsBeginSection();
  auto CurrentSectionID = front().getSectionID();
  for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
    if (MBBI->getSectionID() == CurrentSectionID)
      continue;
    MBBI->setIsBeginSection();
    std::prev(MBBI)->setIsEndSection();
    CurrentSectionID = MBBI->getSectionID();
  }
  back().setIsEndSection();
}

/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  DebugLoc DL,
                                                  bool NoImplicit) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, MCID, std::move(DL), NoImplicit);
}

/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, *Orig);
}

MachineInstr &MachineFunction::cloneMachineInstrBundle(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    const MachineInstr &Orig) {
  MachineInstr *FirstClone = nullptr;
  MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
  while (true) {
    MachineInstr *Cloned = CloneMachineInstr(&*I);
    MBB.insert(InsertBefore, Cloned);
    if (FirstClone == nullptr) {
      FirstClone = Cloned;
    } else {
      Cloned->bundleWithPred();
    }

    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  // Copy over call site info to the cloned instruction if needed. If Orig is
  // in a bundle, copyCallSiteInfo takes care of finding the call instruction
  // in the bundle.
  if (Orig.shouldUpdateCallSiteInfo())
    copyCallSiteInfo(&Orig, FirstClone);
  return *FirstClone;
}

/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
  // Verify that a call site info is at valid state. This assertion should
  // be triggered during the implementation of support for the
  // call site info of a new architecture. If the assertion is triggered,
  // back trace will tell where to insert a call to updateCallSiteInfo().
  assert((!MI->isCandidateForCallSiteEntry() || !CallSitesInfo.contains(MI)) &&
         "Call site info was not updated!");
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}
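
// (Illustrative sketch) Instruction lifetimes therefore typically look like:
//   MachineInstr *MI = MF.CreateMachineInstr(TII.get(Opcode), DL);
//   MBB.insert(InsertPt, MI);
//   ...
//   MF.deleteMachineInstr(MI);   // never plain `delete MI`
// so that both the MI object and its operand array can be recycled.
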
/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *BB,
                                         std::optional<UniqueBBID> BBID) {
  MachineBasicBlock *MBB =
      new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
          MachineBasicBlock(*this, BB);
  // Set BBID for `-basic-block-sections=labels` and
  // `-basic-block-sections=list` to allow robust mapping of profiles to basic
  // blocks.
  if (Target.getBBSectionsType() == BasicBlockSection::Labels ||
      Target.getBBSectionsType() == BasicBlockSection::List)
    MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
  return MBB;
}

/// Delete the given MachineBasicBlock.
void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  // Clean up any references to MBB in jump tables before deleting it.
  if (JumpTableInfo)
    JumpTableInfo->RemoveMBBFromJumpTables(MBB);
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
                        SSID, Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
                        Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo,
    uint64_t Size) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, LLT Ty) {
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  // If there is no pointer value, the offset isn't tracked so we need to
  // adjust the base alignment.
  Align Alignment = PtrInfo.V.isNull()
                        ? commonAlignment(MMO->getBaseAlign(), Offset)
                        : MMO->getBaseAlign();

  // Do not preserve ranges, since we don't necessarily know what the high bits
  // are anymore.
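  // (Illustrative) e.g. offsetting an MMO that has no IR value, base alignment
  // 16 and Offset 4 yields commonAlignment(16, 4) == Align(4), and the result
  // below carries no range metadata.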
  return new (Allocator) MachineMemOperand(
      PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
      MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      const AAMDNodes &AAInfo) {
  MachinePointerInfo MPI = MMO->getValue() ?
             MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
             MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());

  return new (Allocator) MachineMemOperand(
      MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
      MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      MachineMemOperand::Flags Flags) {
  return new (Allocator) MachineMemOperand(
      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
    ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
    MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
    uint32_t CFIType) {
  return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
                                         PostInstrSymbol, HeapAllocMarker,
                                         PCSections, CFIType);
}

const char *MachineFunction::createExternalSymbolName(StringRef Name) {
  char *Dest = Allocator.Allocate<char>(Name.size() + 1);
  llvm::copy(Name, Dest);
  Dest[Name.size()] = 0;
  return Dest;
}

uint32_t *MachineFunction::allocateRegMask() {
  unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
  unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
  uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
  memset(Mask, 0, Size * sizeof(Mask[0]));
  return Mask;
}

ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
  int *AllocMask = Allocator.Allocate<int>(Mask.size());
  copy(Mask, AllocMask);
  return {AllocMask, Mask.size()};
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif

StringRef MachineFunction::getName() const {
  return getFunction().getName();
}

void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  getProperties().print(OS);
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << printReg(I->first, TRI);
      if (I->second)
        OS << " in " << printReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  ModuleSlotTracker MST(getFunction().getParent());
  MST.incorporateFunction(getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    // If we print the whole function, print it at its most verbose level.
    BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}

/// True if this function needs frame moves for debug or exceptions.
bool MachineFunction::needsFrameMoves() const {
  return getMMI().hasDebugInfo() ||
         getTarget().Options.ForceDwarfFrameSection ||
         F.needsUnwindTableEntry();
}

namespace llvm {

template<>
struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const MachineFunction *F) {
    return ("CFG for '" + F->getName() + "' function").str();
  }

  std::string getNodeLabel(const MachineBasicBlock *Node,
                           const MachineFunction *Graph) {
    std::string OutStr;
    {
      raw_string_ostream OSS(OutStr);

      if (isSimple()) {
        OSS << printMBBReference(*Node);
        if (const BasicBlock *BB = Node->getBasicBlock())
          OSS << ": " << BB->getName();
      } else
        Node->print(OSS);
    }

    if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

    // Process string output to make it nicer...
    for (unsigned i = 0; i != OutStr.length(); ++i)
      if (OutStr[i] == '\n') { // Left justify
        OutStr[i] = '\\';
        OutStr.insert(OutStr.begin()+i+1, 'l');
      }
    return OutStr;
  }
};

} // end namespace llvm

void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
Register MachineFunction::addLiveIn(MCRegister PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  Register VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
           "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}

/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  const DataLayout &DL = getDataLayout();
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
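  // (Illustrative) with the usual ELF private prefix ".L", function number 3
  // and JTI 0, the symbol built below is named ".LJTI3_0".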
  StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
                                     : DL.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
      << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.getOrCreateSymbol(Name);
}

/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}

/// \name Exception Handling
/// \{

LandingPadInfo &
MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
  unsigned N = LandingPads.size();
  for (unsigned i = 0; i < N; ++i) {
    LandingPadInfo &LP = LandingPads[i];
    if (LP.LandingPadBlock == LandingPad)
      return LP;
  }

  LandingPads.push_back(LandingPadInfo(LandingPad));
  return LandingPads[N];
}

void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
                                MCSymbol *BeginLabel, MCSymbol *EndLabel) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.BeginLabels.push_back(BeginLabel);
  LP.EndLabels.push_back(EndLabel);
}

MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
  MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.LandingPadLabel = LandingPadLabel;

  const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
  if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
    // If there's no typeid list specified, then "cleanup" is implicit.
    // Otherwise, id 0 is reserved for the cleanup action.
    if (LPI->isCleanup() && LPI->getNumClauses() != 0)
      LP.TypeIds.push_back(0);

    // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
    // correct, but we need to do it this way because of how the DWARF EH
    // emitter processes the clauses.
    for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
      Value *Val = LPI->getClause(I - 1);
      if (LPI->isCatch(I - 1)) {
        LP.TypeIds.push_back(
            getTypeIDFor(dyn_cast<GlobalValue>(Val->stripPointerCasts())));
      } else {
        // Add filters in a list.
        auto *CVal = cast<Constant>(Val);
        SmallVector<unsigned, 4> FilterList;
        for (const Use &U : CVal->operands())
          FilterList.push_back(
              getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));

        LP.TypeIds.push_back(getFilterIDFor(FilterList));
      }
    }

  } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
    for (unsigned I = CPI->arg_size(); I != 0; --I) {
      auto *TypeInfo =
          dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
      LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
    }

  } else {
    assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
  }

  return LandingPadLabel;
}

void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
                                            ArrayRef<unsigned> Sites) {
  LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}

unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
  for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
    if (TypeInfos[i] == TI) return i + 1;

  TypeInfos.push_back(TI);
  return TypeInfos.size();
}

int MachineFunction::getFilterIDFor(ArrayRef<unsigned> TyIds) {
  // If the new filter coincides with the tail of an existing filter, then
  // re-use the existing filter. Folding filters more than this requires
  // re-ordering filters and/or their elements - probably not worth it.
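  // (Illustrative) FilterIds stores all filters back to back, each terminated
  // by 0, and a filter's ID is -(1 + its start index). E.g. after adding the
  // filter {5, 7}: FilterIds == {5, 7, 0}, FilterEnds == {2}, ID == -1; a
  // later request for {7} matches the existing tail and returns -2.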
  for (unsigned i : FilterEnds) {
    unsigned j = TyIds.size();

    while (i && j)
      if (FilterIds[--i] != TyIds[--j])
        goto try_next;

    if (!j)
      // The new filter coincides with range [i, end) of the existing filter.
      return -(1 + i);

try_next:;
  }

  // Add the new filter.
  int FilterID = -(1 + FilterIds.size());
  FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
  llvm::append_range(FilterIds, TyIds);
  FilterEnds.push_back(FilterIds.size());
  FilterIds.push_back(0); // terminator
  return FilterID;
}

MachineFunction::CallSiteInfoMap::iterator
MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
  assert(MI->isCandidateForCallSiteEntry() &&
         "Call site info refers only to call (MI) candidates");

  if (!Target.Options.EmitCallSiteInfo)
    return CallSitesInfo.end();
  return CallSitesInfo.find(MI);
}

/// Return the call machine instruction or find a call within bundle.
static const MachineInstr *getCallInstr(const MachineInstr *MI) {
  if (!MI->isBundle())
    return MI;

  for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
                                    getBundleEnd(MI->getIterator())))
    if (BMI.isCandidateForCallSiteEntry())
      return &BMI;

  llvm_unreachable("Unexpected bundle without a call site candidate");
}

void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
  assert(MI->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  const MachineInstr *CallMI = getCallInstr(MI);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
  if (CSIt == CallSitesInfo.end())
    return;
  CallSitesInfo.erase(CSIt);
}

void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = CSIt->second;
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = std::move(CSIt->second);
  CallSitesInfo.erase(CSIt);
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
  DebugInstrNumberingCount = Num;
}

void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
                                                 DebugInstrOperandPair B,
                                                 unsigned Subreg) {
  // Catch any accidental self-loops.
  assert(A.first != B.first);
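  // (Illustrative) a substitution {{1, 0}, {2, 0}, 0} redirects debug users
  // that refer to instruction number 1, operand 0, to instruction number 2,
  // operand 0; a nonzero Subreg additionally qualifies which part of that
  // value is meant.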
  // Don't allow any substitutions _from_ the memory operand number.
  assert(A.second != DebugOperandMemNumber);

  DebugValueSubstitutions.push_back({A, B, Subreg});
}

void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
                                                   MachineInstr &New,
                                                   unsigned MaxOperand) {
  // If the Old instruction wasn't tracked at all, there is no work to do.
  unsigned OldInstrNum = Old.peekDebugInstrNum();
  if (!OldInstrNum)
    return;

  // Iterate over all operands looking for defs to create substitutions for.
  // Avoid creating new instr numbers unless we create a new substitution.
  // While this has no functional effect, it risks confusing someone reading
  // MIR output.
  // Examine all the operands, or the first N specified by the caller.
  MaxOperand = std::min(MaxOperand, Old.getNumOperands());
  for (unsigned int I = 0; I < MaxOperand; ++I) {
    const auto &OldMO = Old.getOperand(I);
    auto &NewMO = New.getOperand(I);
    (void)NewMO;

    if (!OldMO.isReg() || !OldMO.isDef())
      continue;
    assert(NewMO.isDef());

    unsigned NewInstrNum = New.getDebugInstrNum();
    makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
                               std::make_pair(NewInstrNum, I));
  }
}

auto MachineFunction::salvageCopySSA(
    MachineInstr &MI, DenseMap<Register, DebugInstrOperandPair> &DbgPHICache)
    -> DebugInstrOperandPair {
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Check whether this copy-like instruction has already been salvaged into
  // an operand pair.
  Register Dest;
  if (auto CopyDstSrc = TII.isCopyInstr(MI)) {
    Dest = CopyDstSrc->Destination->getReg();
  } else {
    assert(MI.isSubregToReg());
    Dest = MI.getOperand(0).getReg();
  }

  auto CacheIt = DbgPHICache.find(Dest);
  if (CacheIt != DbgPHICache.end())
    return CacheIt->second;

  // Calculate the instruction number to use, or install a DBG_PHI.
  auto OperandPair = salvageCopySSAImpl(MI);
  DbgPHICache.insert({Dest, OperandPair});
  return OperandPair;
}

auto MachineFunction::salvageCopySSAImpl(MachineInstr &MI)
    -> DebugInstrOperandPair {
  MachineRegisterInfo &MRI = getRegInfo();
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Chase the value read by a copy-like instruction back to the instruction
  // that ultimately _defines_ that value. This may pass:
  //  * Through multiple intermediate copies, including subregister moves /
  //    copies,
  //  * Copies from physical registers that must then be traced back to the
  //    defining instruction,
  //  * Or, physical registers may be live-in to (only) the entry block, which
  //    requires a DBG_PHI to be created.
  // We can pursue this problem in that order: trace back through copies,
  // optionally through a physical register, to a defining instruction. We
  // should never move from physreg to vreg. As we're still in SSA form, no
  // need to worry about partial definitions of registers.

  // Helper lambda to interpret a copy-like instruction. Takes instruction,
  // returns the register read and any subregister identifying which part is
  // read.
  auto GetRegAndSubreg =
      [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
    Register NewReg, OldReg;
    unsigned SubReg;
    if (Cpy.isCopy()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(1).getReg();
      SubReg = Cpy.getOperand(1).getSubReg();
    } else if (Cpy.isSubregToReg()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(2).getReg();
      SubReg = Cpy.getOperand(3).getImm();
    } else {
      auto CopyDetails = *TII.isCopyInstr(Cpy);
      const MachineOperand &Src = *CopyDetails.Source;
      const MachineOperand &Dest = *CopyDetails.Destination;
      OldReg = Dest.getReg();
      NewReg = Src.getReg();
      SubReg = Src.getSubReg();
    }

    return {NewReg, SubReg};
  };

  // First seek either the defining instruction, or a copy from a physreg.
  // During search, the current state is the current copy instruction, and
  // which register we've read. Accumulate qualifying subregisters into
  // SubregsSeen; deal with those later.
  auto State = GetRegAndSubreg(MI);
  auto CurInst = MI.getIterator();
  SmallVector<unsigned, 4> SubregsSeen;
  while (true) {
    // If we've found a copy from a physreg, first portion of search is over.
    if (!State.first.isVirtual())
      break;

    // Record any subregister qualifier.
    if (State.second)
      SubregsSeen.push_back(State.second);

    assert(MRI.hasOneDef(State.first));
    MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
    CurInst = Inst.getIterator();

    // Any non-copy instruction is the defining instruction we're seeking.
    if (!Inst.isCopyLike() && !TII.isCopyInstr(Inst))
      break;
    State = GetRegAndSubreg(Inst);
  };

  // Helper lambda to apply additional subregister substitutions to a known
  // instruction/operand pair. Adds new (fake) substitutions so that we can
  // record the subregister. FIXME: this isn't very space efficient if multiple
  // values are tracked back through the same copies; cache something later.
  auto ApplySubregisters =
      [&](DebugInstrOperandPair P) -> DebugInstrOperandPair {
    for (unsigned Subreg : reverse(SubregsSeen)) {
      // Fetch a new instruction number, not attached to an actual instruction.
      unsigned NewInstrNumber = getNewDebugInstrNum();
      // Add a substitution from the "new" number to the known one, with a
      // qualifying subreg.
      makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
      // Return the new number; to find the underlying value, consumers need to
      // deal with the qualifying subreg.
      P = {NewInstrNumber, 0};
    }
    return P;
  };

  // If we managed to find the defining instruction after COPYs, return an
  // instruction / operand pair after adding subregister qualifiers.
  if (State.first.isVirtual()) {
    // Virtual register def -- we can just look up where this happens.
    MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
    for (auto &MO : Inst->all_defs()) {
      if (MO.getReg() != State.first)
        continue;
      return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
    }

    llvm_unreachable("Vreg def with no corresponding operand?");
  }

  // Our search ended in a copy from a physreg: walk back up the function
  // looking for whatever defines the physreg.
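  // (Illustrative, x86-64 style) e.g. for an argument copy
  // "%0:gr64 = COPY $rdi" in the entry block there is usually no instruction
  // defining $rdi, so the walk below finds nothing and a DBG_PHI reading $rdi
  // is inserted instead.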
  assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
  State = GetRegAndSubreg(*CurInst);
  Register RegToSeek = State.first;

  auto RMII = CurInst->getReverseIterator();
  auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
  for (auto &ToExamine : PrevInstrs) {
    for (auto &MO : ToExamine.all_defs()) {
      // Test for operand that defines something aliasing RegToSeek.
      if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
        continue;

      return ApplySubregisters(
          {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
    }
  }

  MachineBasicBlock &InsertBB = *CurInst->getParent();

  // We reached the start of the block before finding a defining instruction.
  // There are numerous scenarios where this can happen:
  // * Constant physical registers,
  // * Several intrinsics that allow LLVM-IR to read arbitrary registers,
  // * Arguments in the entry block,
  // * Exception handling landing pads.
  // Validating all of them is too difficult, so just insert a DBG_PHI reading
  // the variable value at this position, rather than checking it makes sense.

  // Create DBG_PHI for specified physreg.
  auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
                         TII.get(TargetOpcode::DBG_PHI));
  Builder.addReg(State.first);
  unsigned NewNum = getNewDebugInstrNum();
  Builder.addImm(NewNum);
  return ApplySubregisters({NewNum, 0u});
}

void MachineFunction::finalizeDebugInstrRefs() {
  auto *TII = getSubtarget().getInstrInfo();

  auto MakeUndefDbgValue = [&](MachineInstr &MI) {
    const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
    MI.setDesc(RefII);
    MI.setDebugValueUndef();
  };

  DenseMap<Register, DebugInstrOperandPair> ArgDbgPHIs;
  for (auto &MBB : *this) {
    for (auto &MI : MBB) {
      if (!MI.isDebugRef())
        continue;

      bool IsValidRef = true;

      for (MachineOperand &MO : MI.debug_operands()) {
        if (!MO.isReg())
          continue;

        Register Reg = MO.getReg();

        // Some vregs can be deleted as redundant in the meantime. Mark those
        // as DBG_VALUE $noreg. Additionally, some normal instructions are
        // quickly deleted, leaving dangling references to vregs with no def.
        if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
          IsValidRef = false;
          break;
        }

        assert(Reg.isVirtual());
        MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);

        // If we've found a copy-like instruction, follow it back to the
        // instruction that defines the source value, see salvageCopySSA docs
        // for why this is important.
        if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
          auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
          MO.ChangeToDbgInstrRef(Result.first, Result.second);
        } else {
          // Otherwise, identify the operand number that the VReg refers to.
          unsigned OperandIdx = 0;
          for (const auto &DefMO : DefMI.operands()) {
            if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
              break;
            ++OperandIdx;
          }
          assert(OperandIdx < DefMI.getNumOperands());

          // Morph this instr ref to point at the given instruction and
          // operand.
          unsigned ID = DefMI.getDebugInstrNum();
          MO.ChangeToDbgInstrRef(ID, OperandIdx);
        }
      }

      if (!IsValidRef)
        MakeUndefDbgValue(MI);
    }
  }
}

bool MachineFunction::shouldUseDebugInstrRef() const {
  // Disable instr-ref at -O0: it's very slow (in compile time). We can still
  // have optimized code inlined into this unoptimized code, however with
  // fewer and less aggressive optimizations happening, coverage and accuracy
  // should not suffer.
  if (getTarget().getOptLevel() == CodeGenOptLevel::None)
    return false;

  // Don't use instr-ref if this function is marked optnone.
  if (F.hasFnAttribute(Attribute::OptimizeNone))
    return false;

  if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
    return true;

  return false;
}

bool MachineFunction::useDebugInstrRef() const {
  return UseDebugInstrRef;
}

void MachineFunction::setUseDebugInstrRef(bool Use) {
  UseDebugInstrRef = Use;
}

// Use one million as a high / reserved number.
const unsigned MachineFunction::DebugOperandMemNumber = 1000000;

/// \}

//===----------------------------------------------------------------------===//
//  MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//

/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference64:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment(0).value();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference64:
    return TD.getABIIntegerTypeAlignment(64).value();
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32).value();
  case MachineJumpTableInfo::EK_Inline:
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
                               const std::vector<MachineBasicBlock*> &DestBBs) {
  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
  return JumpTables.size()-1;
}

/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
                                                  MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
  return MadeChange;
}

/// If MBB is present in any jump tables, remove it.
bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
  bool MadeChange = false;
  for (MachineJumpTableEntry &JTE : JumpTables) {
    auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
    MadeChange |= (removeBeginItr != JTE.MBBs.end());
    JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
  }
  return MadeChange;
}

/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (MachineBasicBlock *&MBB : JTE.MBBs)
    if (MBB == Old) {
      MBB = New;
      MadeChange = true;
    }
  return MadeChange;
}

void MachineJumpTableInfo::print(raw_ostream &OS) const {
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << printJumpTableEntryReference(i) << ':';
    for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
      OS << ' ' << printMBBReference(*MBB);
    if (i != e)
      OS << '\n';
  }

  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif

Printable llvm::printJumpTableEntryReference(unsigned Idx) {
  return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
}
<< Idx; }); 1356 } 1357 1358 //===----------------------------------------------------------------------===// 1359 // MachineConstantPool implementation 1360 //===----------------------------------------------------------------------===// 1361 1362 void MachineConstantPoolValue::anchor() {} 1363 1364 unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const { 1365 return DL.getTypeAllocSize(Ty); 1366 } 1367 1368 unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const { 1369 if (isMachineConstantPoolEntry()) 1370 return Val.MachineCPVal->getSizeInBytes(DL); 1371 return DL.getTypeAllocSize(Val.ConstVal->getType()); 1372 } 1373 1374 bool MachineConstantPoolEntry::needsRelocation() const { 1375 if (isMachineConstantPoolEntry()) 1376 return true; 1377 return Val.ConstVal->needsDynamicRelocation(); 1378 } 1379 1380 SectionKind 1381 MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const { 1382 if (needsRelocation()) 1383 return SectionKind::getReadOnlyWithRel(); 1384 switch (getSizeInBytes(*DL)) { 1385 case 4: 1386 return SectionKind::getMergeableConst4(); 1387 case 8: 1388 return SectionKind::getMergeableConst8(); 1389 case 16: 1390 return SectionKind::getMergeableConst16(); 1391 case 32: 1392 return SectionKind::getMergeableConst32(); 1393 default: 1394 return SectionKind::getReadOnly(); 1395 } 1396 } 1397 1398 MachineConstantPool::~MachineConstantPool() { 1399 // A constant may be a member of both Constants and MachineCPVsSharingEntries, 1400 // so keep track of which we've deleted to avoid double deletions. 1401 DenseSet<MachineConstantPoolValue*> Deleted; 1402 for (const MachineConstantPoolEntry &C : Constants) 1403 if (C.isMachineConstantPoolEntry()) { 1404 Deleted.insert(C.Val.MachineCPVal); 1405 delete C.Val.MachineCPVal; 1406 } 1407 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) { 1408 if (Deleted.count(CPV) == 0) 1409 delete CPV; 1410 } 1411 } 1412 1413 /// Test whether the given two constants can be allocated the same constant pool 1414 /// entry referenced by \param A. 1415 static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, 1416 const DataLayout &DL) { 1417 // Handle the trivial case quickly. 1418 if (A == B) return true; 1419 1420 // If they have the same type but weren't the same constant, quickly 1421 // reject them. 1422 if (A->getType() == B->getType()) return false; 1423 1424 // We can't handle structs or arrays. 1425 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) || 1426 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType())) 1427 return false; 1428 1429 // For now, only support constants with the same size. 1430 uint64_t StoreSize = DL.getTypeStoreSize(A->getType()); 1431 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128) 1432 return false; 1433 1434 bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement(); 1435 1436 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8); 1437 1438 // Try constant folding a bitcast of both instructions to an integer. If we 1439 // get two identical ConstantInt's, then we are good to share them. We use 1440 // the constant folding APIs to do this so that we get the benefit of 1441 // DataLayout. 
1442 if (isa<PointerType>(A->getType())) 1443 A = ConstantFoldCastOperand(Instruction::PtrToInt, 1444 const_cast<Constant *>(A), IntTy, DL); 1445 else if (A->getType() != IntTy) 1446 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A), 1447 IntTy, DL); 1448 if (isa<PointerType>(B->getType())) 1449 B = ConstantFoldCastOperand(Instruction::PtrToInt, 1450 const_cast<Constant *>(B), IntTy, DL); 1451 else if (B->getType() != IntTy) 1452 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B), 1453 IntTy, DL); 1454 1455 if (A != B) 1456 return false; 1457 1458 // Constants only safely match if A doesn't contain undef/poison. 1459 // As we'll be reusing A, it doesn't matter if B contain undef/poison. 1460 // TODO: Handle cases where A and B have the same undef/poison elements. 1461 // TODO: Merge A and B with mismatching undef/poison elements. 1462 return !ContainsUndefOrPoisonA; 1463 } 1464 1465 /// Create a new entry in the constant pool or return an existing one. 1466 /// User must specify the log2 of the minimum required alignment for the object. 1467 unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C, 1468 Align Alignment) { 1469 if (Alignment > PoolAlignment) PoolAlignment = Alignment; 1470 1471 // Check to see if we already have this constant. 1472 // 1473 // FIXME, this could be made much more efficient for large constant pools. 1474 for (unsigned i = 0, e = Constants.size(); i != e; ++i) 1475 if (!Constants[i].isMachineConstantPoolEntry() && 1476 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) { 1477 if (Constants[i].getAlign() < Alignment) 1478 Constants[i].Alignment = Alignment; 1479 return i; 1480 } 1481 1482 Constants.push_back(MachineConstantPoolEntry(C, Alignment)); 1483 return Constants.size()-1; 1484 } 1485 1486 unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V, 1487 Align Alignment) { 1488 if (Alignment > PoolAlignment) PoolAlignment = Alignment; 1489 1490 // Check to see if we already have this constant. 1491 // 1492 // FIXME, this could be made much more efficient for large constant pools. 1493 int Idx = V->getExistingMachineCPValue(this, Alignment); 1494 if (Idx != -1) { 1495 MachineCPVsSharingEntries.insert(V); 1496 return (unsigned)Idx; 1497 } 1498 1499 Constants.push_back(MachineConstantPoolEntry(V, Alignment)); 1500 return Constants.size()-1; 1501 } 1502 1503 void MachineConstantPool::print(raw_ostream &OS) const { 1504 if (Constants.empty()) return; 1505 1506 OS << "Constant Pool:\n"; 1507 for (unsigned i = 0, e = Constants.size(); i != e; ++i) { 1508 OS << " cp#" << i << ": "; 1509 if (Constants[i].isMachineConstantPoolEntry()) 1510 Constants[i].Val.MachineCPVal->print(OS); 1511 else 1512 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false); 1513 OS << ", align=" << Constants[i].getAlign().value(); 1514 OS << "\n"; 1515 } 1516 } 1517 1518 //===----------------------------------------------------------------------===// 1519 // Template specialization for MachineFunction implementation of 1520 // ProfileSummaryInfo::getEntryCount(). 
//===----------------------------------------------------------------------===//
template <>
std::optional<Function::ProfileCount>
ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
    const llvm::MachineFunction *F) const {
  return F->getFunction().getEntryCount();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif