//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Instruction *I) {
  // If the value is used by compare instructions and more of those compares
  // use signed predicates than unsigned ones, prefer SIGN_EXTEND.
  //
  // This lets us remove some redundant sign or zero extension instructions,
  // which in turn exposes more machine CSE opportunities.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const User *U : I->users()) {
    if (const auto *CI = dyn_cast<CmpInst>(U)) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  DA = DAG->getDivergenceAnalysis();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  CallingConv::ID CC = Fn->getCallingConv();

  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn =
      TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext());

  // If this personality uses funclets, we need to do a bit more work.
  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Collect the catch object allocas so that their frame indices can be
    // filled in once the allocas have been assigned frame indices below.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects.insert({AI, {}}).first->second.push_back(
              &H.CatchObj.FrameIndex);
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  const Align StackAlign = TFI->getStackAlign();
  for (const BasicBlock &BB : *Fn) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        Type *Ty = AI->getAllocatedType();
        Align TyPrefAlign = MF->getDataLayout().getPrefTypeAlign(Ty);
        // The "specified" alignment is the alignment written on the alloca,
        // or the preferred alignment of the type if none is specified.
        //
        // (Unspecified alignment on allocas will be going away soon.)
        Align SpecifiedAlign = AI->getAlign();

        // If the preferred alignment of the type is higher than the specified
        // alignment of the alloca, promote the alignment, as long as it
        // doesn't require realigning the stack.
        //
        // FIXME: Do we really want to second-guess the IR in isel?
        Align Alignment =
            std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign);

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          uint64_t TySize =
              MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinValue();

          TySize *= CUI->getZExtValue();   // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo().CreateFixedObject(
                TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo().setObjectAlignment(FrameIndex, Alignment);
          } else {
            FrameIndex = MF->getFrameInfo().CreateStackObject(TySize, Alignment,
                                                              false, AI);
          }

          // Scalable vectors may need a special StackID to distinguish
          // them from other (fixed size) stack objects.
          if (isa<ScalableVectorType>(Ty))
            MF->getFrameInfo().setStackID(FrameIndex,
                                          TFI->getStackIDForScalableVectors());

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end()) {
            for (int *CatchObjPtr : Iter->second)
              *CatchObjPtr = FrameIndex;
          }
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo().CreateVariableSizedObject(
              Alignment <= StackAlign ? Align(1) : Alignment, AI);
        }
      } else if (auto *Call = dyn_cast<CallBase>(&I)) {
        // Look for inline asm that clobbers the SP register.
        if (Call->isInlineAsm()) {
          Register SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI,
                                    *Call);
          for (TargetLowering::AsmOperandInfo &Op : Ops) {
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
            }
          }
        }
        // Look for calls to the @llvm.va_start intrinsic. We can omit some
        // prologue boilerplate for variadic functions that don't examine their
        // arguments.
        if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
          if (II->getIntrinsicID() == Intrinsic::vastart)
            MF->getFrameInfo().setHasVAStart(true);
        }

        // If we have a musttail call in a variadic function, we need to ensure
        // we forward implicit register parameters.
        if (const auto *CI = dyn_cast<CallInst>(&I)) {
          if (CI->isMustTailCall() && Fn->isVarArg())
            MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
        }
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(&I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
          InitializeRegForValue(&I);

      // Decide the preferred extend type for a value.
      PreferredExtendType[&I] = getPreferredExtendForValue(&I);
    }
  }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (const BasicBlock &BB : *Fn) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These
    // blocks are really data, and no instructions can live here.
    if (BB.isEHPad()) {
      const Instruction *PadInst = BB.getFirstNonPHI();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create EH scope/funclets, so we could
      // avoid setting this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(PadInst)) {
        MF->setHasEHScopes(true);
        MF->setHasEHFunclets(true);
        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(PadInst)) {
        assert(&*BB.begin() == PadInst &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(PadInst))
        assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
    MBBMap[&BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    // Mark landing pad blocks.
    if (BB.isEHPad())
      MBB->setIsEHPad();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (const PHINode &PN : BB.phis()) {
      if (PN.use_empty())
        continue;

      // Skip empty types
      if (PN.getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN.getDebugLoc();
      unsigned PHIReg = ValueMap[&PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  if (isFuncletEHPersonality(Personality)) {
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

    // Map all BB references in the WinEH data to MBBs.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (H.Handler)
          H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
      }
    }
    for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
      if (UME.Cleanup)
        UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
    for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
      const auto *BB = UME.Handler.get<const BasicBlock *>();
      UME.Handler = MBBMap[BB];
    }
    for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
      const auto *BB = CME.Handler.get<const BasicBlock *>();
      CME.Handler = MBBMap[BB];
    }
  } else if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    calculateWasmEHInfo(&fn, EHInfo);

    // Map all BB references in the Wasm EH data to MBBs.
    DenseMap<BBOrMBB, BBOrMBB> SrcToUnwindDest;
    for (auto &KV : EHInfo.SrcToUnwindDest) {
      const auto *Src = KV.first.get<const BasicBlock *>();
      const auto *Dest = KV.second.get<const BasicBlock *>();
      SrcToUnwindDest[MBBMap[Src]] = MBBMap[Dest];
    }
    EHInfo.SrcToUnwindDest = std::move(SrcToUnwindDest);
    DenseMap<BBOrMBB, SmallPtrSet<BBOrMBB, 4>> UnwindDestToSrcs;
    for (auto &KV : EHInfo.UnwindDestToSrcs) {
      const auto *Dest = KV.first.get<const BasicBlock *>();
      UnwindDestToSrcs[MBBMap[Dest]] = SmallPtrSet<BBOrMBB, 4>();
      for (const auto P : KV.second)
        UnwindDestToSrcs[MBBMap[Dest]].insert(
            MBBMap[P.get<const BasicBlock *>()]);
    }
    EHInfo.UnwindDestToSrcs = std::move(UnwindDestToSrcs);
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  VirtReg2Value.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  DescribedArgs.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  RegsWithFixups.clear();
  StatepointStackSlots.clear();
  StatepointRelocationMaps.clear();
  PreferredExtendType.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
Register FunctionLoweringInfo::CreateReg(MVT VT, bool isDivergent) {
  return RegInfo->createVirtualRegister(TLI->getRegClassFor(VT, isDivergent));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
Register FunctionLoweringInfo::CreateRegs(Type *Ty, bool isDivergent) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  Register FirstReg;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      Register R = CreateReg(RegisterVT, isDivergent);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

Register FunctionLoweringInfo::CreateRegs(const Value *V) {
  return CreateRegs(V->getType(), DA && DA->isDivergent(V) &&
                                      !TLI->requiresUniformRegister(*MF, V));
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by zero extension. The bit width must be no smaller
/// than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(Register Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->Known.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->Known = LOI->Known.anyext(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  auto It = ValueMap.find(PN);
  if (It == ValueMap.end())
    return;

  Register DestReg = It->second;
  if (DestReg == 0)
    return;
  assert(DestReg.isVirtual() && "Expected a virtual reg");
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    DestLOI.Known = KnownBits(BitWidth);
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val;
    if (TLI->signExtendConstant(CI))
      Val = CI->getValue().sext(BitWidth);
    else
      Val = CI->getValue().zext(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.Known = KnownBits::makeConstant(Val);
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
         DestLOI.Known.One.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      DestLOI.Known = KnownBits(BitWidth);
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val;
      if (TLI->signExtendConstant(CI))
        Val = CI->getValue().sext(BitWidth);
      else
        Val = CI->getValue().zext(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.Known.Zero &= ~Val;
      DestLOI.Known.One &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits =
        std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.Known = KnownBits::commonBits(DestLOI.Known, SrcLOI->Known);
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides any previous frame index entry for this
/// argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then INT_MAX is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  auto I = ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return INT_MAX;
}

Register FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  Register &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

const Value *
FunctionLoweringInfo::getValueFromVirtualReg(Register Vreg) {
  if (VirtReg2Value.empty()) {
    SmallVector<EVT, 4> ValueVTs;
    for (auto &P : ValueMap) {
      ValueVTs.clear();
      ComputeValueVTs(*TLI, Fn->getParent()->getDataLayout(),
                      P.first->getType(), ValueVTs);
      unsigned Reg = P.second;
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          VirtReg2Value[Reg++] = P.first;
      }
    }
  }
  return VirtReg2Value.lookup(Vreg);
}