//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit
/// register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the
    // value ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI =
        static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      LLT Ty = MRI.getType(ExtReg);
      LLT S32 = LLT::scalar(32);
      if (Ty != S32) {
        // FIXME: We should probably support readfirstlane intrinsics with all
        // legal 32-bit types.
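        // Normalize 32-bit pointers and other 32-bit types to s32 so the
        // readfirstlane below operates on a plain scalar.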
        assert(Ty.getSizeInBits() == 32);
        if (Ty.isPointer())
          ExtReg = MIRBuilder.buildPtrToInt(S32, ExtReg).getReg(0);
        else
          ExtReg = MIRBuilder.buildBitcast(S32, ExtReg).getReg(0);
      }

      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack-passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about
      // it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal parameters
  /// (it's a basic-block live-in) and a call return value (it's an
  /// implicit-def of the call instruction).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (!SPReg) {
      const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
      if (ST.enableFlatScratch()) {
        // The stack is accessed unswizzled, so we can use a regular copy.
        SPReg = MIRBuilder.buildCopy(PtrTy,
                                     MFI->getStackPtrOffsetReg()).getReg(0);
      } else {
        // The address we produce here, without knowing the use context, is
        // going to be interpreted as a vector address, so we need to convert
        // to a swizzled address.
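        // G_AMDGPU_WAVE_ADDRESS materializes the stack pointer as that
        // swizzled, per-lane private address.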
        SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
                                      {MFI->getStackPtrOffsetReg()}).getReg(0);
      }
    }

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
};
}

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
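/// \return true on success.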
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueAssigner Assigner(AssignFn);
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
  return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
                                       CC, F.isVarArg());
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {

  MachineFunction &MF = B.getMF();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
        .addImm(0);
    return true;
  }

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN;
  auto Ret = B.buildInstrNoInsert(ReturnOpc);

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
                                        uint64_t Offset,
                                        Align Alignment) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  SmallVector<ArgInfo, 32> SplitArgs;
  SmallVector<uint64_t> FieldOffsets;
  splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);

  unsigned Idx = 0;
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
    if (SplitArg.Flags[0].isPointer()) {
      // Compensate for losing pointeriness in splitValueTypes.
      LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
                               ArgTy.getScalarSizeInBits());
      ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
                               : PtrTy;
    }

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
    ++Idx;
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
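  // The allocation order below follows the user SGPR layout in the HSA kernel
  // descriptor: private segment buffer, dispatch pointer, queue pointer,
  // kernarg segment pointer, dispatch ID, then flat scratch init.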
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateKnownAddressLDSGlobal(F);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byref pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
      setArgFlags(OrigArg, OrigArgIdx, DL, F);
      lowerParameter(B, OrigArg, ArgOffset, Alignment);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateKnownAddressLDSGlobal(F);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: This probably isn't defined for mesa
  if (Info->hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
    Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
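  // The demoted return value is later stored through this pointer when the
  // return is lowered (see insertSRetStores in lowerReturn).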
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to non-graphics functions are not implemented.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (Register R : VRegs[Idx])
          B.buildUndef(R);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here, and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the
      // frontend set up an input arg for a particular interpolation mode, but
      // nothing uses that input arg. Really we should have an earlier pass
      // that removes such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(countTrailingZeros(Info->getPSInputAddr()));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc && !IsGraphics) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  IncomingValueAssigner Assigner(AssignFn);
  if (!determineAssignments(Assigner, SplitArgs, CCInfo))
    return false;

  FormalArgHandler Handler(B, MRI);
  if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
    return false;

  uint64_t StackOffset = Assigner.StackOffset;

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // When we tail call, we need to check if the callee's arguments will fit on
  // the caller's stack. So, whenever we lower formal arguments, we should keep
  // track of this information, since we might lower a tail call in this
  // function later.
  Info->setBytesInStackArgArea(StackOffset);

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(
    MachineIRBuilder &MIRBuilder, CCState &CCInfo,
    SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  // If there's no call site, this doesn't correspond to a call from the IR and
  // doesn't need implicit inputs.
  if (!Info.CB)
    return true;

  const AMDGPUFunctionArgInfo *CalleeArgInfo =
      &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
      AMDGPUFunctionArgInfo::DISPATCH_PTR,
      AMDGPUFunctionArgInfo::QUEUE_PTR,
      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
      AMDGPUFunctionArgInfo::DISPATCH_ID,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
      AMDGPUFunctionArgInfo::LDS_KERNEL_ID,
  };

  static constexpr StringLiteral ImplicitAttrNames[] = {
      "amdgpu-no-dispatch-ptr",
      "amdgpu-no-queue-ptr",
      "amdgpu-no-implicitarg-ptr",
      "amdgpu-no-dispatch-id",
      "amdgpu-no-workgroup-id-x",
      "amdgpu-no-workgroup-id-y",
      "amdgpu-no-workgroup-id-z",
      "amdgpu-no-lds-kernel-id",
  };

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI =
      static_cast<const AMDGPULegalizerInfo *>(ST.getLegalizerInfo());

  unsigned I = 0;
  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    // If the callee does not use the attribute value, skip copying the value.
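    // Note that I advances even when the input is skipped, so the attribute
    // names stay in step with InputRegs.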
    if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
      continue;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
      std::optional<uint32_t> Id =
          AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
      if (Id) {
        MIRBuilder.buildConstant(InputReg, *Id);
      } else {
        MIRBuilder.buildUndef(InputReg);
      }
    } else {
      // We may have proven the input wasn't needed, although the ABI requires
      // it. We just need to allocate the register appropriately.
      MIRBuilder.buildUndef(InputReg);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register, or pass them as-is if already
  // packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");

  // If incoming IDs are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
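  // The packed value places the X ID in bits [9:0], Y in [19:10], and Z in
  // [29:20]; the shifts by 10 and 20 below build exactly that layout.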
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
      NeedWorkItemIDX) {
    if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
      InputReg = MRI.createGenericVirtualRegister(S32);
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                         std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
    } else {
      InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
    }
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
      NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
      NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }

  if (!InputReg &&
      (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
      // We're in a situation where the outgoing function requires the workitem
      // ID, but the calling function does not have it (e.g. a graphics
      // function calling a C calling convention function). This is illegal,
      // but we need to produce something.
      MIRBuilder.buildUndef(InputReg);
    } else {
      // Workitem IDs are already packed; any present incoming argument will
      // carry all required fields.
      ArgDescriptor IncomingArg = ArgDescriptor::createArg(
          IncomingArgX ? *IncomingArgX :
          IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
      LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
                         &AMDGPU::VGPR_32RegClass, S32);
    }
  }

  if (OutgoingArg->isRegister()) {
    if (InputReg)
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
      report_fatal_error("failed to allocate implicit input argument");
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
    return false;
  }

  return true;
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for \p CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  assert(!(IsIndirect && IsTailCall) && "Indirect calls can't be tail calls, "
                                        "because the address can be divergent");
  return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::G_SI_CALL;
}

// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                  MachineIRBuilder &MIRBuilder,
                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
  if (Info.Callee.isReg()) {
    CallInst.addReg(Info.Callee.getReg());
    CallInst.addImm(0);
  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
    // The call lowering assumed we can directly encode a call target in the
    // instruction, which is not the case. Materialize the address here.
    const GlobalValue *GV = Info.Callee.getGlobal();
    auto Ptr = MIRBuilder.buildGlobalValue(
        LLT::pointer(GV->getAddressSpace(), 64), GV);
    CallInst.addReg(Ptr.getReg(0));
    CallInst.add(Info.Callee);
  } else
    return false;

  return true;
}

bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = ST.getRegisterInfo();

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
    return false;

  // Check if the caller and callee will handle arguments in the same way.
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  // FIXME: We are not accounting for potential differences in implicitly
  // passed inputs, but only the fixed ABI is supported now anyway.
  IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                       CalleeAssignFnVarArg);
  IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                       CallerAssignFnVarArg);
  return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
}

bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
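  // Run the callee's outgoing assignment so we can compare its stack usage
  // against the argument area our own caller already set up.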
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &B, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  // Indirect calls can't be tail calls, because the address can be divergent.
  // TODO: Check divergence info to see if the call target really is divergent.
  if (Info.Callee.isReg())
    return false;

  MachineFunction &MF = B.getMF();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  // Kernels aren't callable, and don't have a live-in return address, so it
  // doesn't make sense to do a tail call with entry functions.
  if (!CallerPreserved)
    return false;

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
                         "or swifterror arguments\n");
    return false;
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

// Insert outgoing implicit arguments for a call, by inserting copies to the
// implicit argument registers and adding the necessary implicit uses to the
// call instruction.
void AMDGPUCallLowering::handleImplicitCallArguments(
    MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
    const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
    ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
  if (!ST.enableFlatScratch()) {
    // Insert copies for the SRD. In the HSA case, this should be an identity
    // copy.
    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
                                               FuncInfo.getScratchRSrcReg());
    MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
    CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
  }

  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
    CallInst.addReg(ArgReg.first, RegState::Implicit);
  }
}

bool AMDGPUCallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Tell the call which registers are clobbered.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  MIB.addRegMask(Mask);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
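  // When not sibcalling, NumBytes is the callee argument area size reserved by
  // the ADJCALLSTACKUP/ADJCALLSTACKDOWN pair emitted below.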
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    // FIXME: Not accounting for callee implicit inputs
    OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment());

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(isAligned(ST.getStackAlignment(), FPDiff) &&
           "unaligned stack on tail call");
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  // Do the actual argument marshalling.
  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs);

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(0).isReg()) {
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
        MIB->getDesc(), MIB->getOperand(0), 0));
  }

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                   CallLoweringInfo &Info) const {
  if (Info.IsVarArg) {
    LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
    return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs)
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);

  SmallVector<ArgInfo, 8> InArgs;
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
      .addImm(0)
      .addImm(0);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.addDef(TRI->getReturnAddressReg(MF));

  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  MIB.addRegMask(Mask);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  // Do the actual argument marshalling.
  SmallVector<Register, 8> PhysRegs;

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(1).isReg()) {
    MIB->getOperand(1).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
        MIB->getDesc(), MIB->getOperand(1), 1));
  }

  // Now we can add the actual call instruction to the correct position.
  MIRBuilder.insertInstr(MIB);

  // Finally we can copy the returned value back into its virtual register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-def of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
                                                      Info.IsVarArg);
    IncomingValueAssigner Assigner(RetAssignFn);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
                                       Info.CallConv, Info.IsVarArg))
      return false;
  }

  uint64_t CalleePopBytes = NumBytes;

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
      .addImm(0)
      .addImm(CalleePopBytes);

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }

  return true;
}