//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
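      // For example, an s16 return value assigned to a 32-bit location is
      // any-extended to s32 here before being copied into the physical
      // register.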
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    Register AddrReg = MRI.createGenericVirtualRegister(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg;
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // FIXME: Get alignment
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        1);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isIncomingArgumentHandler() const override { return true; }
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {}

void AMDGPUCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    Type *Ty = VT.getTypeForEVT(Ctx);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g.
      // [1 x double] -> double).
      SplitArgs.emplace_back(OrigArg.Regs[SplitIdx], Ty,
                             OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    LLT LLTy = getLLTForType(*Ty, DL);

    SmallVector<Register, 8> SplitRegs;

    EVT PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
    Type *PartTy = PartVT.getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy,
                             OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &B,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
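    // Unmerge to the original (narrow) element type first, then any-extend
    // each element into its wider part register.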
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = B.buildUndef(BigTy);

  Register BigReg = MRI.createGenericVirtualRegister(BigTy);
  B.buildInsert(BigReg, ImpDef.getReg(0), SrcReg, 0).getReg(0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    B.buildExtract(DstRegs[i], BigReg, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
      OrigRetInfo, SplitRetInfos, DL, MRI, CC,
      [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        unpackRegsToOrigType(B, Regs, VRegs[VTSplitIdx], LLTy, PartLLT);
      });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueHandler RetHandler(B, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler);
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {
  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
        .addImm(0);
    return true;
  }

  auto const &ST = B.getMF().getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
                                               Type *ParamTy,
                                               uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register DstReg = MRI.createGenericVirtualRegister(PtrType);
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  B.buildConstant(OffsetReg, Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(B, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo,
                              MachineMemOperand::MOLoad |
                                  MachineMemOperand::MODereferenceable |
                                  MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
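  // The checks below run in the fixed user SGPR order: private segment
  // buffer, dispatch pointer, queue pointer, kernarg segment pointer,
  // dispatch ID, then flat scratch init.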
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const unsigned KernArgBaseAlign = 16;
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg = OrigArgRegs.size() == 1
                          ? OrigArgRegs[0]
                          : MRI.createGenericVirtualRegister(
                                getLLTForType(*ArgTy, DL));
    unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
    ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
    lowerParameter(B, ArgTy, ArgOffset, Align, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &B,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  if (!LLTy.isVector() && !PartLLT.isVector()) {
    B.buildMerge(OrigRegs[0], Regs);
    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(LLTy.getElementType() == PartLLT.getElementType());

    int DstElts = LLTy.getNumElements();
    int PartElts = PartLLT.getNumElements();
    if (DstElts % PartElts == 0)
      B.buildConcatVectors(OrigRegs[0], Regs);
    else {
      // Deal with v3s16 split into v2s16
      assert(PartElts == 2 && DstElts % 2 != 0);
      int RoundedElts = PartElts * ((DstElts + PartElts - 1) / PartElts);

      LLT RoundedDestTy = LLT::vector(RoundedElts, PartLLT.getElementType());
      auto RoundedConcat = B.buildConcatVectors(RoundedDestTy, Regs);
      B.buildExtract(OrigRegs[0], RoundedConcat, 0);
    }

    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register
  // types to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels.
  // We want to avoid any kind of legalization or argument splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(
        OrigArg, SplitArgs, DL, MRI, CC,
        // FIXME: We should probably be passing multiple registers to
        // handleAssignments to do this
        [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
          packSplitRegsToOrigType(B, VRegs[Idx][VTSplitIdx], Regs,
                                  LLTy, PartLLT);
        });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
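  //
  // If neither condition is satisfied by the arguments seen above, the block
  // below force-enables input 0 (reserving VGPR0/VGPR1) so that at least one
  // interpolation mode is always enabled.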
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}