//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
14
#include "RISCVCallLowering.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;
24
25 namespace {
26
/// Assigns outgoing values (call arguments and return values) to locations
/// (registers or stack slots) using RISC-V's custom calling-convention
/// function instead of the generic tablegen-style AssignFn.
struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn stored
  // by OutgoingValueAssigner since RISC-V implements its CC using a custom
  // function with a different signature.
  RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args for a return.
  bool IsRet;

  // Shared state handed to RISCVAssignFn for allocating RVV (vector) argument
  // registers; one dispatcher covers all values of a single call/return.
  RVVArgDispatcher &RVVDispatcher;

public:
  RISCVOutgoingValueAssigner(
      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
      RVVArgDispatcher &RVVDispatcher)
      : CallLowering::OutgoingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
        RVVDispatcher(RVVDispatcher) {}

  /// Assign one split value. Follows the CCAssignFn convention: returns true
  /// on failure to assign, false on success.
  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    MachineFunction &MF = State.getMachineFunction();
    const DataLayout &DL = MF.getDataLayout();
    const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();

    if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                      LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
                      *Subtarget.getTargetLowering(), RVVDispatcher))
      return true;

    // Keep the running outgoing stack size in sync so callers can size the
    // call frame (ADJCALLSTACKDOWN/UP) correctly.
    StackSize = State.getStackSize();
    return false;
  }
};
64
/// Materializes outgoing values into the physical registers and stack slots
/// chosen by the assigner, attaching register uses to the call/return
/// instruction being built (MIB).
struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                            MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  /// Compute the address of an outgoing stack argument as SP + Offset and
  /// fill in MPO to describe the stack location.
  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, Subtarget.getXLen());
    LLT sXLen = LLT::scalar(Subtarget.getXLen());

    // Materialize a copy of SP (x2) only once per call site and cache it.
    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  /// Store one outgoing value (extended per the CC if needed) to its assigned
  /// stack slot.
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();

    // TODO: Move StackAlignment to subtarget and share with FrameLowering.
    auto MMO =
        MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                commonAlignment(Align(16), LocMemOffset));

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

  /// Copy one outgoing value into its assigned physical register and mark the
  /// register as an implicit use of the call/return instruction.
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    // If we're passing a smaller fp value into a larger integer register,
    // anyextend before copying.
    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
        ((VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::i64) &&
         VA.getValVT() == MVT::f16)) {
      LLT DstTy = LLT::scalar(VA.getLocVT().getSizeInBits());
      ValVReg = MIRBuilder.buildAnyExt(DstTy, ValVReg).getReg(0);
    }

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  /// Handle an f64 value that the CC split across two i32 locations
  /// (lo = VAs[0], hi = VAs[1]); the hi half may live on the stack.
  /// Consumes two CCValAssigns and returns 2.
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VALo = VAs[0];
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VALo.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    // Split the f64 into its two 32-bit halves.
    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    // Hi half spilled to the stack: emit the store immediately.
    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    // The physical-register copies may need to be deferred (via *Thunk) so
    // the caller can emit them after all stack stores for the call.
    auto assignFunc = [=]() {
      assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
      if (VAHi.isRegLoc())
        assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
    };

    if (Thunk) {
      *Thunk = assignFunc;
      return 2;
    }

    assignFunc();
    return 2;
  }

private:
  // The call/return instruction under construction; implicit register uses
  // are attached to it.
  MachineInstrBuilder MIB;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const RISCVSubtarget &Subtarget;
};
172
/// Assigns incoming values (formal arguments and call results) to locations
/// using RISC-V's custom calling-convention function instead of the generic
/// tablegen-style AssignFn.
struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
private:
  // The function used internally to assign args - we ignore the AssignFn stored
  // by IncomingValueAssigner since RISC-V implements its CC using a custom
  // function with a different signature.
  RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;

  // Whether this is assigning args from a return.
  bool IsRet;

  // Shared state handed to RISCVAssignFn for allocating RVV (vector) argument
  // registers; one dispatcher covers all values of a single call/return.
  RVVArgDispatcher &RVVDispatcher;

public:
  RISCVIncomingValueAssigner(
      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
      RVVArgDispatcher &RVVDispatcher)
      : CallLowering::IncomingValueAssigner(nullptr),
        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
        RVVDispatcher(RVVDispatcher) {}

  /// Assign one split value. Follows the CCAssignFn convention: returns true
  /// on failure to assign, false on success. Incoming values are always
  /// treated as fixed here (IsFixed=true); vararg register spilling is
  /// handled separately by saveVarArgRegisters.
  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    MachineFunction &MF = State.getMachineFunction();
    const DataLayout &DL = MF.getDataLayout();
    const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();

    // Seeing any scalable-vector location marks the whole function as a
    // vector call in the machine function info.
    if (LocVT.isScalableVector())
      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();

    if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                      LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
                      *Subtarget.getTargetLowering(), RVVDispatcher))
      return true;

    // Keep the running stack size in sync so callers (e.g. vararg handling)
    // can find the first on-stack vararg offset.
    StackSize = State.getStackSize();
    return false;
  }
};
213
/// Materializes incoming values from the physical registers and stack slots
/// chosen by the assigner into virtual registers. How a physical register is
/// marked as used is left to subclasses (live-in vs. implicit-def).
struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
  RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI),
        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

  /// Create an immutable fixed frame object for an incoming stack argument
  /// and return a pointer vreg to it.
  Register getStackAddress(uint64_t MemSize, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
        .getReg(0);
  }

  /// Load one incoming value from its assigned stack slot.
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// Record the physical register as used (subclass-specific), then copy it
  /// into the destination vreg via the generic handler.
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  /// Handle an f64 value that the CC split across two i32 locations
  /// (lo = VAs[0], hi = VAs[1]); the hi half may live on the stack. The two
  /// halves are re-merged into the f64 destination vreg. Consumes two
  /// CCValAssigns and returns 2.
  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs,
                             std::function<void()> *Thunk) override {
    assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
    const CCValAssign &VALo = VAs[0];
    const CCValAssign &VAHi = VAs[1];

    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
    assert(VALo.getValNo() == VAHi.getValNo() &&
           "Values belong to different arguments");

    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
           "unexpected custom value");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    // Hi half passed on the stack: load it from its fixed frame object.
    if (VAHi.isMemLoc()) {
      LLT MemTy(VAHi.getLocVT());

      MachinePointerInfo MPO;
      Register StackAddr = getStackAddress(
          MemTy.getSizeInBytes(), VAHi.getLocMemOffset(), MPO, Arg.Flags[0]);

      assignValueToAddress(NewRegs[1], StackAddr, MemTy, MPO,
                           const_cast<CCValAssign &>(VAHi));
    }

    assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
    if (VAHi.isRegLoc())
      assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);

    // Recombine the two 32-bit halves into the f64 destination.
    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);

    return 2;
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;

private:
  const RISCVSubtarget &Subtarget;
};
291
/// Incoming-value handler for formal (function-entry) arguments: incoming
/// physical registers become live-ins of the function and the entry block.
struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
  RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : RISCVIncomingValueHandler(B, MRI) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    // Record the register as live into both the function as a whole and the
    // current (entry) basic block.
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};
301
/// Incoming-value handler for values returned by a callee: each returned
/// physical register becomes an implicit def of the call instruction.
struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
  RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                         MachineInstrBuilder &MIB)
      : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  // The call instruction the returned registers are attached to.
  MachineInstrBuilder MIB;
};
313
314 } // namespace
315
// Forward the target lowering to the generic CallLowering base.
RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
    : CallLowering(&TLI) {}
318
319 /// Return true if scalable vector with ScalarTy is legal for lowering.
isLegalElementTypeForRVV(Type * EltTy,const RISCVSubtarget & Subtarget)320 static bool isLegalElementTypeForRVV(Type *EltTy,
321 const RISCVSubtarget &Subtarget) {
322 if (EltTy->isPointerTy())
323 return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
324 if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
325 EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
326 return true;
327 if (EltTy->isIntegerTy(64))
328 return Subtarget.hasVInstructionsI64();
329 if (EltTy->isHalfTy())
330 return Subtarget.hasVInstructionsF16();
331 if (EltTy->isBFloatTy())
332 return Subtarget.hasVInstructionsBF16();
333 if (EltTy->isFloatTy())
334 return Subtarget.hasVInstructionsF32();
335 if (EltTy->isDoubleTy())
336 return Subtarget.hasVInstructionsF64();
337 return false;
338 }
339
340 // TODO: Support all argument types.
341 // TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
isSupportedArgumentType(Type * T,const RISCVSubtarget & Subtarget,bool IsLowerArgs=false)342 static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
343 bool IsLowerArgs = false) {
344 if (T->isIntegerTy())
345 return true;
346 if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy())
347 return true;
348 if (T->isPointerTy())
349 return true;
350 // TODO: Support fixed vector types.
351 if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
352 T->isScalableTy() &&
353 isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
354 return true;
355 return false;
356 }
357
358 // TODO: Only integer, pointer and aggregate types are supported now.
359 // TODO: Remove IsLowerRetVal argument by adding support for vectors in
360 // lowerCall.
isSupportedReturnType(Type * T,const RISCVSubtarget & Subtarget,bool IsLowerRetVal=false)361 static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget,
362 bool IsLowerRetVal = false) {
363 // TODO: Integers larger than 2*XLen are passed indirectly which is not
364 // supported yet.
365 if (T->isIntegerTy())
366 return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2;
367 if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy())
368 return true;
369 if (T->isPointerTy())
370 return true;
371
372 if (T->isArrayTy())
373 return isSupportedReturnType(T->getArrayElementType(), Subtarget);
374
375 if (T->isStructTy()) {
376 auto StructT = cast<StructType>(T);
377 for (unsigned i = 0, e = StructT->getNumElements(); i != e; ++i)
378 if (!isSupportedReturnType(StructT->getElementType(i), Subtarget))
379 return false;
380 return true;
381 }
382
383 if (IsLowerRetVal && T->isVectorTy() && Subtarget.hasVInstructions() &&
384 T->isScalableTy() &&
385 isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
386 return true;
387
388 return false;
389 }
390
/// Lower the return value \p Val (held in \p VRegs) into the return
/// instruction being built (\p Ret). Returns true on success; a void return
/// (Val == nullptr) trivially succeeds. Returns false if the return type is
/// unsupported or assignment fails, signalling fallback to SelectionDAG.
bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                       const Value *Val,
                                       ArrayRef<Register> VRegs,
                                       MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  if (!isSupportedReturnType(Val->getType(), Subtarget, /*IsLowerRetVal=*/true))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  CallingConv::ID CC = F.getCallingConv();

  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  // Split aggregate/illegal-typed return values into legal pieces.
  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);

  RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
                              ArrayRef(F.getReturnType())};
  RISCVOutgoingValueAssigner Assigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/true, Dispatcher);
  RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
  return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
                                       MIRBuilder, CC, F.isVarArg());
}
423
lowerReturn(MachineIRBuilder & MIRBuilder,const Value * Val,ArrayRef<Register> VRegs,FunctionLoweringInfo & FLI) const424 bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
425 const Value *Val, ArrayRef<Register> VRegs,
426 FunctionLoweringInfo &FLI) const {
427 assert(!Val == VRegs.empty() && "Return value without a vreg");
428 MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);
429
430 if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
431 return false;
432
433 MIRBuilder.insertInstr(Ret);
434 return true;
435 }
436
437 /// If there are varargs that were passed in a0-a7, the data in those registers
438 /// must be copied to the varargs save area on the stack.
/// If there are varargs that were passed in a0-a7, the data in those registers
/// must be copied to the varargs save area on the stack.
void RISCVCallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  ArgRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(Subtarget.getTargetABI());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // Index of the first argument GPR not consumed by the fixed arguments.
  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

  // Size of the vararg save area. For now, the varargs save area is either
  // zero or large enough to hold a0-a7.
  int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
  int FI;

  // If all registers are allocated, then all varargs must be passed on the
  // stack and we don't need to save any argregs.
  if (VarArgsSaveSize == 0) {
    // The frame object just records where the first on-stack vararg lives.
    int VaArgOffset = Assigner.StackSize;
    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
  } else {
    // The save area sits immediately below the incoming stack arguments
    // (negative offset from the incoming SP).
    int VaArgOffset = -VarArgsSaveSize;
    FI = MFI.CreateFixedObject(VarArgsSaveSize, VaArgOffset, true);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registered remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes,
                            VaArgOffset - static_cast<int>(XLenInBytes), true);
      VarArgsSaveSize += XLenInBytes;
    }

    const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
                                Subtarget.getXLen());
    const LLT sXLen = LLT::scalar(Subtarget.getXLen());

    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
    // Constant XLenInBytes stride, reused for every ptr-add below.
    auto Offset = MIRBuilder.buildConstant(
        MRI.createGenericVirtualRegister(sXLen), XLenInBytes);

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    const MVT XLenVT = Subtarget.getXLenVT();
    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
      const Register VReg = MRI.createGenericVirtualRegister(sXLen);
      // NOTE(review): the ValNo passed to getReg only needs to be unique
      // relative to the fixed arguments; presumably that is why it is offset
      // by MF.getFunction().getNumOperands() — confirm.
      Handler.assignValueToReg(
          VReg, ArgRegs[I],
          CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
                              ArgRegs[I], XLenVT, CCValAssign::Full));
      auto MPO =
          MachinePointerInfo::getFixedStack(MF, FI, (I - Idx) * XLenInBytes);
      MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
      // Advance the store address by XLen bytes for the next register.
      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }

  // Record the frame index of the first variable argument which is a value
  // necessary to G_VASTART.
  RVFI->setVarArgsFrameIndex(FI);
  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
}
504
/// Lower the incoming formal arguments of \p F into the VRegs the IR
/// translator allocated for them. Returns false (fall back to SelectionDAG)
/// if any argument type is unsupported or assignment fails.
bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                             const Function &F,
                                             ArrayRef<ArrayRef<Register>> VRegs,
                                             FunctionLoweringInfo &FLI) const {
  // Early exit if there are no arguments. varargs are not part of F.args() but
  // must be lowered.
  if (F.arg_empty() && !F.isVarArg())
    return true;

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
                                 /*IsLowerArgs=*/true))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  CallingConv::ID CC = F.getCallingConv();

  SmallVector<ArgInfo, 32> SplitArgInfos;
  // Original (unsplit) argument types; fed to the RVV dispatcher below.
  SmallVector<Type *, 4> TypeList;
  unsigned Index = 0;
  for (auto &Arg : F.args()) {
    // Construct the ArgInfo object from destination register and argument type.
    ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
    setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);

    // Handle any required merging from split value types from physical
    // registers into the desired VReg. ArgInfo objects are constructed
    // correspondingly and appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);

    TypeList.push_back(Arg.getType());

    ++Index;
  }

  RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
                              ArrayRef(TypeList)};
  RISCVIncomingValueAssigner Assigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/false, Dispatcher);
  RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());

  // Assignment and handling are done in two explicit steps (rather than
  // determineAndHandleAssignments) because the CCState is needed afterwards
  // for vararg register saving.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (F.isVarArg())
    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);

  return true;
}
562
/// Lower an outgoing call described by \p Info: emit the
/// ADJCALLSTACKDOWN/ADJCALLSTACKUP bracket, the (Pseudo)CALL itself, the
/// argument copies/stores, and the copies of any returned registers. Returns
/// false (fall back to SelectionDAG) for unsupported argument/return types or
/// failed assignment.
bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                  CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();
  // NOTE(review): CC is the *caller's* calling convention, while the regmask
  // below uses Info.CallConv (the callee's). Confirm this asymmetry is
  // intended for calls where the two differ.
  CallingConv::ID CC = F.getCallingConv();

  const RISCVSubtarget &Subtarget =
      MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
  for (auto &AInfo : Info.OrigArgs) {
    if (!isSupportedArgumentType(AInfo.Ty, Subtarget))
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() &&
      !isSupportedReturnType(Info.OrigRet.Ty, Subtarget))
    return false;

  // Open the call frame; the stack size operand is filled in once the
  // assigner has computed it (below).
  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(RISCV::ADJCALLSTACKDOWN);

  SmallVector<ArgInfo, 32> SplitArgInfos;
  SmallVector<ISD::OutputArg, 8> Outs;
  // Original (unsplit) argument types; fed to the RVV dispatcher below.
  SmallVector<Type *, 4> TypeList;
  for (auto &AInfo : Info.OrigArgs) {
    // Handle any required unmerging of split value types from a given VReg into
    // physical registers. ArgInfo objects are constructed correspondingly and
    // appended to SplitArgInfos.
    splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
    TypeList.push_back(AInfo.Ty);
  }

  // TODO: Support tail calls.
  Info.IsTailCall = false;

  // Select the recommended relocation type R_RISCV_CALL_PLT.
  if (!Info.Callee.isReg())
    Info.Callee.setTargetFlags(RISCVII::MO_CALL);

  // Build the call off to the side; it is inserted only after the argument
  // copies/stores have been emitted.
  MachineInstrBuilder Call =
      MIRBuilder
          .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
                                                  : RISCV::PseudoCALL)
          .add(Info.Callee);
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  RVVArgDispatcher ArgDispatcher{&MF, getTLI<RISCVTargetLowering>(),
                                 ArrayRef(TypeList)};
  RISCVOutgoingValueAssigner ArgAssigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/false, ArgDispatcher);
  RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  MIRBuilder.insertInstr(Call);

  // Close the call frame with the stack size the assigner computed.
  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(RISCV::ADJCALLSTACKUP)
      .addImm(ArgAssigner.StackSize)
      .addImm(0);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Call->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MF.getRegInfo(),
                             *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *Call,
                             Call->getDesc(), Call->getOperand(0), 0);

  if (Info.OrigRet.Ty->isVoidTy())
    return true;

  // Copy the returned registers into the destination vregs.
  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);

  RVVArgDispatcher RetDispatcher{&MF, getTLI<RISCVTargetLowering>(),
                                 ArrayRef(F.getReturnType())};
  RISCVIncomingValueAssigner RetAssigner(
      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
      /*IsRet=*/true, RetDispatcher);
  RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
  if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
                                     MIRBuilder, CC, Info.IsVarArg))
    return false;

  return true;
}
654