//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

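// Dispatch a single split value to wherever the calling-convention analysis
// placed it: a physical register or a stack location.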
bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

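// Walk the arguments in lock step with their assigned locations. A value that
// the calling convention splits across several registers is first broken into
// one virtual register per piece and then assigned piece by piece.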
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  // Note: SplitLength is computed in the loop body before the increment
  // expression consumes it.
  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 &&
           "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

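// Handlers for values flowing into the current function: formal arguments and
// call results. Physical registers are recorded via markPhysRegUsed (live-in
// for arguments, implicit defs on the call for CallReturnHandler below), and
// stack locations are read with loads from fixed frame objects.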
namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(Register Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

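// An f64 arriving in the argument registers A0-A3 is spread over a GPR pair
// and is rebuilt with BuildPairF64; an f32 arriving there is moved into an FPR
// with MTC1. Anything else is a plain copy, plus a truncation when the value
// was promoted to a wider location type.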
void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

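// Incoming stack arguments live at fixed offsets in the caller's frame, so
// create a fixed frame object at the location's offset and return its address
// together with a matching load memory operand.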
Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

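// Handler for values leaving the current function: return values and outgoing
// call arguments. Register locations are filled by copying (or moving, for
// floating point) the value into the assigned physical registers; stack
// locations become stores relative to the stack pointer.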
namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

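// Mirror image of the incoming case: an f64 headed for the argument registers
// A0-A3 is split into two GPRs with ExtractElementF64, an f32 is moved out of
// its FPR with MFC1, and everything else is extended to the location type and
// copied into the physical register.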
void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

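// Outgoing stack arguments are addressed relative to the stack pointer:
// materialize SP plus the location's offset and return that address together
// with a store memory operand of the value's size.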
Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  Register SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  Register AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

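// Widen a value to its location type according to the LocInfo recorded by the
// calling-convention analysis (sign-, zero- or any-extend); Full locations are
// passed through unchanged.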
Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // A value type larger than the register type does not mean information is
  // lost; it means that VT is split across multiple registers of type
  // RegisterVT, so the location still holds the full value.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

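// Recompute the LocInfo of every assigned location from the original
// (pre-split) value type and the argument flags, and rebuild the register or
// memory CCValAssign entries in place with that LocInfo.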
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

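// Lower a return: split the return value into legal pieces, run the return
// calling convention over them, and let OutgoingValueHandler assign each piece
// to its location before the RetRA is inserted.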
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

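// Lower formal arguments: bail out on varargs and on unsupported argument
// types, run the argument calling convention, and let IncomingValueHandler
// copy or load each argument into its virtual registers.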
bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

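// Lower a call: emit the ADJCALLSTACKDOWN/ADJCALLSTACKUP bracket, select JAL
// or JALRPseudo (the latter for register callees and PIC calls to globals),
// pass the arguments through OutgoingValueHandler, and read any result back
// with CallReturnHandler.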
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }

  if (OrigRet.Regs[0] && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
    if (!Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
      Register(Mips::GP),
      MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (OrigRet.Regs[0]) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

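// For every split argument, emit one ISD argument entry per register the
// calling convention will use for it, so that MipsCCState analyzes the same
// register-sized pieces it would see coming from SelectionDAG; only the first
// piece carries the original ABI alignment.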
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: perform structure and array split. For now we only deal with
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}
669