xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 //===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "AArch64CallLowering.h"
16 #include "AArch64ISelLowering.h"
17 #include "AArch64MachineFunctionInfo.h"
18 #include "AArch64RegisterInfo.h"
19 #include "AArch64Subtarget.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/Analysis/ObjCARCUtil.h"
23 #include "llvm/CodeGen/Analysis.h"
24 #include "llvm/CodeGen/CallingConvLower.h"
25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
26 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/LowLevelTypeUtils.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineMemOperand.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/MachineValueType.h"
37 #include "llvm/CodeGen/TargetRegisterInfo.h"
38 #include "llvm/CodeGen/TargetSubtargetInfo.h"
39 #include "llvm/CodeGen/ValueTypes.h"
40 #include "llvm/IR/Argument.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/Value.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstdint>
48 #include <iterator>
49 
50 #define DEBUG_TYPE "aarch64-call-lowering"
51 
52 using namespace llvm;
53 
54 AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
55   : CallLowering(&TLI) {}
56 
57 static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
58                                              MVT &LocVT) {
59   // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
60   // hack because the DAG calls the assignment function with pre-legalized
61   // register typed values, not the raw type.
62   //
63   // This hack is not applied to return values which are not passed on the
64   // stack.
65   if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
66     ValVT = LocVT = MVT::i8;
67   else if (OrigVT == MVT::i16)
68     ValVT = LocVT = MVT::i16;
69 }
70 
71 // Account for i1/i8/i16 stack passed value hack
72 static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
73   const MVT ValVT = VA.getValVT();
74   return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
75                                                  : LLT(VA.getLocVT());
76 }
77 
78 namespace {
79 
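/// Assigner for incoming (formal argument / call result) values. Applies the
/// i1/i8/i16 stack-passed-value DAG compatibility hack before delegating to
/// the generic IncomingValueAssigner.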
80 struct AArch64IncomingValueAssigner
81     : public CallLowering::IncomingValueAssigner {
82   AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
83                                CCAssignFn *AssignFnVarArg_)
84       : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
85 
86   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
87                  CCValAssign::LocInfo LocInfo,
88                  const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
89                  CCState &State) override {
90     applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
91     return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
92                                             LocInfo, Info, Flags, State);
93   }
94 };
95 
96 struct AArch64OutgoingValueAssigner
97     : public CallLowering::OutgoingValueAssigner {
98   const AArch64Subtarget &Subtarget;
99 
100   /// Track whether this is used for a return instead of function argument
101   /// passing. We apply a hack to i1/i8/i16 stack-passed values, but returns of
102   /// those types are never passed on the stack, so the adjustment is skipped.
103   bool IsReturn;
104 
105   AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
106                                CCAssignFn *AssignFnVarArg_,
107                                const AArch64Subtarget &Subtarget_,
108                                bool IsReturn)
109       : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
110         Subtarget(Subtarget_), IsReturn(IsReturn) {}
111 
112   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
113                  CCValAssign::LocInfo LocInfo,
114                  const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
115                  CCState &State) override {
116     bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
117     bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
118 
119     bool Res;
120     if (Info.IsFixed && !UseVarArgsCCForFixed) {
121       if (!IsReturn)
122         applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
123       Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
124     } else
125       Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
126 
127     StackSize = State.getStackSize();
128     return Res;
129   }
130 };
131 
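/// Common handler for incoming values, shared between formal argument
/// lowering (FormalArgHandler) and reading a call's return values
/// (CallReturnHandler).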
132 struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
133   IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
134       : IncomingValueHandler(MIRBuilder, MRI) {}
135 
136   Register getStackAddress(uint64_t Size, int64_t Offset,
137                            MachinePointerInfo &MPO,
138                            ISD::ArgFlagsTy Flags) override {
139     auto &MFI = MIRBuilder.getMF().getFrameInfo();
140 
141     // Byval is assumed to be writable memory, but other stack passed arguments
142     // are not.
143     const bool IsImmutable = !Flags.isByVal();
144 
145     int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
146     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
147     auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
148     return AddrReg.getReg(0);
149   }
150 
151   LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
152                              ISD::ArgFlagsTy Flags) const override {
153     // For pointers, we just need to fixup the integer types reported in the
154     // CCValAssign.
155     if (Flags.isPointer())
156       return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
157     return getStackValueStoreTypeHack(VA);
158   }
159 
160   void assignValueToReg(Register ValVReg, Register PhysReg,
161                         CCValAssign VA) override {
162     markPhysRegUsed(PhysReg);
163     IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
164   }
165 
166   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
167                             MachinePointerInfo &MPO, CCValAssign &VA) override {
168     MachineFunction &MF = MIRBuilder.getMF();
169 
170     LLT ValTy(VA.getValVT());
171     LLT LocTy(VA.getLocVT());
172 
173     // Fixup the types for the DAG compatibility hack.
174     if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
175       std::swap(ValTy, LocTy);
176     else {
177       // The calling code knows whether this is a pointer or not; we're only
178       // touching the LocTy for the i8/i16 hack.
179       assert(LocTy.getSizeInBits() == MemTy.getSizeInBits());
180       LocTy = MemTy;
181     }
182 
183     auto MMO = MF.getMachineMemOperand(
184         MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
185         inferAlignFromPtrInfo(MF, MPO));
186 
187     switch (VA.getLocInfo()) {
188     case CCValAssign::LocInfo::ZExt:
189       MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
190       return;
191     case CCValAssign::LocInfo::SExt:
192       MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
193       return;
194     default:
195       MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
196       return;
197     }
198   }
199 
200   /// How the physical register gets marked varies between formal
201   /// parameters (it's a basic-block live-in), and a call instruction
202   /// (it's an implicit-def of the BL).
203   virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
204 };
205 
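/// Incoming handler for formal arguments: used physregs are added as live-ins
/// of both the function and the current basic block.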
206 struct FormalArgHandler : public IncomingArgHandler {
207   FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
208       : IncomingArgHandler(MIRBuilder, MRI) {}
209 
210   void markPhysRegUsed(MCRegister PhysReg) override {
211     MIRBuilder.getMRI()->addLiveIn(PhysReg);
212     MIRBuilder.getMBB().addLiveIn(PhysReg);
213   }
214 };
215 
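/// Incoming handler for a call's return values: the returned physregs are
/// added as implicit defs of the call instruction (MIB).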
216 struct CallReturnHandler : public IncomingArgHandler {
217   CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
218                     MachineInstrBuilder MIB)
219       : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
220 
221   void markPhysRegUsed(MCRegister PhysReg) override {
222     MIB.addDef(PhysReg, RegState::Implicit);
223   }
224 
225   MachineInstrBuilder MIB;
226 };
227 
228 /// A special return arg handler for "returned" attribute arg calls.
229 struct ReturnedArgCallReturnHandler : public CallReturnHandler {
230   ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
231                                MachineRegisterInfo &MRI,
232                                MachineInstrBuilder MIB)
233       : CallReturnHandler(MIRBuilder, MRI, MIB) {}
234 
235   void markPhysRegUsed(MCRegister PhysReg) override {}
236 };
237 
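/// Handler for outgoing values (call arguments and function return values).
/// Values are copied into physregs that become implicit uses of MIB, or stored
/// to the stack either relative to SP or, for tail calls, into fixed frame
/// objects offset by FPDiff.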
238 struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
239   OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
240                      MachineInstrBuilder MIB, bool IsTailCall = false,
241                      int FPDiff = 0)
242       : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
243         FPDiff(FPDiff),
244         Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
245 
246   Register getStackAddress(uint64_t Size, int64_t Offset,
247                            MachinePointerInfo &MPO,
248                            ISD::ArgFlagsTy Flags) override {
249     MachineFunction &MF = MIRBuilder.getMF();
250     LLT p0 = LLT::pointer(0, 64);
251     LLT s64 = LLT::scalar(64);
252 
253     if (IsTailCall) {
254       assert(!Flags.isByVal() && "byval unhandled with tail calls");
255 
256       Offset += FPDiff;
257       int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
258       auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
259       MPO = MachinePointerInfo::getFixedStack(MF, FI);
260       return FIReg.getReg(0);
261     }
262 
263     if (!SPReg)
264       SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);
265 
266     auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);
267 
268     auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);
269 
270     MPO = MachinePointerInfo::getStack(MF, Offset);
271     return AddrReg.getReg(0);
272   }
273 
274   /// We need to fix up the reported store size for certain value types because
275   /// we invert the interpretation of ValVT and LocVT in certain cases. This is
276   /// for compatibility with the DAG call lowering implementation, which we're
277   /// currently building on top of.
278   LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
279                              ISD::ArgFlagsTy Flags) const override {
280     if (Flags.isPointer())
281       return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
282     return getStackValueStoreTypeHack(VA);
283   }
284 
285   void assignValueToReg(Register ValVReg, Register PhysReg,
286                         CCValAssign VA) override {
287     MIB.addUse(PhysReg, RegState::Implicit);
288     Register ExtReg = extendRegister(ValVReg, VA);
289     MIRBuilder.buildCopy(PhysReg, ExtReg);
290   }
291 
292   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
293                             MachinePointerInfo &MPO, CCValAssign &VA) override {
294     MachineFunction &MF = MIRBuilder.getMF();
295     auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
296                                        inferAlignFromPtrInfo(MF, MPO));
297     MIRBuilder.buildStore(ValVReg, Addr, *MMO);
298   }
299 
300   void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
301                             Register Addr, LLT MemTy, MachinePointerInfo &MPO,
302                             CCValAssign &VA) override {
303     unsigned MaxSize = MemTy.getSizeInBytes() * 8;
304     // For varargs, we always want to extend them to 8 bytes, in which case
305     // we disable setting a max.
306     if (!Arg.IsFixed)
307       MaxSize = 0;
308 
309     Register ValVReg = Arg.Regs[RegIndex];
310     if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
311       MVT LocVT = VA.getLocVT();
312       MVT ValVT = VA.getValVT();
313 
314       if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16) {
315         std::swap(ValVT, LocVT);
316         MemTy = LLT(VA.getValVT());
317       }
318 
319       ValVReg = extendRegister(ValVReg, VA, MaxSize);
320     } else {
321       // The store does not cover the full allocated stack slot.
322       MemTy = LLT(VA.getValVT());
323     }
324 
325     assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
326   }
327 
328   MachineInstrBuilder MIB;
329 
330   bool IsTailCall;
331 
332   /// For tail calls, the byte offset of the call's argument area from the
333   /// callee's. Unused elsewhere.
334   int FPDiff;
335 
336   // Cache the SP register vreg if we need it more than once in this call site.
337   Register SPReg;
338 
339   const AArch64Subtarget &Subtarget;
340 };
341 } // namespace
342 
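/// Return true if the callee is expected to pop its own stack arguments:
/// fastcc with -tailcallopt enabled, and the tail/swifttail calling
/// conventions.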
343 static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
344   return (CallConv == CallingConv::Fast && TailCallOpt) ||
345          CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
346 }
347 
348 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
349                                       const Value *Val,
350                                       ArrayRef<Register> VRegs,
351                                       FunctionLoweringInfo &FLI,
352                                       Register SwiftErrorVReg) const {
353   auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
354   assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
355          "Return value without a vreg");
356 
357   bool Success = true;
358   if (!FLI.CanLowerReturn) {
359     insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
360   } else if (!VRegs.empty()) {
361     MachineFunction &MF = MIRBuilder.getMF();
362     const Function &F = MF.getFunction();
363     const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
364 
365     MachineRegisterInfo &MRI = MF.getRegInfo();
366     const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
367     CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
368     auto &DL = F.getParent()->getDataLayout();
369     LLVMContext &Ctx = Val->getType()->getContext();
370 
371     SmallVector<EVT, 4> SplitEVTs;
372     ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
373     assert(VRegs.size() == SplitEVTs.size() &&
374            "For each split Type there should be exactly one VReg.");
375 
376     SmallVector<ArgInfo, 8> SplitArgs;
377     CallingConv::ID CC = F.getCallingConv();
378 
379     for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
380       Register CurVReg = VRegs[i];
381       ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
382       setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
383 
384       // i1 is a special case: in SDAG, an i1 true value is naturally zero
385       // extended when widened using ANYEXT, so here we zero-extend explicitly.
386       auto &Flags = CurArgInfo.Flags[0];
387       if (MRI.getType(CurVReg).getSizeInBits() == 1 && !Flags.isSExt() &&
388           !Flags.isZExt()) {
389         CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
390       } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
391                  1) {
392         // Some types will need extending as specified by the CC.
393         MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
394         if (EVT(NewVT) != SplitEVTs[i]) {
395           unsigned ExtendOp = TargetOpcode::G_ANYEXT;
396           if (F.getAttributes().hasRetAttr(Attribute::SExt))
397             ExtendOp = TargetOpcode::G_SEXT;
398           else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
399             ExtendOp = TargetOpcode::G_ZEXT;
400 
401           LLT NewLLT(NewVT);
402           LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
403           CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
404           // Instead of an extend, we might have a vector type which needs
405           // padding with more elements, e.g. <2 x half> -> <4 x half>.
406           if (NewVT.isVector()) {
407             if (OldLLT.isVector()) {
408               if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
409 
410                 CurVReg =
411                     MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
412                         .getReg(0);
413               } else {
414                 // Just do a vector extend.
415                 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
416                               .getReg(0);
417               }
418             } else if (NewLLT.getNumElements() >= 2 &&
419                        NewLLT.getNumElements() <= 8) {
420               // We need to pad a <1 x S> type to <2/4/8 x S>. Since we don't
421               // have <1 x S> vector types in GISel we use a build_vector
422               // instead of a vector merge/concat.
423               CurVReg =
424                   MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
425                       .getReg(0);
426             } else {
427               LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
428               return false;
429             }
430           } else {
431             // If the split EVT was a <1 x T> vector, and NewVT is T, then we
432             // don't have to do anything since we don't distinguish between the
433             // two.
434             if (NewLLT != MRI.getType(CurVReg)) {
435               // A scalar extend.
436               CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
437                             .getReg(0);
438             }
439           }
440         }
441       }
442       if (CurVReg != CurArgInfo.Regs[0]) {
443         CurArgInfo.Regs[0] = CurVReg;
444         // Reset the arg flags after modifying CurVReg.
445         setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
446       }
447       splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
448     }
449 
450     AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
451                                           /*IsReturn*/ true);
452     OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
453     Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
454                                             MIRBuilder, CC, F.isVarArg());
455   }
456 
457   if (SwiftErrorVReg) {
458     MIB.addUse(AArch64::X21, RegState::Implicit);
459     MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
460   }
461 
462   MIRBuilder.insertInstr(MIB);
463   return Success;
464 }
465 
466 bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
467                                          CallingConv::ID CallConv,
468                                          SmallVectorImpl<BaseArgInfo> &Outs,
469                                          bool IsVarArg) const {
470   SmallVector<CCValAssign, 16> ArgLocs;
471   const auto &TLI = *getTLI<AArch64TargetLowering>();
472   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
473                  MF.getFunction().getContext());
474 
475   return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
476 }
477 
478 /// Helper function for musttail calls: computes the set of forwarded
479 /// registers, sets MBB liveness for them, and emits the COPY instructions
480 /// that can be used to save and restore those registers later.
481 static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
482                                              CCAssignFn *AssignFn) {
483   MachineBasicBlock &MBB = MIRBuilder.getMBB();
484   MachineFunction &MF = MIRBuilder.getMF();
485   MachineFrameInfo &MFI = MF.getFrameInfo();
486 
487   if (!MFI.hasMustTailInVarArgFunc())
488     return;
489 
490   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
491   const Function &F = MF.getFunction();
492   assert(F.isVarArg() && "Expected F to be vararg?");
493 
494   // Compute the set of forwarded registers. The rest are scratch.
495   SmallVector<CCValAssign, 16> ArgLocs;
496   CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
497                  F.getContext());
498   SmallVector<MVT, 2> RegParmTypes;
499   RegParmTypes.push_back(MVT::i64);
500   RegParmTypes.push_back(MVT::f128);
501 
502   // Later on, we can use this vector to restore the registers if necessary.
503   SmallVectorImpl<ForwardedRegister> &Forwards =
504       FuncInfo->getForwardedMustTailRegParms();
505   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);
506 
507   // Conservatively forward X8, since it might be used for an aggregate
508   // return.
509   if (!CCInfo.isAllocated(AArch64::X8)) {
510     Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
511     Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
512   }
513 
514   // Add the forwards to the MachineBasicBlock and MachineFunction.
515   for (const auto &F : Forwards) {
516     MBB.addLiveIn(F.PReg);
517     MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
518   }
519 }
520 
521 bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
522   auto &F = MF.getFunction();
523   if (F.getReturnType()->isScalableTy() ||
524       llvm::any_of(F.args(), [](const Argument &A) {
525         return A.getType()->isScalableTy();
526       }))
527     return true;
528   const auto &ST = MF.getSubtarget<AArch64Subtarget>();
529   if (!ST.hasNEON() || !ST.hasFPARMv8()) {
530     LLVM_DEBUG(dbgs() << "Falling back to SDAG: no-NEON/no-FP is unsupported\n");
531     return true;
532   }
533 
534   SMEAttrs Attrs(F);
535   if (Attrs.hasNewZAInterface() ||
536       (!Attrs.hasStreamingInterface() && Attrs.hasStreamingBody()))
537     return true;
538 
539   return false;
540 }
541 
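/// Spill the unallocated GPR (and, outside Win64, FPR) vararg registers to
/// their register save areas so that va_arg can find them later, and record
/// the save area indices and sizes in AArch64FunctionInfo.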
542 void AArch64CallLowering::saveVarArgRegisters(
543     MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
544     CCState &CCInfo) const {
545   auto GPRArgRegs = AArch64::getGPRArgRegs();
546   auto FPRArgRegs = AArch64::getFPRArgRegs();
547 
548   MachineFunction &MF = MIRBuilder.getMF();
549   MachineRegisterInfo &MRI = MF.getRegInfo();
550   MachineFrameInfo &MFI = MF.getFrameInfo();
551   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
552   auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
553   bool IsWin64CC =
554       Subtarget.isCallingConvWin64(CCInfo.getCallingConv());
555   const LLT p0 = LLT::pointer(0, 64);
556   const LLT s64 = LLT::scalar(64);
557 
558   unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
559   unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;
560 
561   unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
562   int GPRIdx = 0;
563   if (GPRSaveSize != 0) {
564     if (IsWin64CC) {
565       GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
566                                      -static_cast<int>(GPRSaveSize), false);
567       if (GPRSaveSize & 15)
568         // The extra size here, if triggered, will always be 8.
569         MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
570                               -static_cast<int>(alignTo(GPRSaveSize, 16)),
571                               false);
572     } else
573       GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
574 
575     auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
576     auto Offset =
577         MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);
578 
579     for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
580       Register Val = MRI.createGenericVirtualRegister(s64);
581       Handler.assignValueToReg(
582           Val, GPRArgRegs[i],
583           CCValAssign::getReg(i + MF.getFunction().getNumOperands(), MVT::i64,
584                               GPRArgRegs[i], MVT::i64, CCValAssign::Full));
585       auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
586                                MF, GPRIdx, (i - FirstVariadicGPR) * 8)
587                          : MachinePointerInfo::getStack(MF, i * 8);
588       MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
589 
590       FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
591                                    FIN.getReg(0), Offset);
592     }
593   }
594   FuncInfo->setVarArgsGPRIndex(GPRIdx);
595   FuncInfo->setVarArgsGPRSize(GPRSaveSize);
596 
597   if (Subtarget.hasFPARMv8() && !IsWin64CC) {
598     unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
599 
600     unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
601     int FPRIdx = 0;
602     if (FPRSaveSize != 0) {
603       FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
604 
605       auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
606       auto Offset =
607           MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);
608 
609       for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
610         Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
611         Handler.assignValueToReg(
612             Val, FPRArgRegs[i],
613             CCValAssign::getReg(
614                 i + MF.getFunction().getNumOperands() + NumVariadicGPRArgRegs,
615                 MVT::f128, FPRArgRegs[i], MVT::f128, CCValAssign::Full));
616 
617         auto MPO = MachinePointerInfo::getStack(MF, i * 16);
618         MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
619 
620         FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
621                                      FIN.getReg(0), Offset);
622       }
623     }
624     FuncInfo->setVarArgsFPRIndex(FPRIdx);
625     FuncInfo->setVarArgsFPRSize(FPRSaveSize);
626   }
627 }
628 
629 bool AArch64CallLowering::lowerFormalArguments(
630     MachineIRBuilder &MIRBuilder, const Function &F,
631     ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
632   MachineFunction &MF = MIRBuilder.getMF();
633   MachineBasicBlock &MBB = MIRBuilder.getMBB();
634   MachineRegisterInfo &MRI = MF.getRegInfo();
635   auto &DL = F.getParent()->getDataLayout();
636   auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
637   // TODO: Support Arm64EC
638   bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv()) && !Subtarget.isWindowsArm64EC();
639 
640   SmallVector<ArgInfo, 8> SplitArgs;
641   SmallVector<std::pair<Register, Register>> BoolArgs;
642 
643   // Insert the hidden sret parameter if the return value won't fit in the
644   // return registers.
645   if (!FLI.CanLowerReturn)
646     insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
647 
648   unsigned i = 0;
649   for (auto &Arg : F.args()) {
650     if (DL.getTypeStoreSize(Arg.getType()).isZero())
651       continue;
652 
653     ArgInfo OrigArg{VRegs[i], Arg, i};
654     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
655 
656     // i1 arguments are zero-extended to i8 by the caller. Emit a
657     // hint to reflect this.
658     if (OrigArg.Ty->isIntegerTy(1)) {
659       assert(OrigArg.Regs.size() == 1 &&
660              MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
661              "Unexpected registers used for i1 arg");
662 
663       auto &Flags = OrigArg.Flags[0];
664       if (!Flags.isZExt() && !Flags.isSExt()) {
665         // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
666         Register OrigReg = OrigArg.Regs[0];
667         Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
668         OrigArg.Regs[0] = WideReg;
669         BoolArgs.push_back({OrigReg, WideReg});
670       }
671     }
672 
673     if (Arg.hasAttribute(Attribute::SwiftAsync))
674       MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
675 
676     splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
677     ++i;
678   }
679 
680   if (!MBB.empty())
681     MIRBuilder.setInstr(*MBB.begin());
682 
683   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
684   CCAssignFn *AssignFn = TLI.CCAssignFnForCall(F.getCallingConv(), IsWin64 && F.isVarArg());
685 
686   AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
687   FormalArgHandler Handler(MIRBuilder, MRI);
688   SmallVector<CCValAssign, 16> ArgLocs;
689   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
690   if (!determineAssignments(Assigner, SplitArgs, CCInfo) ||
691       !handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
692     return false;
693 
694   if (!BoolArgs.empty()) {
695     for (auto &KV : BoolArgs) {
696       Register OrigReg = KV.first;
697       Register WideReg = KV.second;
698       LLT WideTy = MRI.getType(WideReg);
699       assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
700              "Unexpected bit size of a bool arg");
701       MIRBuilder.buildTrunc(
702           OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
703     }
704   }
705 
706   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
707   uint64_t StackSize = Assigner.StackSize;
708   if (F.isVarArg()) {
709     if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
710       // The AAPCS variadic function ABI is identical to the non-variadic
711       // one. As a result there may be more arguments in registers and we should
712       // save them for future reference.
713       // Win64 variadic functions also pass arguments in registers, but all
714       // float arguments are passed in integer registers.
715       saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
716     } else if (Subtarget.isWindowsArm64EC()) {
717       return false;
718     }
719 
720     // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
721     StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);
722 
723     auto &MFI = MIRBuilder.getMF().getFrameInfo();
724     FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
725   }
726 
727   if (doesCalleeRestoreStack(F.getCallingConv(),
728                              MF.getTarget().Options.GuaranteedTailCallOpt)) {
729     // We have a non-standard ABI, so why not make full use of the stack that
730     // we're going to pop? It must be aligned to 16 B in any case.
731     StackSize = alignTo(StackSize, 16);
732 
733     // If we're expected to restore the stack (e.g. fastcc), then we'll be
734     // adding a multiple of 16.
735     FuncInfo->setArgumentStackToRestore(StackSize);
736 
737     // Our own callers will guarantee that the space is free by giving an
738     // aligned value to CALLSEQ_START.
739   }
740 
741   // When we tail call, we need to check if the callee's arguments
742   // will fit on the caller's stack. So, whenever we lower formal arguments,
743   // we should keep track of this information, since we might lower a tail call
744   // in this function later.
745   FuncInfo->setBytesInStackArgArea(StackSize);
746 
747   if (Subtarget.hasCustomCallingConv())
748     Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
749 
750   handleMustTailForwardedRegisters(MIRBuilder, AssignFn);
751 
752   // Move back to the end of the basic block.
753   MIRBuilder.setMBB(MBB);
754 
755   return true;
756 }
757 
758 /// Return true if the calling convention is one that we can guarantee TCO for.
759 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
760   return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
761          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
762 }
763 
764 /// Return true if we might ever do TCO for calls with this calling convention.
765 static bool mayTailCallThisCC(CallingConv::ID CC) {
766   switch (CC) {
767   case CallingConv::C:
768   case CallingConv::PreserveMost:
769   case CallingConv::PreserveAll:
770   case CallingConv::Swift:
771   case CallingConv::SwiftTail:
772   case CallingConv::Tail:
773   case CallingConv::Fast:
774     return true;
775   default:
776     return false;
777   }
778 }
779 
780 /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
781 /// CC.
782 static std::pair<CCAssignFn *, CCAssignFn *>
783 getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
784   return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
785 }
786 
787 bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
788     CallLoweringInfo &Info, MachineFunction &MF,
789     SmallVectorImpl<ArgInfo> &InArgs) const {
790   const Function &CallerF = MF.getFunction();
791   CallingConv::ID CalleeCC = Info.CallConv;
792   CallingConv::ID CallerCC = CallerF.getCallingConv();
793 
794   // If the calling conventions match, then everything must be the same.
795   if (CalleeCC == CallerCC)
796     return true;
797 
798   // Check if the caller and callee will handle arguments in the same way.
799   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
800   CCAssignFn *CalleeAssignFnFixed;
801   CCAssignFn *CalleeAssignFnVarArg;
802   std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
803       getAssignFnsForCC(CalleeCC, TLI);
804 
805   CCAssignFn *CallerAssignFnFixed;
806   CCAssignFn *CallerAssignFnVarArg;
807   std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
808       getAssignFnsForCC(CallerCC, TLI);
809 
810   AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
811                                               CalleeAssignFnVarArg);
812   AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
813                                               CallerAssignFnVarArg);
814 
815   if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
816     return false;
817 
818   // Make sure that the caller and callee preserve all of the same registers.
819   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
820   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
821   const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
822   if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
823     TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
824     TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
825   }
826 
827   return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
828 }
829 
830 bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
831     CallLoweringInfo &Info, MachineFunction &MF,
832     SmallVectorImpl<ArgInfo> &OrigOutArgs) const {
833   // If there are no outgoing arguments, then we are done.
834   if (OrigOutArgs.empty())
835     return true;
836 
837   const Function &CallerF = MF.getFunction();
838   LLVMContext &Ctx = CallerF.getContext();
839   CallingConv::ID CalleeCC = Info.CallConv;
840   CallingConv::ID CallerCC = CallerF.getCallingConv();
841   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
842   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
843 
844   CCAssignFn *AssignFnFixed;
845   CCAssignFn *AssignFnVarArg;
846   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
847 
848   // We have outgoing arguments. Make sure that we can tail call with them.
849   SmallVector<CCValAssign, 16> OutLocs;
850   CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);
851 
852   AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
853                                               Subtarget, /*IsReturn*/ false);
854   // determineAssignments() may modify argument flags, so make a copy.
855   SmallVector<ArgInfo, 8> OutArgs;
856   append_range(OutArgs, OrigOutArgs);
857   if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
858     LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
859     return false;
860   }
861 
862   // Make sure that they can fit on the caller's stack.
863   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
864   if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
865     LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
866     return false;
867   }
868 
869   // Verify that the parameters in callee-saved registers match.
870   // TODO: Port this over to CallLowering as general code once swiftself is
871   // supported.
872   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
873   const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
874   MachineRegisterInfo &MRI = MF.getRegInfo();
875 
876   if (Info.IsVarArg) {
877     // Be conservative and disallow variadic memory operands to match SDAG's
878     // behaviour.
879     // FIXME: If the caller's calling convention is C, then we can
880     // potentially use its argument area. However, for cases like fastcc,
881     // we can't do anything.
882     for (unsigned i = 0; i < OutLocs.size(); ++i) {
883       auto &ArgLoc = OutLocs[i];
884       if (ArgLoc.isRegLoc())
885         continue;
886 
887       LLVM_DEBUG(
888           dbgs()
889           << "... Cannot tail call vararg function with stack arguments\n");
890       return false;
891     }
892   }
893 
894   return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
895 }
896 
897 bool AArch64CallLowering::isEligibleForTailCallOptimization(
898     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
899     SmallVectorImpl<ArgInfo> &InArgs,
900     SmallVectorImpl<ArgInfo> &OutArgs) const {
901 
902   // Must pass all target-independent checks in order to tail call optimize.
903   if (!Info.IsTailCall)
904     return false;
905 
906   CallingConv::ID CalleeCC = Info.CallConv;
907   MachineFunction &MF = MIRBuilder.getMF();
908   const Function &CallerF = MF.getFunction();
909 
910   LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");
911 
912   if (Info.SwiftErrorVReg) {
913     // TODO: We should handle this.
914     // Note that this is also handled by the check for no outgoing arguments.
915     // Proactively disabling this though, because the swifterror handling in
916     // lowerCall inserts a COPY *after* the location of the call.
917     LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
918     return false;
919   }
920 
921   if (!mayTailCallThisCC(CalleeCC)) {
922     LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
923     return false;
924   }
925 
926   // Byval parameters hand the function a pointer directly into the stack area
927   // we want to reuse during a tail call. Working around this *is* possible (see
928   // X86).
929   //
930   // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
931   // it?
932   //
933   // On Windows, "inreg" attributes signify non-aggregate indirect returns.
934   // In this case, it is necessary to save/restore X0 in the callee. Tail
935   // call opt interferes with this. So we disable tail call opt when the
936   // caller has an argument with "inreg" attribute.
937   //
938   // FIXME: Check whether the callee also has an "inreg" argument.
939   //
940   // When the caller has a swifterror argument, we don't want to tail call
941   // because we would have to move the value into the swifterror register
942   // before the tail call.
943   if (any_of(CallerF.args(), [](const Argument &A) {
944         return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
945       })) {
946     LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
947                          "inreg, or swifterror arguments\n");
948     return false;
949   }
950 
951   // Externally-defined functions with weak linkage should not be
952   // tail-called on AArch64 when the OS does not support dynamic
953   // pre-emption of symbols, as the AAELF spec requires normal calls
954   // to undefined weak functions to be replaced with a NOP or jump to the
955   // next instruction. The behaviour of branch instructions in this
956   // situation (as used for tail calls) is implementation-defined, so we
957   // cannot rely on the linker replacing the tail call with a return.
958   if (Info.Callee.isGlobal()) {
959     const GlobalValue *GV = Info.Callee.getGlobal();
960     const Triple &TT = MF.getTarget().getTargetTriple();
961     if (GV->hasExternalWeakLinkage() &&
962         (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
963          TT.isOSBinFormatMachO())) {
964       LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
965                            "with weak linkage for this OS.\n");
966       return false;
967     }
968   }
969 
970   // If we have -tailcallopt, then we're done.
971   if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
972     return CalleeCC == CallerF.getCallingConv();
973 
974   // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
975   // Try to find cases where we can do that.
976 
977   // I want anyone implementing a new calling convention to think long and hard
978   // about this assert.
979   assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
980          "Unexpected variadic calling convention");
981 
982   // Verify that the incoming and outgoing arguments from the callee are
983   // safe to tail call.
984   if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
985     LLVM_DEBUG(
986         dbgs()
987         << "... Caller and callee have incompatible calling conventions.\n");
988     return false;
989   }
990 
991   if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
992     return false;
993 
994   LLVM_DEBUG(
995       dbgs() << "... Call is eligible for tail call optimization.\n");
996   return true;
997 }
998 
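/// Return the opcode to use for the call instruction: BL/BLR* for a normal
/// call, or a TCRETURN* pseudo for a tail call (the BTI-compatible variant
/// when branch target enforcement is enabled).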
999 static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
1000                               bool IsTailCall) {
1001   if (!IsTailCall)
1002     return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;
1003 
1004   if (!IsIndirect)
1005     return AArch64::TCRETURNdi;
1006 
1007   // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
1008   // x16 or x17.
1009   if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
1010     return AArch64::TCRETURNriBTI;
1011 
1012   return AArch64::TCRETURNri;
1013 }
1014 
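/// Compute the register mask for the call. If the first outgoing argument has
/// the 'returned' attribute, prefer the X0-preserving ('this' return) mask if
/// the target provides one; otherwise clear the flag and use the normal
/// call-preserved mask.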
1015 static const uint32_t *
1016 getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
1017                AArch64CallLowering::CallLoweringInfo &Info,
1018                const AArch64RegisterInfo &TRI, MachineFunction &MF) {
1019   const uint32_t *Mask;
1020   if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
1021     // For 'this' returns, use the X0-preserving mask if applicable
1022     Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
1023     if (!Mask) {
1024       OutArgs[0].Flags[0].setReturned(false);
1025       Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
1026     }
1027   } else {
1028     Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
1029   }
1030   return Mask;
1031 }
1032 
1033 bool AArch64CallLowering::lowerTailCall(
1034     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
1035     SmallVectorImpl<ArgInfo> &OutArgs) const {
1036   MachineFunction &MF = MIRBuilder.getMF();
1037   const Function &F = MF.getFunction();
1038   MachineRegisterInfo &MRI = MF.getRegInfo();
1039   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
1040   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
1041 
1042   // True when we're tail calling, but without -tailcallopt.
1043   bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
1044                    Info.CallConv != CallingConv::Tail &&
1045                    Info.CallConv != CallingConv::SwiftTail;
1046 
1047   // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
1048   // register class. Until we can do that, we should fall back here.
1049   if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
1050     LLVM_DEBUG(
1051         dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
1052     return false;
1053   }
1054 
1055   // Find out which ABI gets to decide where things go.
1056   CallingConv::ID CalleeCC = Info.CallConv;
1057   CCAssignFn *AssignFnFixed;
1058   CCAssignFn *AssignFnVarArg;
1059   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
1060 
1061   MachineInstrBuilder CallSeqStart;
1062   if (!IsSibCall)
1063     CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
1064 
1065   unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
1066   auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1067   MIB.add(Info.Callee);
1068 
1069   // Byte offset for the tail call. When we are sibcalling, this will always
1070   // be 0.
1071   MIB.addImm(0);
1072 
1073   // Tell the call which registers are clobbered.
1074   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1075   auto TRI = Subtarget.getRegisterInfo();
1076   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
1077   if (Subtarget.hasCustomCallingConv())
1078     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1079   MIB.addRegMask(Mask);
1080 
1081   if (Info.CFIType)
1082     MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1083 
1084   if (TRI->isAnyArgRegReserved(MF))
1085     TRI->emitReservedArgRegCallError(MF);
1086 
1087   // FPDiff is the byte offset of the call's argument area from the callee's.
1088   // Stores to callee stack arguments will be placed in FixedStackSlots offset
1089   // by this amount for a tail call. In a sibling call it must be 0 because the
1090   // caller will deallocate the entire stack and the callee still expects its
1091   // arguments to begin at SP+0.
1092   int FPDiff = 0;
1093 
1094   // This will be 0 for sibcalls, potentially nonzero for tail calls produced
1095   // by -tailcallopt. For sibcalls, the memory operands for the call are
1096   // already available in the caller's incoming argument space.
1097   unsigned NumBytes = 0;
1098   if (!IsSibCall) {
1099     // We aren't sibcalling, so we need to compute FPDiff. We need to do this
1100     // before handling assignments, because FPDiff must be known for memory
1101     // arguments.
1102     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1103     SmallVector<CCValAssign, 16> OutLocs;
1104     CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
1105 
1106     AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
1107                                                 Subtarget, /*IsReturn*/ false);
1108     if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
1109       return false;
1110 
1111     // The callee will pop the argument stack as a tail call. Thus, we must
1112     // keep it 16-byte aligned.
1113     NumBytes = alignTo(OutInfo.getStackSize(), 16);
1114 
1115     // FPDiff will be negative if this tail call requires more space than we
1116     // would automatically have in our incoming argument space. Positive if we
1117     // actually shrink the stack.
1118     FPDiff = NumReusableBytes - NumBytes;
1119 
1120     // Update the required reserved area if this is the tail call requiring the
1121     // most argument stack space.
1122     if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
1123       FuncInfo->setTailCallReservedStack(-FPDiff);
1124 
1125     // The stack pointer must be 16-byte aligned at all times it's used for a
1126     // memory operation, which in practice means at *all* times and in
1127     // particular across call boundaries. Therefore our own arguments started at
1128     // a 16-byte aligned SP and the delta applied for the tail call should
1129     // satisfy the same constraint.
1130     assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
1131   }
1132 
1133   const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
1134 
1135   AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1136                                         Subtarget, /*IsReturn*/ false);
1137 
1138   // Do the actual argument marshalling.
1139   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
1140                              /*IsTailCall*/ true, FPDiff);
1141   if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1142                                      CalleeCC, Info.IsVarArg))
1143     return false;
1144 
1145   Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1146 
1147   if (Info.IsVarArg && Info.IsMustTailCall) {
1148     // Now we know what's being passed to the function. Add uses to the call for
1149     // the forwarded registers that we *aren't* passing as parameters. This will
1150     // preserve the copies we built earlier.
1151     for (const auto &F : Forwards) {
1152       Register ForwardedReg = F.PReg;
1153       // If the register is already passed, or aliases a register which is
1154       // already being passed, then skip it.
1155       if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
1156             if (!Use.isReg())
1157               return false;
1158             return TRI->regsOverlap(Use.getReg(), ForwardedReg);
1159           }))
1160         continue;
1161 
1162       // We aren't passing it already, so we should add it to the call.
1163       MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
1164       MIB.addReg(ForwardedReg, RegState::Implicit);
1165     }
1166   }
1167 
1168   // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1169   // sequence start and end here.
1170   if (!IsSibCall) {
1171     MIB->getOperand(1).setImm(FPDiff);
1172     CallSeqStart.addImm(0).addImm(0);
1173     // End the call sequence *before* emitting the call. Normally, we would
1174     // tidy the frame up after the call. However, here, we've laid out the
1175     // parameters so that when SP is reset, they will be in the correct
1176     // location.
1177     MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
1178   }
1179 
1180   // Now we can add the actual call instruction to the correct basic block.
1181   MIRBuilder.insertInstr(MIB);
1182 
1183   // If Callee is a reg, it is used by a target-specific instruction and must
1184   // therefore have a register class matching the constraint of that instruction.
1185   if (MIB->getOperand(0).isReg())
1186     constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1187                              *MF.getSubtarget().getRegBankInfo(), *MIB,
1188                              MIB->getDesc(), MIB->getOperand(0), 0);
1189 
1190   MF.getFrameInfo().setHasTailCall();
1191   Info.LoweredTailCall = true;
1192   return true;
1193 }
1194 
1195 bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
1196                                     CallLoweringInfo &Info) const {
1197   MachineFunction &MF = MIRBuilder.getMF();
1198   const Function &F = MF.getFunction();
1199   MachineRegisterInfo &MRI = MF.getRegInfo();
1200   auto &DL = F.getParent()->getDataLayout();
1201   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
1202   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1203 
1204   // Arm64EC has extra requirements for varargs calls; bail out for now.
1205   if (Info.IsVarArg && Subtarget.isWindowsArm64EC())
1206     return false;
1207 
1208   SmallVector<ArgInfo, 8> OutArgs;
1209   for (auto &OrigArg : Info.OrigArgs) {
1210     splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
1211     // AAPCS requires the caller to zero-extend i1 arguments to 8 bits.
1212     auto &Flags = OrigArg.Flags[0];
1213     if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
1214       ArgInfo &OutArg = OutArgs.back();
1215       assert(OutArg.Regs.size() == 1 &&
1216              MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
1217              "Unexpected registers used for i1 arg");
1218 
1219       // We cannot use a ZExt ArgInfo flag here, because it will
1220       // zero-extend the argument to i32 instead of just i8.
1221       OutArg.Regs[0] =
1222           MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
1223       LLVMContext &Ctx = MF.getFunction().getContext();
1224       OutArg.Ty = Type::getInt8Ty(Ctx);
1225     }
1226   }
1227 
1228   SmallVector<ArgInfo, 8> InArgs;
1229   if (!Info.OrigRet.Ty->isVoidTy())
1230     splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
1231 
1232   // If we can lower as a tail call, do that instead.
1233   bool CanTailCallOpt =
1234       isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
1235 
1236   // We must emit a tail call if we have musttail.
1237   if (Info.IsMustTailCall && !CanTailCallOpt) {
1238     // There are types of incoming/outgoing arguments we can't handle yet, so
1239     // it doesn't make sense to actually die here like in ISelLowering. Instead,
1240     // fall back to SelectionDAG and let it try to handle this.
1241     LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
1242     return false;
1243   }
1244 
1245   Info.IsTailCall = CanTailCallOpt;
1246   if (CanTailCallOpt)
1247     return lowerTailCall(MIRBuilder, Info, OutArgs);
1248 
1249   // Find out which ABI gets to decide where things go.
1250   CCAssignFn *AssignFnFixed;
1251   CCAssignFn *AssignFnVarArg;
1252   std::tie(AssignFnFixed, AssignFnVarArg) =
1253       getAssignFnsForCC(Info.CallConv, TLI);
1254 
1255   MachineInstrBuilder CallSeqStart;
1256   CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
1257 
1258   // Create a temporarily-floating call instruction so we can add the implicit
1259   // uses of arg registers.
1260 
1261   unsigned Opc = 0;
1262   // Calls with operand bundle "clang.arc.attachedcall" are special. They should
1263   // be expanded to the call, directly followed by a special marker sequence and
1264   // a call to an ObjC library function.
1265   if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
1266     Opc = AArch64::BLR_RVMARKER;
1267   // A call to a returns_twice function like setjmp must be followed by a bti
1268   // instruction.
1269   else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
1270            !Subtarget.noBTIAtReturnTwice() &&
1271            MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
1272     Opc = AArch64::BLR_BTI;
1273   else
1274     Opc = getCallOpcode(MF, Info.Callee.isReg(), false);
1275 
1276   auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1277   unsigned CalleeOpNo = 0;
1278 
1279   if (Opc == AArch64::BLR_RVMARKER) {
1280     // Add a target global address for the retainRV/claimRV runtime function
1281     // just before the call target.
1282     Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
1283     MIB.addGlobalAddress(ARCFn);
1284     ++CalleeOpNo;
1285   } else if (Info.CFIType) {
1286     MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1287   }
1288 
1289   MIB.add(Info.Callee);
1290 
1291   // Tell the call which registers are clobbered.
1292   const uint32_t *Mask;
1293   const auto *TRI = Subtarget.getRegisterInfo();
1294 
1295   AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1296                                         Subtarget, /*IsReturn*/ false);
1297   // Do the actual argument marshalling.
1298   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsTailCall=*/false);
1299   if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1300                                      Info.CallConv, Info.IsVarArg))
1301     return false;
1302 
1303   Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1304 
1305   if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
1306     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1307   MIB.addRegMask(Mask);
1308 
1309   if (TRI->isAnyArgRegReserved(MF))
1310     TRI->emitReservedArgRegCallError(MF);
1311 
1312   // Now we can add the actual call instruction to the correct basic block.
1313   MIRBuilder.insertInstr(MIB);
1314 
1315   uint64_t CalleePopBytes =
1316       doesCalleeRestoreStack(Info.CallConv,
1317                              MF.getTarget().Options.GuaranteedTailCallOpt)
1318           ? alignTo(Assigner.StackSize, 16)
1319           : 0;
1320 
1321   CallSeqStart.addImm(Assigner.StackSize).addImm(0);
1322   MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
1323       .addImm(Assigner.StackSize)
1324       .addImm(CalleePopBytes);
1325 
1326   // If Callee is a reg, it is used by a target-specific instruction and must
1327   // therefore have a register class matching the constraint of that
1328   // instruction.
1329   if (MIB->getOperand(CalleeOpNo).isReg())
1330     constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
1331                              *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
1332                              MIB->getOperand(CalleeOpNo), CalleeOpNo);
1333 
1334   // Finally we can copy the returned value back into its virtual-register. In
1335   // symmetry with the arguments, the physical register must be an
1336   // implicit-define of the call instruction.
1337   if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
1338     CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
1339     CallReturnHandler Handler(MIRBuilder, MRI, MIB);
1340     bool UsingReturnedArg =
1341         !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();
1342 
1343     AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
1344                                           /*IsReturn*/ false);
1345     ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
1346     if (!determineAndHandleAssignments(
1347             UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
1348             MIRBuilder, Info.CallConv, Info.IsVarArg,
1349             UsingReturnedArg ? ArrayRef(OutArgs[0].Regs) : std::nullopt))
1350       return false;
1351   }
1352 
1353   if (Info.SwiftErrorVReg) {
1354     MIB.addDef(AArch64::X21, RegState::Implicit);
1355     MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
1356   }
1357 
1358   if (!Info.CanLowerReturn) {
1359     insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
1360                     Info.DemoteRegister, Info.DemoteStackIndex);
1361   }
1362   return true;
1363 }
1364 
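// Only 64-bit (pointer-sized) values are eligible for the 'this'-return
// optimization on AArch64.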
1365 bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
1366   return Ty.getSizeInBits() == 64;
1367 }
1368