//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64GlobalISelUtils.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;
using namespace AArch64GISelUtils;

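// Command-line flag declared here and defined elsewhere in the AArch64
// backend; when enabled, functions using scalable (SVE) vector types are kept
// on the GlobalISel path instead of falling back to SelectionDAG (see
// fallBackToDAGISel below).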
extern cl::opt<bool> EnableSVEGISel;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
    : CallLowering(&TLI) {}

static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
                                             MVT &LocVT) {
  // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
  // hack because the DAG calls the assignment function with pre-legalized
  // register typed values, not the raw type.
  //
  // This hack is not applied to return values which are not passed on the
  // stack.
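  //
  // For example, an i1 or i8 value passed on the stack occupies a full byte,
  // so both ValVT and LocVT become i8 below.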
  if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
    ValVT = LocVT = MVT::i8;
  else if (OrigVT == MVT::i16)
    ValVT = LocVT = MVT::i16;
}

// Account for i1/i8/i16 stack passed value hack
static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
  const MVT ValVT = VA.getValVT();
  return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
                                                 : LLT(VA.getLocVT());
}

namespace {

struct AArch64IncomingValueAssigner
    : public CallLowering::IncomingValueAssigner {
  AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_)
      : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
    return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
                                            LocInfo, Info, Flags, State);
  }
};

struct AArch64OutgoingValueAssigner
    : public CallLowering::OutgoingValueAssigner {
  const AArch64Subtarget &Subtarget;

  /// Track if this is used for a return instead of function argument
  /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
  /// stack passed returns for them and cannot apply the type adjustment.
  bool IsReturn;

  AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_,
                               const AArch64Subtarget &Subtarget_,
                               bool IsReturn)
      : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
        Subtarget(Subtarget_), IsReturn(IsReturn) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    const Function &F = State.getMachineFunction().getFunction();
    bool IsCalleeWin =
        Subtarget.isCallingConvWin64(State.getCallingConv(), F.isVarArg());
    bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();

    bool Res;
    if (Info.IsFixed && !UseVarArgsCCForFixed) {
      if (!IsReturn)
        applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    } else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

    StackSize = State.getStackSize();
    return Res;
  }
};

struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingValueHandler(MIRBuilder, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();

    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    return AddrReg.getReg(0);
  }

  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    // For pointers, we just need to fixup the integer types reported in the
    // CCValAssign.
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    LLT ValTy(VA.getValVT());
    LLT LocTy(VA.getLocVT());

    // Fixup the types for the DAG compatibility hack.
    if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
      std::swap(ValTy, LocTy);
    else {
      // The calling code knows if this is a pointer or not, we're only touching
      // the LocTy for the i8/i16 hack.
      assert(LocTy.getSizeInBits() == MemTy.getSizeInBits());
      LocTy = MemTy;
    }

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
        inferAlignFromPtrInfo(MF, MPO));

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::ZExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
      return;
    case CCValAssign::LocInfo::SExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
      return;
    default:
      MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
      return;
    }
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingArgHandler(MIRBuilder, MRI) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

/// A special return arg handler for "returned" attribute arg calls.
struct ReturnedArgCallReturnHandler : public CallReturnHandler {
  ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
                               MachineRegisterInfo &MRI,
                               MachineInstrBuilder MIB)
      : CallReturnHandler(MIRBuilder, MRI, MIB) {}

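  // Deliberately a no-op: for a call with a "returned" argument the relevant
  // physical register is already an operand of the call instruction, so no
  // additional bookkeeping is needed here.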
  void markPhysRegUsed(MCRegister PhysReg) override {}
};

struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, bool IsTailCall = false,
                     int FPDiff = 0)
      : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
        FPDiff(FPDiff),
        Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      assert(!Flags.isByVal() && "byval unhandled with tail calls");

      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  /// We need to fixup the reported store size for certain value types because
  /// we invert the interpretation of ValVT and LocVT in certain cases. This is
  /// for compatibility with the DAG call lowering implementation, which we're
  /// currently building on top of.
  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
                            Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    unsigned MaxSize = MemTy.getSizeInBytes() * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (!Arg.IsFixed)
      MaxSize = 0;

    Register ValVReg = Arg.Regs[RegIndex];
    if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
      MVT LocVT = VA.getLocVT();
      MVT ValVT = VA.getValVT();

      if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16) {
        std::swap(ValVT, LocVT);
        MemTy = LLT(VA.getValVT());
      }

      ValVReg = extendRegister(ValVReg, VA, MaxSize);
    } else {
      // The store does not cover the full allocated stack slot.
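      // E.g. an f32 value assigned to an f64 location (FPExt) is stored as
      // its original four bytes into the eight-byte slot.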
      MemTy = LLT(VA.getValVT());
    }

    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }

  MachineInstrBuilder MIB;

  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const AArch64Subtarget &Subtarget;
};
} // namespace

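/// Return true if the callee is expected to pop its own argument stack area
/// on return: the guaranteed-tail-call conventions (tailcc/swifttailcc), and
/// fastcc when GuaranteedTailCallOpt is enabled.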
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return (CallConv == CallingConv::Fast && TailCallOpt) ||
         CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
}

bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<Register> VRegs,
                                      FunctionLoweringInfo &FLI,
                                      Register SwiftErrorVReg) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");

  bool Success = true;
  if (!FLI.CanLowerReturn) {
    insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
  } else if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      Register CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
      auto &Flags = CurArgInfo.Flags[0];
      if (MRI.getType(CurVReg).getSizeInBits() == 1 && !Flags.isSExt() &&
          !Flags.isZExt()) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
                 1) {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasRetAttr(Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT = getLLTForType(*CurArgInfo.Ty, DL);
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>.
          if (NewVT.isVector()) {
            if (OldLLT.isVector()) {
              if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                CurVReg =
                    MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
                        .getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() >= 2 &&
                       NewLLT.getNumElements() <= 8) {
              // We need to pad a <1 x S> type to <2/4/8 x S>. Since we don't
              // have <1 x S> vector types in GISel we use a build_vector
              // instead of a vector merge/concat.
              CurVReg =
                  MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
              return false;
            }
          } else {
            // If the split EVT was a <1 x T> vector, and NewVT is T, then we
            // don't have to do anything since we don't distinguish between the
            // two.
            if (NewLLT != MRI.getType(CurVReg)) {
              // A scalar extend.
              CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                            .getReg(0);
            }
          }
        }
      }
      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
    }

    AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
                                          /*IsReturn*/ true);
    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
    Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
                                            MIRBuilder, CC, F.isVarArg());
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}

bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
                                         CallingConv::ID CallConv,
                                         SmallVectorImpl<BaseArgInfo> &Outs,
                                         bool IsVarArg) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  const auto &TLI = *getTLI<AArch64TargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
}

/// Helper function to compute forwarded registers for musttail calls. Computes
/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
/// can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
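  // Forward both integer and vector argument registers: i64 covers X0-X7 and
  // f128 covers Q0-Q7.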
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}

bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
  auto &F = MF.getFunction();
  if (!EnableSVEGISel && (F.getReturnType()->isScalableTy() ||
                          llvm::any_of(F.args(), [](const Argument &A) {
                            return A.getType()->isScalableTy();
                          })))
    return true;
  const auto &ST = MF.getSubtarget<AArch64Subtarget>();
  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
    return true;
  }

  SMEAttrs Attrs(F);
  if (Attrs.hasZAState() || Attrs.hasZT0State() ||
      Attrs.hasStreamingInterfaceOrBody() ||
      Attrs.hasStreamingCompatibleInterface())
    return true;

  return false;
}

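/// Spill the unallocated GPR and FPR argument registers of a variadic
/// function to their register save areas, and record the save-area frame
/// indices and sizes in AArch64FunctionInfo for use when lowering va_start.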
void AArch64CallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    CCState &CCInfo) const {
  auto GPRArgRegs = AArch64::getGPRArgRegs();
  auto FPRArgRegs = AArch64::getFPRArgRegs();

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64CC = Subtarget.isCallingConvWin64(CCInfo.getCallingConv(),
                                                MF.getFunction().isVarArg());
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s64 = LLT::scalar(64);

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
  unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;

  unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    if (IsWin64CC) {
      GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
                                     -static_cast<int>(GPRSaveSize), false);
      if (GPRSaveSize & 15)
        // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -static_cast<int>(alignTo(GPRSaveSize, 16)),
                              false);
    } else
      GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);

    auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
    auto Offset =
        MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);

    for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
      Register Val = MRI.createGenericVirtualRegister(s64);
      Handler.assignValueToReg(
          Val, GPRArgRegs[i],
          CCValAssign::getReg(i + MF.getFunction().getNumOperands(), MVT::i64,
                              GPRArgRegs[i], MVT::i64, CCValAssign::Full));
      auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
                                 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
                           : MachinePointerInfo::getStack(MF, i * 8);
      MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }
  FuncInfo->setVarArgsGPRIndex(GPRIdx);
  FuncInfo->setVarArgsGPRSize(GPRSaveSize);

  if (Subtarget.hasFPARMv8() && !IsWin64CC) {
    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);

    unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
    int FPRIdx = 0;
    if (FPRSaveSize != 0) {
      FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);

      auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
      auto Offset =
          MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);

      for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
        Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
        Handler.assignValueToReg(
            Val, FPRArgRegs[i],
            CCValAssign::getReg(
                i + MF.getFunction().getNumOperands() + NumVariadicGPRArgRegs,
                MVT::f128, FPRArgRegs[i], MVT::f128, CCValAssign::Full));

        auto MPO = MachinePointerInfo::getStack(MF, i * 16);
        MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

        FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                     FIN.getReg(0), Offset);
      }
    }
    FuncInfo->setVarArgsFPRIndex(FPRIdx);
    FuncInfo->setVarArgsFPRSize(FPRSaveSize);
  }
}

bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getDataLayout();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // Arm64EC has extra requirements for varargs calls which are only implemented
  // in SelectionDAG; bail out for now.
  if (F.isVarArg() && Subtarget.isWindowsArm64EC())
    return false;

  // Arm64EC thunks have a special calling convention which is only implemented
  // in SelectionDAG; bail out for now.
  if (F.getCallingConv() == CallingConv::ARM64EC_Thunk_Native ||
      F.getCallingConv() == CallingConv::ARM64EC_Thunk_X64)
    return false;

  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg()) &&
      !Subtarget.isWindowsArm64EC();

  SmallVector<ArgInfo, 8> SplitArgs;
  SmallVector<std::pair<Register, Register>> BoolArgs;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()).isZero())
      continue;

    ArgInfo OrigArg{VRegs[i], Arg, i};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    // i1 arguments are zero-extended to i8 by the caller. Emit a
    // hint to reflect this.
    if (OrigArg.Ty->isIntegerTy(1)) {
      assert(OrigArg.Regs.size() == 1 &&
             MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      auto &Flags = OrigArg.Flags[0];
      if (!Flags.isZExt() && !Flags.isSExt()) {
        // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
        Register OrigReg = OrigArg.Regs[0];
        Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
        OrigArg.Regs[0] = WideReg;
        BoolArgs.push_back({OrigReg, WideReg});
      }
    }

    if (Arg.hasAttribute(Attribute::SwiftAsync))
      MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);

    splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
    ++i;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), IsWin64 && F.isVarArg());

  AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
  FormalArgHandler Handler(MIRBuilder, MRI);
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgs, CCInfo) ||
      !handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (!BoolArgs.empty()) {
    for (auto &KV : BoolArgs) {
      Register OrigReg = KV.first;
      Register WideReg = KV.second;
      LLT WideTy = MRI.getType(WideReg);
      assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
             "Unexpected bit size of a bool arg");
      MIRBuilder.buildTrunc(
          OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
    }
  }

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackSize = Assigner.StackSize;
  if (F.isVarArg()) {
    if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) ||
        IsWin64) {
      // The AAPCS variadic function ABI is identical to the non-variadic
      // one. As a result there may be more arguments in registers and we should
      // save them for future reference.
      // Win64 variadic functions also pass arguments in registers, but all
      // float arguments are passed in integer registers.
      saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
    } else if (Subtarget.isWindowsArm64EC()) {
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
    StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
  }

  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackSize = alignTo(StackSize, 16);

    // If we're expected to restore the stack (e.g. fastcc), then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackSize);

    // Our own callers will guarantee that the space is free by giving an
    // aligned value to CALLSEQ_START.
  }

  // When we tail call, we need to check if the callee's arguments
  // will fit on the caller's stack. So, whenever we lower formal arguments,
  // we should keep track of this information, since we might lower a tail call
  // in this function later.
  FuncInfo->setBytesInStackArgArea(StackSize);

  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
  return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
         CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::PreserveNone:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
  case CallingConv::Fast:
    return true;
  default:
    return false;
  }
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
/// CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                              CalleeAssignFnVarArg);
  AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                              CallerAssignFnVarArg);

  if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }

  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}

areCalleeOutgoingArgsTailCallable(CallLoweringInfo & Info,MachineFunction & MF,SmallVectorImpl<ArgInfo> & OrigOutArgs) const8535ffd83dbSDimitry Andric bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
8545ffd83dbSDimitry Andric CallLoweringInfo &Info, MachineFunction &MF,
855b121cb00SDimitry Andric SmallVectorImpl<ArgInfo> &OrigOutArgs) const {
8565ffd83dbSDimitry Andric // If there are no outgoing arguments, then we are done.
857b121cb00SDimitry Andric if (OrigOutArgs.empty())
8585ffd83dbSDimitry Andric return true;
8595ffd83dbSDimitry Andric
8605ffd83dbSDimitry Andric const Function &CallerF = MF.getFunction();
861fe6060f1SDimitry Andric LLVMContext &Ctx = CallerF.getContext();
8625ffd83dbSDimitry Andric CallingConv::ID CalleeCC = Info.CallConv;
8635ffd83dbSDimitry Andric CallingConv::ID CallerCC = CallerF.getCallingConv();
8645ffd83dbSDimitry Andric const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
865fe6060f1SDimitry Andric const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
8665ffd83dbSDimitry Andric
8675ffd83dbSDimitry Andric CCAssignFn *AssignFnFixed;
8685ffd83dbSDimitry Andric CCAssignFn *AssignFnVarArg;
8695ffd83dbSDimitry Andric std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
8705ffd83dbSDimitry Andric
8715ffd83dbSDimitry Andric // We have outgoing arguments. Make sure that we can tail call with them.
8725ffd83dbSDimitry Andric SmallVector<CCValAssign, 16> OutLocs;
873fe6060f1SDimitry Andric CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);
8745ffd83dbSDimitry Andric
875fe6060f1SDimitry Andric AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
876fe6060f1SDimitry Andric Subtarget, /*IsReturn*/ false);
877b121cb00SDimitry Andric // determineAssignments() may modify argument flags, so make a copy.
878b121cb00SDimitry Andric SmallVector<ArgInfo, 8> OutArgs;
879b121cb00SDimitry Andric append_range(OutArgs, OrigOutArgs);
880fe6060f1SDimitry Andric if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
8815ffd83dbSDimitry Andric LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
8825ffd83dbSDimitry Andric return false;
8835ffd83dbSDimitry Andric }
8845ffd83dbSDimitry Andric
  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  // TODO: Port this over to CallLowering as general code once swiftself is
  // supported.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Info.IsVarArg) {
    // Be conservative and disallow variadic memory operands to match SDAG's
    // behaviour.
    // FIXME: If the caller's calling convention is C, then we can
    // potentially use its argument area. However, for cases like fastcc,
    // we can't do anything.
    for (unsigned i = 0; i < OutLocs.size(); ++i) {
      auto &ArgLoc = OutLocs[i];
      if (ArgLoc.isRegLoc())
        continue;

      LLVM_DEBUG(
          dbgs()
          << "... Cannot tail call vararg function with stack arguments\n");
      return false;
    }
  }

  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}

bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs,
    SmallVectorImpl<ArgInfo> &OutArgs) const {

  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");

  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this.
    // Note that this is also handled by the check for no outgoing arguments.
    // Proactively disabling this though, because the swifterror handling in
    // lowerCall inserts a COPY *after* the location of the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86).
  //
  // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
  // it?
  //
  // On Windows, "inreg" attributes signify non-aggregate indirect returns.
  // In this case, it is necessary to save/restore X0 in the callee. Tail
  // call opt interferes with this. So we disable tail call opt when the
  // caller has an argument with "inreg" attribute.
  //
  // FIXME: Check whether the callee also has an "inreg" argument.
  //
  // When the caller has a swifterror argument, we don't want to tail call
  // because we would have to move the swifterror value into the swifterror
  // register before the tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // If we have -tailcallopt, then we're done.
  if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
    return CalleeCC == CallerF.getCallingConv();

  // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
  // Try to find cases where we can do that.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(
      dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall,
                              std::optional<CallLowering::PtrAuthInfo> &PAI,
                              MachineRegisterInfo &MRI) {
  const AArch64FunctionInfo *FuncInfo = CallerF.getInfo<AArch64FunctionInfo>();

  if (!IsTailCall) {
    if (!PAI)
      return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;

    assert(IsIndirect && "Direct call should not be authenticated");
    assert((PAI->Key == AArch64PACKey::IA || PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    return AArch64::BLRA;
  }

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

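  // The remaining cases are indirect tail calls. Informally:
  //   BTI + PAuthLR -> TCRETURNrix17; BTI only -> TCRETURNrix16x17 (or
  //   AUTH_TCRETURN_BTI with ptrauth); PAuthLR only -> TCRETURNrinotx16;
  //   otherwise -> TCRETURNri (or AUTH_TCRETURN with ptrauth).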
  // When BTI or PAuthLR are enabled, there are restrictions on using x16 and
  // x17 to hold the function pointer.
  if (FuncInfo->branchTargetEnforcement()) {
    if (FuncInfo->branchProtectionPAuthLR()) {
      assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
      return AArch64::TCRETURNrix17;
    }
    if (PAI)
      return AArch64::AUTH_TCRETURN_BTI;
    return AArch64::TCRETURNrix16x17;
  }

  if (FuncInfo->branchProtectionPAuthLR()) {
    assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
    return AArch64::TCRETURNrinotx16;
  }

  if (PAI)
    return AArch64::AUTH_TCRETURN;
  return AArch64::TCRETURNri;
}

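// Choose the regmask for a call, preferring the 'this'-return variant (which
// additionally marks X0 as preserved) when the first argument carries the
// 'returned' attribute; otherwise clear that flag and use the plain mask.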
static const uint32_t *
getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
               AArch64CallLowering::CallLoweringInfo &Info,
               const AArch64RegisterInfo &TRI, MachineFunction &MF) {
  const uint32_t *Mask;
  if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
    // For 'this' returns, use the X0-preserving mask if applicable
    Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
    if (!Mask) {
      OutArgs[0].Flags[0].setReturned(false);
      Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
    }
  } else {
    Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
  }
  return Mask;
}

bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
                   Info.CallConv != CallingConv::Tail &&
                   Info.CallConv != CallingConv::SwiftTail;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true, Info.PAI, MRI);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Fetch the subtarget and register info; they are needed for the regmask
  // and the operand constraints below.
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  auto TRI = Subtarget.getRegisterInfo();

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Authenticated tail calls always take key/discriminator arguments.
  if (Opc == AArch64::AUTH_TCRETURN || Opc == AArch64::AUTH_TCRETURN_BTI) {
    assert((Info.PAI->Key == AArch64PACKey::IA ||
            Info.PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    MIB.addImm(Info.PAI->Key);

    Register AddrDisc = 0;
    uint16_t IntDisc = 0;
    std::tie(IntDisc, AddrDisc) =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);
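    // The discriminator may be a ptrauth 'blend' of an address part and a
    // small integer part; split it into the two operands the pseudo expects
    // (either half may be trivially zero).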

    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);
    if (AddrDisc != AArch64::NoRegister) {
      MIB->getOperand(4).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
          *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(),
          MIB->getOperand(4), 4));
    }
  }

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (Info.CFIType)
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                                Subtarget, /*IsReturn*/ false);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getStackSize(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;
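    // Illustration: with 32 reusable bytes and NumBytes == 48, FPDiff is -16
    // (more space is needed); with NumBytes == 16, FPDiff is +16.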

    // Update the required reserved area if this is the tail call requiring the
    // most argument stack space.
    if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
      FuncInfo->setTailCallReservedStack(-FPDiff);

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
                             /*IsTailCall*/ true, FPDiff);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     CalleeCC, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call for
    // the forwarded registers that we *aren't* passing as parameters. This will
    // preserve the copies we build earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(0).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that instruction.
  if (MIB->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
                             *MF.getSubtarget().getRegBankInfo(), *MIB,
                             MIB->getDesc(), MIB->getOperand(0), 0);

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // Arm64EC has special mangling rules for calls; bail out on all calls for
  // now.
  if (Subtarget.isWindowsArm64EC())
    return false;

  // Arm64EC thunks have a special calling convention which is only implemented
  // in SelectionDAG; bail out for now.
  if (Info.CallConv == CallingConv::ARM64EC_Thunk_Native ||
      Info.CallConv == CallingConv::ARM64EC_Thunk_X64)
    return false;

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    auto &Flags = OrigArg.Flags[0];
    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
      ArgInfo &OutArg = OutArgs.back();
      assert(OutArg.Regs.size() == 1 &&
             MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      // We cannot use a ZExt ArgInfo flag here, because it will
      // zero-extend the argument to i32 instead of just i8.
      OutArg.Regs[0] =
          MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
      LLVMContext &Ctx = MF.getFunction().getContext();
      OutArg.Ty = Type::getInt8Ty(Ctx);
    }
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering. Instead,
    // fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.

  unsigned Opc = 0;
  // Calls with operand bundle "clang.arc.attachedcall" are special. They should
  // be expanded to the call, directly followed by a special marker sequence and
  // a call to an ObjC library function.
  if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
    Opc = Info.PAI ? AArch64::BLRA_RVMARKER : AArch64::BLR_RVMARKER;
  // A call to a returns-twice function like setjmp must be followed by a BTI
  // instruction.
  else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
           !Subtarget.noBTIAtReturnTwice() &&
           MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
    Opc = AArch64::BLR_BTI;
  else {
    // For an intrinsic call (e.g. memset), use GOT if "RtLibUseGOT" (-fno-plt)
    // is set.
    if (Info.Callee.isSymbol() && F.getParent()->getRtLibUseGOT()) {
      auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_GLOBAL_VALUE);
      DstOp(getLLTForType(*F.getType(), DL)).addDefToMIB(MRI, MIB);
      MIB.addExternalSymbol(Info.Callee.getSymbolName(), AArch64II::MO_GOT);
      Info.Callee = MachineOperand::CreateReg(MIB.getReg(0), false);
    }
    Opc = getCallOpcode(MF, Info.Callee.isReg(), false, Info.PAI, MRI);
  }
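  // Note: the GOT path above replaces a symbol callee with a register value,
  // so getCallOpcode selects an indirect-call opcode for it.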

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  unsigned CalleeOpNo = 0;

  if (Opc == AArch64::BLR_RVMARKER || Opc == AArch64::BLRA_RVMARKER) {
    // Add a target global address for the retainRV/claimRV runtime function
    // just before the call target.
    Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
    MIB.addGlobalAddress(ARCFn);
    ++CalleeOpNo;
  } else if (Info.CFIType) {
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());
  }

  MIB.add(Info.Callee);

  const uint32_t *Mask;
  const auto *TRI = Subtarget.getRegisterInfo();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);
  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsTailCall*/ false);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     Info.CallConv, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Opc == AArch64::BLRA || Opc == AArch64::BLRA_RVMARKER) {
    assert((Info.PAI->Key == AArch64PACKey::IA ||
            Info.PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    MIB.addImm(Info.PAI->Key);

    Register AddrDisc = 0;
    uint16_t IntDisc = 0;
    std::tie(IntDisc, AddrDisc) =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);

    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);
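    // The explicit operands are now <callee>, <key>, <IntDisc>, <AddrDisc>
    // (explicit operands stay ahead of the implicit register uses added
    // during marshalling), so the address discriminator is expected at
    // operand CalleeOpNo + 3.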
    if (AddrDisc != AArch64::NoRegister) {
      constrainOperandRegClass(MF, *TRI, MRI,
                               *MF.getSubtarget().getInstrInfo(),
                               *MF.getSubtarget().getRegBankInfo(), *MIB,
                               MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),
                               CalleeOpNo + 3);
    }
  }

  // Tell the call which registers are clobbered.
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Assigner.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Assigner.StackSize)
      .addImm(CalleePopBytes);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (MIB->getOperand(CalleeOpNo).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
                             MIB->getOperand(CalleeOpNo), CalleeOpNo);

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    bool UsingReturnedArg =
        !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();
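    // A 'returned' first argument comes back from the callee unchanged, so
    // the return value can be copied straight out of the argument registers.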

    AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
                                          /*IsReturn*/ false);
    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(
            UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
            MIRBuilder, Info.CallConv, Info.IsVarArg,
            UsingReturnedArg ? ArrayRef(OutArgs[0].Regs) : std::nullopt))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }
  return true;
}

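// 'this'-return forwarding is only valid for pointer-sized (64-bit) values
// on AArch64.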
bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
  return Ty.getSizeInBits() == 64;
}