xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp (revision 0b57cec536236d46e3dba9bd041533462f33dbb7)
1*0b57cec5SDimitry Andric //===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
2*0b57cec5SDimitry Andric //
3*0b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*0b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5*0b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*0b57cec5SDimitry Andric //
7*0b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
8*0b57cec5SDimitry Andric //
9*0b57cec5SDimitry Andric // This file contains the AArch64 implementation of the TargetRegisterInfo
10*0b57cec5SDimitry Andric // class.
11*0b57cec5SDimitry Andric //
12*0b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
13*0b57cec5SDimitry Andric 
14*0b57cec5SDimitry Andric #include "AArch64RegisterInfo.h"
15*0b57cec5SDimitry Andric #include "AArch64FrameLowering.h"
16*0b57cec5SDimitry Andric #include "AArch64InstrInfo.h"
17*0b57cec5SDimitry Andric #include "AArch64MachineFunctionInfo.h"
18*0b57cec5SDimitry Andric #include "AArch64Subtarget.h"
19*0b57cec5SDimitry Andric #include "MCTargetDesc/AArch64AddressingModes.h"
20*0b57cec5SDimitry Andric #include "llvm/ADT/BitVector.h"
21*0b57cec5SDimitry Andric #include "llvm/ADT/Triple.h"
22*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFrameInfo.h"
23*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineInstrBuilder.h"
24*0b57cec5SDimitry Andric #include "llvm/CodeGen/MachineRegisterInfo.h"
25*0b57cec5SDimitry Andric #include "llvm/CodeGen/RegisterScavenging.h"
26*0b57cec5SDimitry Andric #include "llvm/IR/Function.h"
27*0b57cec5SDimitry Andric #include "llvm/IR/DiagnosticInfo.h"
28*0b57cec5SDimitry Andric #include "llvm/Support/raw_ostream.h"
29*0b57cec5SDimitry Andric #include "llvm/CodeGen/TargetFrameLowering.h"
30*0b57cec5SDimitry Andric #include "llvm/Target/TargetOptions.h"
31*0b57cec5SDimitry Andric 
32*0b57cec5SDimitry Andric using namespace llvm;
33*0b57cec5SDimitry Andric 
34*0b57cec5SDimitry Andric #define GET_REGINFO_TARGET_DESC
35*0b57cec5SDimitry Andric #include "AArch64GenRegisterInfo.inc"
36*0b57cec5SDimitry Andric 
// Construct the AArch64 register info. LR is handed to the TableGen'd base
// class as the return-address register; the LLVM-to-CV register mapping is
// initialized for this target (presumably for CodeView debug-info emission —
// confirm against AArch64_MC).
AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}
41*0b57cec5SDimitry Andric 
42*0b57cec5SDimitry Andric const MCPhysReg *
43*0b57cec5SDimitry Andric AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
44*0b57cec5SDimitry Andric   assert(MF && "Invalid MachineFunction pointer.");
45*0b57cec5SDimitry Andric   if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
46*0b57cec5SDimitry Andric     return CSR_Win_AArch64_AAPCS_SaveList;
47*0b57cec5SDimitry Andric   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
48*0b57cec5SDimitry Andric     // GHC set of callee saved regs is empty as all those regs are
49*0b57cec5SDimitry Andric     // used for passing STG regs around
50*0b57cec5SDimitry Andric     return CSR_AArch64_NoRegs_SaveList;
51*0b57cec5SDimitry Andric   if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
52*0b57cec5SDimitry Andric     return CSR_AArch64_AllRegs_SaveList;
53*0b57cec5SDimitry Andric   if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
54*0b57cec5SDimitry Andric     return CSR_AArch64_AAVPCS_SaveList;
55*0b57cec5SDimitry Andric   if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
56*0b57cec5SDimitry Andric     return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
57*0b57cec5SDimitry Andric            CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
58*0b57cec5SDimitry Andric            CSR_AArch64_CXX_TLS_Darwin_SaveList;
59*0b57cec5SDimitry Andric   if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
60*0b57cec5SDimitry Andric           ->supportSwiftError() &&
61*0b57cec5SDimitry Andric       MF->getFunction().getAttributes().hasAttrSomewhere(
62*0b57cec5SDimitry Andric           Attribute::SwiftError))
63*0b57cec5SDimitry Andric     return CSR_AArch64_AAPCS_SwiftError_SaveList;
64*0b57cec5SDimitry Andric   if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
65*0b57cec5SDimitry Andric     return CSR_AArch64_RT_MostRegs_SaveList;
66*0b57cec5SDimitry Andric   else
67*0b57cec5SDimitry Andric     return CSR_AArch64_AAPCS_SaveList;
68*0b57cec5SDimitry Andric }
69*0b57cec5SDimitry Andric 
70*0b57cec5SDimitry Andric const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
71*0b57cec5SDimitry Andric     const MachineFunction *MF) const {
72*0b57cec5SDimitry Andric   assert(MF && "Invalid MachineFunction pointer.");
73*0b57cec5SDimitry Andric   if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
74*0b57cec5SDimitry Andric       MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
75*0b57cec5SDimitry Andric     return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
76*0b57cec5SDimitry Andric   return nullptr;
77*0b57cec5SDimitry Andric }
78*0b57cec5SDimitry Andric 
79*0b57cec5SDimitry Andric void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
80*0b57cec5SDimitry Andric     MachineFunction &MF) const {
81*0b57cec5SDimitry Andric   const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
82*0b57cec5SDimitry Andric   SmallVector<MCPhysReg, 32> UpdatedCSRs;
83*0b57cec5SDimitry Andric   for (const MCPhysReg *I = CSRs; *I; ++I)
84*0b57cec5SDimitry Andric     UpdatedCSRs.push_back(*I);
85*0b57cec5SDimitry Andric 
86*0b57cec5SDimitry Andric   for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
87*0b57cec5SDimitry Andric     if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
88*0b57cec5SDimitry Andric       UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
89*0b57cec5SDimitry Andric     }
90*0b57cec5SDimitry Andric   }
91*0b57cec5SDimitry Andric   // Register lists are zero-terminated.
92*0b57cec5SDimitry Andric   UpdatedCSRs.push_back(0);
93*0b57cec5SDimitry Andric   MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
94*0b57cec5SDimitry Andric }
95*0b57cec5SDimitry Andric 
96*0b57cec5SDimitry Andric const TargetRegisterClass *
97*0b57cec5SDimitry Andric AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
98*0b57cec5SDimitry Andric                                        unsigned Idx) const {
99*0b57cec5SDimitry Andric   // edge case for GPR/FPR register classes
100*0b57cec5SDimitry Andric   if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
101*0b57cec5SDimitry Andric     return &AArch64::FPR32RegClass;
102*0b57cec5SDimitry Andric   else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
103*0b57cec5SDimitry Andric     return &AArch64::FPR64RegClass;
104*0b57cec5SDimitry Andric 
105*0b57cec5SDimitry Andric   // Forward to TableGen's default version.
106*0b57cec5SDimitry Andric   return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
107*0b57cec5SDimitry Andric }
108*0b57cec5SDimitry Andric 
109*0b57cec5SDimitry Andric const uint32_t *
110*0b57cec5SDimitry Andric AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
111*0b57cec5SDimitry Andric                                           CallingConv::ID CC) const {
112*0b57cec5SDimitry Andric   bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
113*0b57cec5SDimitry Andric   if (CC == CallingConv::GHC)
114*0b57cec5SDimitry Andric     // This is academic because all GHC calls are (supposed to be) tail calls
115*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
116*0b57cec5SDimitry Andric   if (CC == CallingConv::AnyReg)
117*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
118*0b57cec5SDimitry Andric   if (CC == CallingConv::CXX_FAST_TLS)
119*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_CXX_TLS_Darwin_SCS_RegMask
120*0b57cec5SDimitry Andric                : CSR_AArch64_CXX_TLS_Darwin_RegMask;
121*0b57cec5SDimitry Andric   if (CC == CallingConv::AArch64_VectorCall)
122*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
123*0b57cec5SDimitry Andric   if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
124*0b57cec5SDimitry Andric           ->supportSwiftError() &&
125*0b57cec5SDimitry Andric       MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
126*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
127*0b57cec5SDimitry Andric                : CSR_AArch64_AAPCS_SwiftError_RegMask;
128*0b57cec5SDimitry Andric   if (CC == CallingConv::PreserveMost)
129*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
130*0b57cec5SDimitry Andric                : CSR_AArch64_RT_MostRegs_RegMask;
131*0b57cec5SDimitry Andric   else
132*0b57cec5SDimitry Andric     return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
133*0b57cec5SDimitry Andric }
134*0b57cec5SDimitry Andric 
// Return the preserved-register mask for TLS access calls, chosen by target
// platform: Darwin has its own mask; otherwise the target must be ELF.
const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_AArch64_TLS_Darwin_RegMask;

  // Only Darwin and ELF are supported here.
  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}
142*0b57cec5SDimitry Andric 
143*0b57cec5SDimitry Andric void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
144*0b57cec5SDimitry Andric                                                  const uint32_t **Mask) const {
145*0b57cec5SDimitry Andric   uint32_t *UpdatedMask = MF.allocateRegMask();
146*0b57cec5SDimitry Andric   unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
147*0b57cec5SDimitry Andric   memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
148*0b57cec5SDimitry Andric 
149*0b57cec5SDimitry Andric   for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
150*0b57cec5SDimitry Andric     if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
151*0b57cec5SDimitry Andric       for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
152*0b57cec5SDimitry Andric                                    this, true);
153*0b57cec5SDimitry Andric            SubReg.isValid(); ++SubReg) {
154*0b57cec5SDimitry Andric         // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
155*0b57cec5SDimitry Andric         // register mask.
156*0b57cec5SDimitry Andric         UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
157*0b57cec5SDimitry Andric       }
158*0b57cec5SDimitry Andric     }
159*0b57cec5SDimitry Andric   }
160*0b57cec5SDimitry Andric   *Mask = UpdatedMask;
161*0b57cec5SDimitry Andric }
162*0b57cec5SDimitry Andric 
// Return the mask in which no registers are preserved across the call.
const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}
166*0b57cec5SDimitry Andric 
const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value)
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply)
  //
  // GHC is rejected up front because it has no callee-saved registers at all
  // (see getCalleeSavedRegs), so a "this-return" mask makes no sense for it.
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}
180*0b57cec5SDimitry Andric 
// Return the registers preserved across a Windows stack-probe call.
const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}
184*0b57cec5SDimitry Andric 
185*0b57cec5SDimitry Andric BitVector
186*0b57cec5SDimitry Andric AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
187*0b57cec5SDimitry Andric   const AArch64FrameLowering *TFI = getFrameLowering(MF);
188*0b57cec5SDimitry Andric 
189*0b57cec5SDimitry Andric   // FIXME: avoid re-calculating this every time.
190*0b57cec5SDimitry Andric   BitVector Reserved(getNumRegs());
191*0b57cec5SDimitry Andric   markSuperRegs(Reserved, AArch64::WSP);
192*0b57cec5SDimitry Andric   markSuperRegs(Reserved, AArch64::WZR);
193*0b57cec5SDimitry Andric 
194*0b57cec5SDimitry Andric   if (TFI->hasFP(MF) || TT.isOSDarwin())
195*0b57cec5SDimitry Andric     markSuperRegs(Reserved, AArch64::W29);
196*0b57cec5SDimitry Andric 
197*0b57cec5SDimitry Andric   for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
198*0b57cec5SDimitry Andric     if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
199*0b57cec5SDimitry Andric       markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
200*0b57cec5SDimitry Andric   }
201*0b57cec5SDimitry Andric 
202*0b57cec5SDimitry Andric   if (hasBasePointer(MF))
203*0b57cec5SDimitry Andric     markSuperRegs(Reserved, AArch64::W19);
204*0b57cec5SDimitry Andric 
205*0b57cec5SDimitry Andric   // SLH uses register W16/X16 as the taint register.
206*0b57cec5SDimitry Andric   if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
207*0b57cec5SDimitry Andric     markSuperRegs(Reserved, AArch64::W16);
208*0b57cec5SDimitry Andric 
209*0b57cec5SDimitry Andric   assert(checkAllSuperRegsMarked(Reserved));
210*0b57cec5SDimitry Andric   return Reserved;
211*0b57cec5SDimitry Andric }
212*0b57cec5SDimitry Andric 
// Return true if \p Reg is reserved in \p MF. Note this recomputes the full
// reserved set on every query (see the FIXME in getReservedRegs).
bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                      unsigned Reg) const {
  return getReservedRegs(MF)[Reg];
}
217*0b57cec5SDimitry Andric 
218*0b57cec5SDimitry Andric bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
219*0b57cec5SDimitry Andric   return std::any_of(std::begin(*AArch64::GPR64argRegClass.MC),
220*0b57cec5SDimitry Andric                      std::end(*AArch64::GPR64argRegClass.MC),
221*0b57cec5SDimitry Andric                      [this, &MF](MCPhysReg r){return isReservedReg(MF, r);});
222*0b57cec5SDimitry Andric }
223*0b57cec5SDimitry Andric 
// Emit a diagnostic telling the user that \p MF contains a call while one of
// its argument registers is reserved, which is unsupported.
void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{F, "AArch64 doesn't support"
    " function calls if any of the argument registers is reserved."});
}
230*0b57cec5SDimitry Andric 
// Return true if inline asm may clobber \p PhysReg: any register that is not
// reserved in \p MF.
bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                          unsigned PhysReg) const {
  return !isReservedReg(MF, PhysReg);
}
235*0b57cec5SDimitry Andric 
// The zero registers are the only physical registers whose value never
// changes.
bool AArch64RegisterInfo::isConstantPhysReg(unsigned PhysReg) const {
  return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
}
239*0b57cec5SDimitry Andric 
// All pointers live in 64-bit GPRs; the "sp" class additionally allows SP as
// a base. \p Kind is ignored on AArch64.
const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                      unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}
245*0b57cec5SDimitry Andric 
246*0b57cec5SDimitry Andric const TargetRegisterClass *
247*0b57cec5SDimitry Andric AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
248*0b57cec5SDimitry Andric   if (RC == &AArch64::CCRRegClass)
249*0b57cec5SDimitry Andric     return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
250*0b57cec5SDimitry Andric   return RC;
251*0b57cec5SDimitry Andric }
252*0b57cec5SDimitry Andric 
// X19 is the designated base pointer register (kept reserved when
// hasBasePointer() is true).
unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
254*0b57cec5SDimitry Andric 
255*0b57cec5SDimitry Andric bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
256*0b57cec5SDimitry Andric   const MachineFrameInfo &MFI = MF.getFrameInfo();
257*0b57cec5SDimitry Andric 
258*0b57cec5SDimitry Andric   // In the presence of variable sized objects or funclets, if the fixed stack
259*0b57cec5SDimitry Andric   // size is large enough that referencing from the FP won't result in things
260*0b57cec5SDimitry Andric   // being in range relatively often, we can use a base pointer to allow access
261*0b57cec5SDimitry Andric   // from the other direction like the SP normally works.
262*0b57cec5SDimitry Andric   //
263*0b57cec5SDimitry Andric   // Furthermore, if both variable sized objects are present, and the
264*0b57cec5SDimitry Andric   // stack needs to be dynamically re-aligned, the base pointer is the only
265*0b57cec5SDimitry Andric   // reliable way to reference the locals.
266*0b57cec5SDimitry Andric   if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
267*0b57cec5SDimitry Andric     if (needsStackRealignment(MF))
268*0b57cec5SDimitry Andric       return true;
269*0b57cec5SDimitry Andric     // Conservatively estimate whether the negative offset from the frame
270*0b57cec5SDimitry Andric     // pointer will be sufficient to reach. If a function has a smallish
271*0b57cec5SDimitry Andric     // frame, it's less likely to have lots of spills and callee saved
272*0b57cec5SDimitry Andric     // space, so it's all more likely to be within range of the frame pointer.
273*0b57cec5SDimitry Andric     // If it's wrong, we'll materialize the constant and still get to the
274*0b57cec5SDimitry Andric     // object; it's just suboptimal. Negative offsets use the unscaled
275*0b57cec5SDimitry Andric     // load/store instructions, which have a 9-bit signed immediate.
276*0b57cec5SDimitry Andric     return MFI.getLocalFrameSize() >= 256;
277*0b57cec5SDimitry Andric   }
278*0b57cec5SDimitry Andric 
279*0b57cec5SDimitry Andric   return false;
280*0b57cec5SDimitry Andric }
281*0b57cec5SDimitry Andric 
282*0b57cec5SDimitry Andric Register
283*0b57cec5SDimitry Andric AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
284*0b57cec5SDimitry Andric   const AArch64FrameLowering *TFI = getFrameLowering(MF);
285*0b57cec5SDimitry Andric   return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
286*0b57cec5SDimitry Andric }
287*0b57cec5SDimitry Andric 
// AArch64 always enables the register scavenger (frame offsets may need a
// scratch register to materialize).
bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}
292*0b57cec5SDimitry Andric 
// Always allow LocalStackSlotAllocation to create virtual frame base
// registers (see needsFrameBaseReg / materializeFrameBaseRegister).
bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}
297*0b57cec5SDimitry Andric 
298*0b57cec5SDimitry Andric bool
299*0b57cec5SDimitry Andric AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
300*0b57cec5SDimitry Andric   // This function indicates whether the emergency spillslot should be placed
301*0b57cec5SDimitry Andric   // close to the beginning of the stackframe (closer to FP) or the end
302*0b57cec5SDimitry Andric   // (closer to SP).
303*0b57cec5SDimitry Andric   //
304*0b57cec5SDimitry Andric   // The beginning works most reliably if we have a frame pointer.
305*0b57cec5SDimitry Andric   const AArch64FrameLowering &TFI = *getFrameLowering(MF);
306*0b57cec5SDimitry Andric   return TFI.hasFP(MF);
307*0b57cec5SDimitry Andric }
308*0b57cec5SDimitry Andric 
// Frame-index elimination may need a scavenged register, so always run it
// with scavenging available.
bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}
313*0b57cec5SDimitry Andric 
314*0b57cec5SDimitry Andric bool
315*0b57cec5SDimitry Andric AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
316*0b57cec5SDimitry Andric   const MachineFrameInfo &MFI = MF.getFrameInfo();
317*0b57cec5SDimitry Andric   if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
318*0b57cec5SDimitry Andric     return true;
319*0b57cec5SDimitry Andric   return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
320*0b57cec5SDimitry Andric }
321*0b57cec5SDimitry Andric 
322*0b57cec5SDimitry Andric /// needsFrameBaseReg - Returns true if the instruction's frame index
323*0b57cec5SDimitry Andric /// reference would be better served by a base register other than FP
324*0b57cec5SDimitry Andric /// or SP. Used by LocalStackFrameAllocation to determine which frame index
325*0b57cec5SDimitry Andric /// references it should create new base registers for.
326*0b57cec5SDimitry Andric bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
327*0b57cec5SDimitry Andric                                             int64_t Offset) const {
328*0b57cec5SDimitry Andric   for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
329*0b57cec5SDimitry Andric     assert(i < MI->getNumOperands() &&
330*0b57cec5SDimitry Andric            "Instr doesn't have FrameIndex operand!");
331*0b57cec5SDimitry Andric 
332*0b57cec5SDimitry Andric   // It's the load/store FI references that cause issues, as it can be difficult
333*0b57cec5SDimitry Andric   // to materialize the offset if it won't fit in the literal field. Estimate
334*0b57cec5SDimitry Andric   // based on the size of the local frame and some conservative assumptions
335*0b57cec5SDimitry Andric   // about the rest of the stack frame (note, this is pre-regalloc, so
336*0b57cec5SDimitry Andric   // we don't know everything for certain yet) whether this offset is likely
337*0b57cec5SDimitry Andric   // to be out of range of the immediate. Return true if so.
338*0b57cec5SDimitry Andric 
339*0b57cec5SDimitry Andric   // We only generate virtual base registers for loads and stores, so
340*0b57cec5SDimitry Andric   // return false for everything else.
341*0b57cec5SDimitry Andric   if (!MI->mayLoad() && !MI->mayStore())
342*0b57cec5SDimitry Andric     return false;
343*0b57cec5SDimitry Andric 
344*0b57cec5SDimitry Andric   // Without a virtual base register, if the function has variable sized
345*0b57cec5SDimitry Andric   // objects, all fixed-size local references will be via the frame pointer,
346*0b57cec5SDimitry Andric   // Approximate the offset and see if it's legal for the instruction.
347*0b57cec5SDimitry Andric   // Note that the incoming offset is based on the SP value at function entry,
348*0b57cec5SDimitry Andric   // so it'll be negative.
349*0b57cec5SDimitry Andric   MachineFunction &MF = *MI->getParent()->getParent();
350*0b57cec5SDimitry Andric   const AArch64FrameLowering *TFI = getFrameLowering(MF);
351*0b57cec5SDimitry Andric   MachineFrameInfo &MFI = MF.getFrameInfo();
352*0b57cec5SDimitry Andric 
353*0b57cec5SDimitry Andric   // Estimate an offset from the frame pointer.
354*0b57cec5SDimitry Andric   // Conservatively assume all GPR callee-saved registers get pushed.
355*0b57cec5SDimitry Andric   // FP, LR, X19-X28, D8-D15. 64-bits each.
356*0b57cec5SDimitry Andric   int64_t FPOffset = Offset - 16 * 20;
357*0b57cec5SDimitry Andric   // Estimate an offset from the stack pointer.
358*0b57cec5SDimitry Andric   // The incoming offset is relating to the SP at the start of the function,
359*0b57cec5SDimitry Andric   // but when we access the local it'll be relative to the SP after local
360*0b57cec5SDimitry Andric   // allocation, so adjust our SP-relative offset by that allocation size.
361*0b57cec5SDimitry Andric   Offset += MFI.getLocalFrameSize();
362*0b57cec5SDimitry Andric   // Assume that we'll have at least some spill slots allocated.
363*0b57cec5SDimitry Andric   // FIXME: This is a total SWAG number. We should run some statistics
364*0b57cec5SDimitry Andric   //        and pick a real one.
365*0b57cec5SDimitry Andric   Offset += 128; // 128 bytes of spill slots
366*0b57cec5SDimitry Andric 
367*0b57cec5SDimitry Andric   // If there is a frame pointer, try using it.
368*0b57cec5SDimitry Andric   // The FP is only available if there is no dynamic realignment. We
369*0b57cec5SDimitry Andric   // don't know for sure yet whether we'll need that, so we guess based
370*0b57cec5SDimitry Andric   // on whether there are any local variables that would trigger it.
371*0b57cec5SDimitry Andric   if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
372*0b57cec5SDimitry Andric     return false;
373*0b57cec5SDimitry Andric 
374*0b57cec5SDimitry Andric   // If we can reference via the stack pointer or base pointer, try that.
375*0b57cec5SDimitry Andric   // FIXME: This (and the code that resolves the references) can be improved
376*0b57cec5SDimitry Andric   //        to only disallow SP relative references in the live range of
377*0b57cec5SDimitry Andric   //        the VLA(s). In practice, it's unclear how much difference that
378*0b57cec5SDimitry Andric   //        would make, but it may be worth doing.
379*0b57cec5SDimitry Andric   if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
380*0b57cec5SDimitry Andric     return false;
381*0b57cec5SDimitry Andric 
382*0b57cec5SDimitry Andric   // The offset likely isn't legal; we want to allocate a virtual base register.
383*0b57cec5SDimitry Andric   return true;
384*0b57cec5SDimitry Andric }
385*0b57cec5SDimitry Andric 
386*0b57cec5SDimitry Andric bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
387*0b57cec5SDimitry Andric                                              unsigned BaseReg,
388*0b57cec5SDimitry Andric                                              int64_t Offset) const {
389*0b57cec5SDimitry Andric   assert(Offset <= INT_MAX && "Offset too big to fit in int.");
390*0b57cec5SDimitry Andric   assert(MI && "Unable to get the legal offset for nil instruction.");
391*0b57cec5SDimitry Andric   int SaveOffset = Offset;
392*0b57cec5SDimitry Andric   return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
393*0b57cec5SDimitry Andric }
394*0b57cec5SDimitry Andric 
395*0b57cec5SDimitry Andric /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
396*0b57cec5SDimitry Andric /// at the beginning of the basic block.
397*0b57cec5SDimitry Andric void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
398*0b57cec5SDimitry Andric                                                        unsigned BaseReg,
399*0b57cec5SDimitry Andric                                                        int FrameIdx,
400*0b57cec5SDimitry Andric                                                        int64_t Offset) const {
401*0b57cec5SDimitry Andric   MachineBasicBlock::iterator Ins = MBB->begin();
402*0b57cec5SDimitry Andric   DebugLoc DL; // Defaults to "unknown"
403*0b57cec5SDimitry Andric   if (Ins != MBB->end())
404*0b57cec5SDimitry Andric     DL = Ins->getDebugLoc();
405*0b57cec5SDimitry Andric   const MachineFunction &MF = *MBB->getParent();
406*0b57cec5SDimitry Andric   const AArch64InstrInfo *TII =
407*0b57cec5SDimitry Andric       MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
408*0b57cec5SDimitry Andric   const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
409*0b57cec5SDimitry Andric   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
410*0b57cec5SDimitry Andric   MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
411*0b57cec5SDimitry Andric   unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
412*0b57cec5SDimitry Andric 
413*0b57cec5SDimitry Andric   BuildMI(*MBB, Ins, DL, MCID, BaseReg)
414*0b57cec5SDimitry Andric       .addFrameIndex(FrameIdx)
415*0b57cec5SDimitry Andric       .addImm(Offset)
416*0b57cec5SDimitry Andric       .addImm(Shifter);
417*0b57cec5SDimitry Andric }
418*0b57cec5SDimitry Andric 
419*0b57cec5SDimitry Andric void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
420*0b57cec5SDimitry Andric                                             int64_t Offset) const {
421*0b57cec5SDimitry Andric   int Off = Offset; // ARM doesn't need the general 64-bit offsets
422*0b57cec5SDimitry Andric   unsigned i = 0;
423*0b57cec5SDimitry Andric 
424*0b57cec5SDimitry Andric   while (!MI.getOperand(i).isFI()) {
425*0b57cec5SDimitry Andric     ++i;
426*0b57cec5SDimitry Andric     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
427*0b57cec5SDimitry Andric   }
428*0b57cec5SDimitry Andric   const MachineFunction *MF = MI.getParent()->getParent();
429*0b57cec5SDimitry Andric   const AArch64InstrInfo *TII =
430*0b57cec5SDimitry Andric       MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
431*0b57cec5SDimitry Andric   bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
432*0b57cec5SDimitry Andric   assert(Done && "Unable to resolve frame index!");
433*0b57cec5SDimitry Andric   (void)Done;
434*0b57cec5SDimitry Andric }
435*0b57cec5SDimitry Andric 
436*0b57cec5SDimitry Andric void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
437*0b57cec5SDimitry Andric                                               int SPAdj, unsigned FIOperandNum,
438*0b57cec5SDimitry Andric                                               RegScavenger *RS) const {
439*0b57cec5SDimitry Andric   assert(SPAdj == 0 && "Unexpected");
440*0b57cec5SDimitry Andric 
441*0b57cec5SDimitry Andric   MachineInstr &MI = *II;
442*0b57cec5SDimitry Andric   MachineBasicBlock &MBB = *MI.getParent();
443*0b57cec5SDimitry Andric   MachineFunction &MF = *MBB.getParent();
444*0b57cec5SDimitry Andric   const AArch64InstrInfo *TII =
445*0b57cec5SDimitry Andric       MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
446*0b57cec5SDimitry Andric   const AArch64FrameLowering *TFI = getFrameLowering(MF);
447*0b57cec5SDimitry Andric 
448*0b57cec5SDimitry Andric   int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
449*0b57cec5SDimitry Andric   unsigned FrameReg;
450*0b57cec5SDimitry Andric   int Offset;
451*0b57cec5SDimitry Andric 
452*0b57cec5SDimitry Andric   // Special handling of dbg_value, stackmap and patchpoint instructions.
453*0b57cec5SDimitry Andric   if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
454*0b57cec5SDimitry Andric       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
455*0b57cec5SDimitry Andric     Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
456*0b57cec5SDimitry Andric                                              /*PreferFP=*/true,
457*0b57cec5SDimitry Andric                                              /*ForSimm=*/false);
458*0b57cec5SDimitry Andric     Offset += MI.getOperand(FIOperandNum + 1).getImm();
459*0b57cec5SDimitry Andric     MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
460*0b57cec5SDimitry Andric     MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
461*0b57cec5SDimitry Andric     return;
462*0b57cec5SDimitry Andric   }
463*0b57cec5SDimitry Andric 
464*0b57cec5SDimitry Andric   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
465*0b57cec5SDimitry Andric     MachineOperand &FI = MI.getOperand(FIOperandNum);
466*0b57cec5SDimitry Andric     Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
467*0b57cec5SDimitry Andric     FI.ChangeToImmediate(Offset);
468*0b57cec5SDimitry Andric     return;
469*0b57cec5SDimitry Andric   }
470*0b57cec5SDimitry Andric 
471*0b57cec5SDimitry Andric   if (MI.getOpcode() == AArch64::TAGPstack) {
472*0b57cec5SDimitry Andric     // TAGPstack must use the virtual frame register in its 3rd operand.
473*0b57cec5SDimitry Andric     const MachineFrameInfo &MFI = MF.getFrameInfo();
474*0b57cec5SDimitry Andric     const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
475*0b57cec5SDimitry Andric     FrameReg = MI.getOperand(3).getReg();
476*0b57cec5SDimitry Andric     Offset =
477*0b57cec5SDimitry Andric         MFI.getObjectOffset(FrameIndex) + AFI->getTaggedBasePointerOffset();
478*0b57cec5SDimitry Andric   } else {
479*0b57cec5SDimitry Andric     Offset = TFI->resolveFrameIndexReference(
480*0b57cec5SDimitry Andric         MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
481*0b57cec5SDimitry Andric   }
482*0b57cec5SDimitry Andric 
483*0b57cec5SDimitry Andric   // Modify MI as necessary to handle as much of 'Offset' as possible
484*0b57cec5SDimitry Andric   if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
485*0b57cec5SDimitry Andric     return;
486*0b57cec5SDimitry Andric 
487*0b57cec5SDimitry Andric   assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
488*0b57cec5SDimitry Andric          "Emergency spill slot is out of reach");
489*0b57cec5SDimitry Andric 
490*0b57cec5SDimitry Andric   // If we get here, the immediate doesn't fit into the instruction.  We folded
491*0b57cec5SDimitry Andric   // as much as possible above.  Handle the rest, providing a register that is
492*0b57cec5SDimitry Andric   // SP+LargeImm.
493*0b57cec5SDimitry Andric   unsigned ScratchReg =
494*0b57cec5SDimitry Andric       MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
495*0b57cec5SDimitry Andric   emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
496*0b57cec5SDimitry Andric   MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
497*0b57cec5SDimitry Andric }
498*0b57cec5SDimitry Andric 
499*0b57cec5SDimitry Andric unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
500*0b57cec5SDimitry Andric                                                   MachineFunction &MF) const {
501*0b57cec5SDimitry Andric   const AArch64FrameLowering *TFI = getFrameLowering(MF);
502*0b57cec5SDimitry Andric 
503*0b57cec5SDimitry Andric   switch (RC->getID()) {
504*0b57cec5SDimitry Andric   default:
505*0b57cec5SDimitry Andric     return 0;
506*0b57cec5SDimitry Andric   case AArch64::GPR32RegClassID:
507*0b57cec5SDimitry Andric   case AArch64::GPR32spRegClassID:
508*0b57cec5SDimitry Andric   case AArch64::GPR32allRegClassID:
509*0b57cec5SDimitry Andric   case AArch64::GPR64spRegClassID:
510*0b57cec5SDimitry Andric   case AArch64::GPR64allRegClassID:
511*0b57cec5SDimitry Andric   case AArch64::GPR64RegClassID:
512*0b57cec5SDimitry Andric   case AArch64::GPR32commonRegClassID:
513*0b57cec5SDimitry Andric   case AArch64::GPR64commonRegClassID:
514*0b57cec5SDimitry Andric     return 32 - 1                                   // XZR/SP
515*0b57cec5SDimitry Andric               - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
516*0b57cec5SDimitry Andric               - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
517*0b57cec5SDimitry Andric               - hasBasePointer(MF);  // X19
518*0b57cec5SDimitry Andric   case AArch64::FPR8RegClassID:
519*0b57cec5SDimitry Andric   case AArch64::FPR16RegClassID:
520*0b57cec5SDimitry Andric   case AArch64::FPR32RegClassID:
521*0b57cec5SDimitry Andric   case AArch64::FPR64RegClassID:
522*0b57cec5SDimitry Andric   case AArch64::FPR128RegClassID:
523*0b57cec5SDimitry Andric     return 32;
524*0b57cec5SDimitry Andric 
525*0b57cec5SDimitry Andric   case AArch64::DDRegClassID:
526*0b57cec5SDimitry Andric   case AArch64::DDDRegClassID:
527*0b57cec5SDimitry Andric   case AArch64::DDDDRegClassID:
528*0b57cec5SDimitry Andric   case AArch64::QQRegClassID:
529*0b57cec5SDimitry Andric   case AArch64::QQQRegClassID:
530*0b57cec5SDimitry Andric   case AArch64::QQQQRegClassID:
531*0b57cec5SDimitry Andric     return 32;
532*0b57cec5SDimitry Andric 
533*0b57cec5SDimitry Andric   case AArch64::FPR128_loRegClassID:
534*0b57cec5SDimitry Andric     return 16;
535*0b57cec5SDimitry Andric   }
536*0b57cec5SDimitry Andric }
537*0b57cec5SDimitry Andric 
538*0b57cec5SDimitry Andric unsigned AArch64RegisterInfo::getLocalAddressRegister(
539*0b57cec5SDimitry Andric   const MachineFunction &MF) const {
540*0b57cec5SDimitry Andric   const auto &MFI = MF.getFrameInfo();
541*0b57cec5SDimitry Andric   if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
542*0b57cec5SDimitry Andric     return AArch64::SP;
543*0b57cec5SDimitry Andric   else if (needsStackRealignment(MF))
544*0b57cec5SDimitry Andric     return getBaseRegister();
545*0b57cec5SDimitry Andric   return getFrameRegister(MF);
546*0b57cec5SDimitry Andric }
547