//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64StackOffset.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC's set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on it need a Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}
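
// A minimal sketch (assumption, not part of the original file): the lists
// returned above are zero-terminated arrays of MCPhysReg, so a hypothetical
// caller can walk one until it hits the 0 sentinel.
//
//   const MCPhysReg *CSRs = TRI->getCalleeSavedRegs(&MF);
//   for (const MCPhysReg *I = CSRs; *I; ++I)
//     dbgs() << printReg(*I, TRI) << " must be preserved\n";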

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}
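
// A minimal sketch (assumption, not part of the original file): after the
// update above, a register made callee-saved on the command line (e.g. X20
// via a -fcall-saved-x20 style option) is appended to the list, still
// followed by the 0 terminator.
//
//   UpdateCustomCalleeSavedRegs(MF);
//   // Conceptually: { ..., AArch64::LR, AArch64::FP, AArch64::X20, 0 }
//   ArrayRef<MCPhysReg> CSRs = MF.getRegInfo().getCalleeSavedRegs();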

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // edge case for GPR/FPR register classes
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
                                                 const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
                                   this, true);
           SubReg.isValid(); ++SubReg) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
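
// A minimal sketch (assumption, not part of the original file): register
// masks are bit vectors indexed by physical register number, one uint32_t
// per 32 registers. A register is preserved across the call iff its bit is
// set, which is exactly the bit the loop above sets:
//
//   bool isPreserved(const uint32_t *Mask, MCPhysReg Reg) {
//     return Mask[Reg / 32] & (1u << (Reg % 32));
//   }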

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // If the calling convention does not use the same register for both, the
  // function should return NULL (this does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return std::any_of(std::begin(*AArch64::GPR64argRegClass.MC),
                     std::end(*AArch64::GPR64argRegClass.MC),
                     [this, &MF](MCPhysReg r){return isReservedReg(MF, r);});
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{F, "AArch64 doesn't support"
    " function calls if any of the argument registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  return !isReservedReg(MF, PhysReg);
}

bool AArch64RegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (needsStackRealignment(MF))
      return true;
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach the locals. If a function has a
    // smallish frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
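
// A minimal sketch (assumption, not part of the original file): the 256-byte
// threshold above comes from the unscaled LDUR/STUR forms, whose 9-bit
// signed immediate covers [-256, 255] bytes. Locals beyond that range need
// the offset materialized in a register first, e.g.:
//
//   ldur x0, [x29, #-256]     // reachable with a single instruction
//   // x29 - 257 is out of range for LDUR; it would take something like:
//   //   mov x8, #-257
//   //   ldr x0, [x29, x8]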

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spill slot should be placed
  // close to the beginning of the stack frame (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  return TFI.hasFP(MF);
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}
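
// A minimal worked example (assumption, not part of the original file) of
// the estimates above: an incoming entry-SP-relative offset of -8 gives
// FPOffset = -8 - 320 = -328; with, say, a 512-byte local frame the
// SP-relative guess becomes -8 + 512 + 128 = 632. The function then returns
// true only when neither guess is encodable but offset 0 would be.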

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset(Offset, MVT::i8);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}
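
// A minimal sketch (assumption, not part of the original file):
// isAArch64FrameOffsetLegal returns a bitmask of status flags, so the `&`
// above just tests one bit. Conceptually:
//
//   int Status = isAArch64FrameOffsetLegal(*MI, SaveOffset);
//   bool Legal = Status & AArch64FrameOffsetIsLegal;       // offset encodable
//   bool CanUpdate = Status & AArch64FrameOffsetCanUpdate; // MI rewritable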

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       Register BaseReg,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);
}
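
// A minimal sketch (assumption, not part of the original file): the BuildMI
// call above emits a machine instruction of the form
//
//   %base:gpr64sp = ADDXri %stack.<FrameIdx>, Offset, 0
//
// which, once the frame index is resolved, lowers to something like
//
//   add x19, sp, #Offset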

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets.
  StackOffset Off(Offset, MVT::i8);

  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode (but not the operands).
static Register
createScratchRegisterForInstruction(MachineInstr &MI,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  if (MI.getOpcode() == AArch64::STGloop) {
    MI.setDesc(TII->get(AArch64::STGloop_wback));
    return MI.getOperand(1).getReg();
  } else if (MI.getOpcode() == AArch64::STZGloop) {
    MI.setDesc(TII->get(AArch64::STZGloop_wback));
    return MI.getOperand(1).getReg();
  } else {
    return MI.getMF()->getRegInfo().createVirtualRegister(
        &AArch64::GPR64RegClass);
  }
}
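
// A minimal sketch (assumption, not part of the original file): for the
// stack tagging loop pseudos the scratch register is already an operand, so
// the rewrite above only swaps the opcode to the writeback variant; every
// other instruction gets a fresh GPR64 virtual register.
//
//   Register Scratch = createScratchRegisterForInstruction(MI, TII);
//   // Scratch is now either MI's own operand 1 or a newly created vreg.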

void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap and patchpoint instructions.
  if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset(MI.getOperand(FIOperandNum + 1).getImm(), MVT::i8);
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getBytes());
    return;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    int Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    FI.ChangeToImmediate(Offset);
    return;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = {MFI.getObjectOffset(FrameIndex) +
                  AFI->getTaggedBasePointerOffset(),
              MVT::i8};
  } else if (Tagged) {
    StackOffset SPOffset = {
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize(), MVT::i8};
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return;
    }
    FrameReg = AArch64::SP;
    Offset = {MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize(),
              MVT::i8};
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above.  Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg = createScratchRegisterForInstruction(MI, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}
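
// A minimal sketch (assumption, not part of the original file) of the
// fallback path above: when the folded offset still doesn't fit, the emitted
// sequence is conceptually
//
//   %scratch = <materialize FrameReg + LargeImm>   // emitFrameOffset
//   <original MI, with its FI operand replaced by killed %scratch>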

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF);  // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}
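
// A minimal worked example (assumption, not part of the original file) of
// the GPR limit above: on Darwin with a frame pointer, a base pointer, and
// no additionally reserved X registers, the estimate is
//
//   32 - 1 (XZR/SP) - 1 (FP/X29) - 0 (reserved) - 1 (BP/X19) = 29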

unsigned AArch64RegisterInfo::getLocalAddressRegister(
  const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (needsStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}