//===- AArch64RegisterInfo.cpp - AArch64 Register Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
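// Example: for Z8, the CFI entry computed above is emitted against D8, since
// d8-d15 appear in CSR_AArch64_AAPCS_SaveList; a ZPR whose D subregister is
// outside that base-ABI list (e.g. Z16 -> D16) gets no CFI entry at all.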

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveNone)
    return CSR_AArch64_NoneRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  if (MF->getFunction().getCallingConv() == CallingConv::ARM64EC_Thunk_X64)
    return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows()) {
    if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
            ->supportSwiftError() &&
        MF->getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_Win_AArch64_AAPCS_SaveList;
  }
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
        "supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
        "only supported to improve calls to SME ACLE __arm_get_current_vg "
        "function, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
        "only supported to improve calls to SME ACLE __arm_get_current_vg "
        "function, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}
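// Note: isXRegCustomCalleeSaved(i) is typically set from a front-end option
// that forces an X register to be treated as callee-saved (for example a
// -fcall-saved-x<n> style flag); the extra registers are simply appended to
// the convention's own zero-terminated save list above.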

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // edge case for GPR/FPR register classes
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::PreserveNone)
    return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
               : CSR_AArch64_NoneRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;

  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
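// A worked example of the mask encoding used above: bit N of the mask marks
// the register with number N as preserved, stored as word N / 32, bit N % 32.
// So for a (hypothetical) register number 70, the loop sets bit 6 of
// UpdatedMask[2].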

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value)
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply)
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // FFR is modelled as global state that cannot be allocated.
  if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
    Reserved.set(AArch64::FFR);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  // VG cannot be allocated
  Reserved.set(AArch64::VG);

  if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
    for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);
  markSuperRegs(Reserved, AArch64::FPSR);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    markSuperRegs(Reserved, AArch64::X27);
    markSuperRegs(Reserved, AArch64::X28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W28);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (MF.getSubtarget<AArch64Subtarget>().isLRReservedForRA()) {
    // In order to prevent the register allocator from using LR, we need to
    // mark it as reserved. However, we don't want to keep it reserved
    // throughout the pipeline since that prevents other infrastructure from
    // reasoning about its liveness. We use the NoVRegs property instead of
    // IsSSA because IsSSA is removed before VirtRegRewriter runs.
    if (!MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::NoVRegs))
      markSuperRegs(Reserved, AArch64::LR);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, "AArch64 doesn't support function calls if any of the argument "
         "registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
  if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if there are both variable sized objects and a need to
  // dynamically re-align the stack, the base pointer is the only reliable way
  // to reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    auto &ST = MF.getSubtarget<AArch64Subtarget>();
    if (ST.hasSVE() || ST.isStreaming()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                         MF.getFunction().isVarArg());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::PreserveNone:
    if (!MF.getFunction().isVarArg())
      return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
    [[fallthrough]];
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows()) {
      if (IsVarArg)
        return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
      switch (CC) {
      default:
        return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
               HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      }
    }
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    if (STI.isTargetWindows())
      return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // ARM doesn't need the general 64-bit offsets
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing modes is
  // a predicate, which is 2 scalable bytes in size. So the scalable byte
  // offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
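// Sketch of the expression produced above for, say, a fixed offset of 16
// bytes plus a scalable offset of 32 bytes (so VGSized == 16):
//   DW_OP_plus_uconst 16, DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus
// i.e. 16 + 16 * VG is added to the incoming location.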

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  case AArch64::FPR128_0to7RegClassID:
    return 8;
  }
}
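// For the GPR classes the arithmetic above works out to, for example,
// 32 - 1 - 1 - 2 - 1 = 27 in a hypothetical configuration with a frame
// pointer in use, two X registers reserved via subtarget options, and a base
// pointer (illustrative numbers only).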

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();

  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;

  auto IsCoalescerBarrier = [](const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case AArch64::COALESCER_BARRIER_FPR16:
    case AArch64::COALESCER_BARRIER_FPR32:
    case AArch64::COALESCER_BARRIER_FPR64:
    case AArch64::COALESCER_BARRIER_FPR128:
      return true;
    default:
      return false;
    }
  };

  // For calls that temporarily have to toggle streaming mode as part of the
  // call-sequence, we need to be more careful when coalescing copy
  // instructions so that we don't end up coalescing the NEON/FP result or
  // argument register with a whole Z-register, such that after coalescing the
  // register allocator will try to spill/reload the entire Z register.
  //
  // We do this by checking if the node has any defs/uses that are
  // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist to
  // instruct the coalescer to avoid coalescing the copy.
  if (MI->isCopy() && SubReg != DstSubReg &&
      (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
       AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
      return false;
    unsigned DstReg = MI->getOperand(0).getReg();
    if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
      return false;
  }

  return true;
}

bool AArch64RegisterInfo::shouldAnalyzePhysregInMachineLoopInfo(
    MCRegister R) const {
  return R == AArch64::VG;
}