//===-- AArch64Subtarget.cpp - AArch64 Subtarget Information ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64 specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "AArch64Subtarget.h"

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64PBQPRegAlloc.h"
#include "AArch64TargetMachine.h"
#include "GISel/AArch64CallLowering.h"
#include "GISel/AArch64LegalizerInfo.h"
#include "GISel/AArch64RegisterBankInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/SipHash.h"
#include "llvm/TargetParser/AArch64TargetParser.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-subtarget"

#define GET_SUBTARGETINFO_CTOR
#define GET_SUBTARGETINFO_TARGET_DESC
#include "AArch64GenSubtargetInfo.inc"

static cl::opt<bool>
EnableEarlyIfConvert("aarch64-early-ifcvt", cl::desc("Enable the early if "
                     "converter pass"), cl::init(true), cl::Hidden);
// If the OS supports TBI, use this flag to enable it.
static cl::opt<bool>
UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of "
                         "an address is ignored"), cl::init(false), cl::Hidden);

static cl::opt<bool> MachOUseNonLazyBind(
    "aarch64-macho-enable-nonlazybind",
    cl::desc("Call nonlazybind functions via direct GOT load for Mach-O"),
    cl::Hidden);

static cl::opt<bool> UseAA("aarch64-use-aa", cl::init(true),
                           cl::desc("Enable the use of AA during codegen."));

static cl::opt<unsigned> OverrideVectorInsertExtractBaseCost(
    "aarch64-insert-extract-base-cost",
    cl::desc("Base cost of vector insert/extract element"), cl::Hidden);

// Reserve a list of X# registers, so they are unavailable to the register
// allocator but can still be used as required by the ABI, e.g. for passing
// arguments to a function call.
static cl::list<std::string>
ReservedRegsForRA("reserve-regs-for-regalloc", cl::desc("Reserve physical "
                  "registers, so they can't be used by register allocator. "
                  "Should only be used for testing register allocator."),
                  cl::CommaSeparated, cl::Hidden);

static cl::opt<AArch64PAuth::AuthCheckMethod>
    AuthenticatedLRCheckMethod("aarch64-authenticated-lr-check-method",
                               cl::Hidden,
                               cl::desc("Override the variant of check applied "
                                        "to authenticated LR during tail call"),
                               cl::values(AUTH_CHECK_METHOD_CL_VALUES_LR));

static cl::opt<unsigned> AArch64MinimumJumpTableEntries(
    "aarch64-min-jump-table-entries", cl::init(13), cl::Hidden,
    cl::desc("Set minimum number of entries to use a jump table on AArch64"));

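// A command-line override, when present, takes precedence over the value
// configured for the current CPU (see initializeProperties below).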
unsigned AArch64Subtarget::getVectorInsertExtractBaseCost() const {
  if (OverrideVectorInsertExtractBaseCost.getNumOccurrences() > 0)
    return OverrideVectorInsertExtractBaseCost;
  return VectorInsertExtractBaseCost;
}

AArch64Subtarget &AArch64Subtarget::initializeSubtargetDependencies(
    StringRef FS, StringRef CPUString, StringRef TuneCPUString,
    bool HasMinSize) {
  // Determine default and user-specified characteristics

  if (CPUString.empty())
    CPUString = "generic";

  if (TuneCPUString.empty())
    TuneCPUString = CPUString;

  ParseSubtargetFeatures(CPUString, TuneCPUString, FS);
  initializeProperties(HasMinSize);

  return *this;
}

void AArch64Subtarget::initializeProperties(bool HasMinSize) {
  // Initialize CPU specific properties. We should add a tablegen feature for
  // this in the future so we can specify it together with the subtarget
  // features.
  switch (ARMProcFamily) {
  case Others:
    break;
  case Carmel:
    CacheLineSize = 64;
    break;
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexR82:
  case CortexR82AE:
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(16);
    MaxBytesForLoopAlignment = 8;
    break;
  case CortexA57:
    MaxInterleaveFactor = 4;
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(16);
    MaxBytesForLoopAlignment = 8;
    break;
  case CortexA65:
    PrefFunctionAlignment = Align(8);
    break;
  case CortexA72:
  case CortexA73:
  case CortexA75:
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(16);
    MaxBytesForLoopAlignment = 8;
    break;
  case CortexA76:
  case CortexA77:
  case CortexA78:
  case CortexA78AE:
  case CortexA78C:
  case CortexX1:
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(32);
    MaxBytesForLoopAlignment = 16;
    break;
  case CortexA510:
  case CortexA520:
    PrefFunctionAlignment = Align(16);
    VScaleForTuning = 1;
    PrefLoopAlignment = Align(16);
    MaxBytesForLoopAlignment = 8;
    break;
  case CortexA710:
  case CortexA715:
  case CortexA720:
  case CortexA725:
  case CortexX2:
  case CortexX3:
  case CortexX4:
  case CortexX925:
    PrefFunctionAlignment = Align(16);
    VScaleForTuning = 1;
    PrefLoopAlignment = Align(32);
    MaxBytesForLoopAlignment = 16;
    break;
  case A64FX:
    CacheLineSize = 256;
    PrefFunctionAlignment = Align(8);
    PrefLoopAlignment = Align(4);
    MaxInterleaveFactor = 4;
    PrefetchDistance = 128;
    MinPrefetchStride = 1024;
    MaxPrefetchIterationsAhead = 4;
    VScaleForTuning = 4;
    break;
  case AppleA7:
  case AppleA10:
  case AppleA11:
  case AppleA12:
  case AppleA13:
  case AppleA14:
  case AppleA15:
  case AppleA16:
  case AppleA17:
  case AppleM4:
    CacheLineSize = 64;
    PrefetchDistance = 280;
    MinPrefetchStride = 2048;
    MaxPrefetchIterationsAhead = 3;
    switch (ARMProcFamily) {
    case AppleA14:
    case AppleA15:
    case AppleA16:
    case AppleA17:
    case AppleM4:
      MaxInterleaveFactor = 4;
      break;
    default:
      break;
    }
    break;
  case ExynosM3:
    MaxInterleaveFactor = 4;
    MaxJumpTableSize = 20;
    PrefFunctionAlignment = Align(32);
    PrefLoopAlignment = Align(16);
    break;
  case Falkor:
    MaxInterleaveFactor = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    CacheLineSize = 128;
    PrefetchDistance = 820;
    MinPrefetchStride = 2048;
    MaxPrefetchIterationsAhead = 8;
    break;
  case Kryo:
    MaxInterleaveFactor = 4;
    VectorInsertExtractBaseCost = 2;
    CacheLineSize = 128;
    PrefetchDistance = 740;
    MinPrefetchStride = 1024;
    MaxPrefetchIterationsAhead = 11;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case NeoverseE1:
    PrefFunctionAlignment = Align(8);
    break;
  case NeoverseN1:
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(32);
    MaxBytesForLoopAlignment = 16;
    break;
  case NeoverseN2:
  case NeoverseN3:
  case NeoverseV2:
  case NeoverseV3:
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(32);
    MaxBytesForLoopAlignment = 16;
    VScaleForTuning = 1;
    break;
  case NeoverseV1:
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(32);
    MaxBytesForLoopAlignment = 16;
    VScaleForTuning = 2;
    DefaultSVETFOpts = TailFoldingOpts::Simple;
    break;
  case Neoverse512TVB:
    PrefFunctionAlignment = Align(16);
    VScaleForTuning = 1;
    MaxInterleaveFactor = 4;
    break;
  case Saphira:
    MaxInterleaveFactor = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case ThunderX2T99:
    CacheLineSize = 64;
    PrefFunctionAlignment = Align(8);
    PrefLoopAlignment = Align(4);
    MaxInterleaveFactor = 4;
    PrefetchDistance = 128;
    MinPrefetchStride = 1024;
    MaxPrefetchIterationsAhead = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case ThunderX:
  case ThunderXT88:
  case ThunderXT81:
  case ThunderXT83:
    CacheLineSize = 128;
    PrefFunctionAlignment = Align(8);
    PrefLoopAlignment = Align(4);
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case TSV110:
    CacheLineSize = 64;
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(4);
    break;
  case ThunderX3T110:
    CacheLineSize = 64;
    PrefFunctionAlignment = Align(16);
    PrefLoopAlignment = Align(4);
    MaxInterleaveFactor = 4;
    PrefetchDistance = 128;
    MinPrefetchStride = 1024;
    MaxPrefetchIterationsAhead = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case Ampere1:
  case Ampere1A:
  case Ampere1B:
    CacheLineSize = 64;
    PrefFunctionAlignment = Align(64);
    PrefLoopAlignment = Align(64);
    MaxInterleaveFactor = 4;
    break;
  case Oryon:
    CacheLineSize = 64;
    PrefFunctionAlignment = Align(16);
    MaxInterleaveFactor = 4;
    PrefetchDistance = 128;
    MinPrefetchStride = 1024;
    break;
  }

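  // Keep the pre-existing MinimumJumpTableEntries value when optimizing for
  // size, unless the threshold was explicitly overridden on the command line.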
  if (AArch64MinimumJumpTableEntries.getNumOccurrences() > 0 || !HasMinSize)
    MinimumJumpTableEntries = AArch64MinimumJumpTableEntries;
}

AArch64Subtarget::AArch64Subtarget(const Triple &TT, StringRef CPU,
                                   StringRef TuneCPU, StringRef FS,
                                   const TargetMachine &TM, bool LittleEndian,
                                   unsigned MinSVEVectorSizeInBitsOverride,
                                   unsigned MaxSVEVectorSizeInBitsOverride,
                                   bool IsStreaming, bool IsStreamingCompatible,
                                   bool HasMinSize)
    : AArch64GenSubtargetInfo(TT, CPU, TuneCPU, FS),
      ReserveXRegister(AArch64::GPR64commonRegClass.getNumRegs()),
      ReserveXRegisterForRA(AArch64::GPR64commonRegClass.getNumRegs()),
      CustomCallSavedXRegs(AArch64::GPR64commonRegClass.getNumRegs()),
      IsLittle(LittleEndian), IsStreaming(IsStreaming),
      IsStreamingCompatible(IsStreamingCompatible),
      MinSVEVectorSizeInBits(MinSVEVectorSizeInBitsOverride),
      MaxSVEVectorSizeInBits(MaxSVEVectorSizeInBitsOverride), TargetTriple(TT),
      InstrInfo(initializeSubtargetDependencies(FS, CPU, TuneCPU, HasMinSize)),
      TLInfo(TM, *this) {
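  // X18 is reserved as the platform register on some targets (e.g. Darwin and
  // Windows), so keep it away from the register allocator there.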
  if (AArch64::isX18ReservedByDefault(TT))
    ReserveXRegister.set(18);

  CallLoweringInfo.reset(new AArch64CallLowering(*getTargetLowering()));
  InlineAsmLoweringInfo.reset(new InlineAsmLowering(getTargetLowering()));
  Legalizer.reset(new AArch64LegalizerInfo(*this));

  auto *RBI = new AArch64RegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createAArch64InstructionSelector(
      *static_cast<const AArch64TargetMachine *>(&TM), *this, *RBI));

  RegBankInfo.reset(RBI);

  auto TRI = getRegisterInfo();
  StringSet<> ReservedRegNames;
  ReservedRegNames.insert(ReservedRegsForRA.begin(), ReservedRegsForRA.end());
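  // X0..X28 can be matched against the -reserve-regs-for-regalloc names via
  // TRI->getName(); LR (X30) and FP (X29) are handled separately below.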
  for (unsigned i = 0; i < 29; ++i) {
    if (ReservedRegNames.count(TRI->getName(AArch64::X0 + i)))
      ReserveXRegisterForRA.set(i);
  }
  // X30 is named LR, so we can't use TRI->getName to check X30.
  if (ReservedRegNames.count("X30") || ReservedRegNames.count("LR"))
    ReserveXRegisterForRA.set(30);
  // X29 is named FP, so we can't use TRI->getName to check X29.
  if (ReservedRegNames.count("X29") || ReservedRegNames.count("FP"))
    ReserveXRegisterForRA.set(29);

  AddressCheckPSV.reset(new AddressCheckPseudoSourceValue(TM));
}

const CallLowering *AArch64Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

const InlineAsmLowering *AArch64Subtarget::getInlineAsmLowering() const {
  return InlineAsmLoweringInfo.get();
}

InstructionSelector *AArch64Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *AArch64Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *AArch64Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

/// Find the target operand flags that describe how a global value should be
/// referenced for the current subtarget.
unsigned
AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
                                          const TargetMachine &TM) const {
  // The MachO large code model always goes via a GOT, simply to get a single
  // 8-byte absolute relocation on all global addresses.
  if (TM.getCodeModel() == CodeModel::Large && isTargetMachO())
    return AArch64II::MO_GOT;

  // All globals dynamically protected by MTE must have their address tags
  // synthesized. This is done by having the loader stash the tag in the GOT
  // entry. Force all tagged globals (even ones with internal linkage) through
  // the GOT.
  if (GV->isTagged())
    return AArch64II::MO_GOT;

  if (!TM.shouldAssumeDSOLocal(GV)) {
    if (GV->hasDLLImportStorageClass()) {
      return AArch64II::MO_GOT | AArch64II::MO_DLLIMPORT;
    }
    if (getTargetTriple().isOSWindows())
      return AArch64II::MO_GOT | AArch64II::MO_COFFSTUB;
    return AArch64II::MO_GOT;
  }

  // The small code model's direct accesses use ADRP, which cannot
  // necessarily produce the value 0 (if the code is above 4GB).
  // The same applies to the tiny code model, where we have a PC-relative LDR.
  if ((useSmallAddressing() || TM.getCodeModel() == CodeModel::Tiny) &&
      GV->hasExternalWeakLinkage())
    return AArch64II::MO_GOT;

  // References to tagged globals are marked with MO_NC | MO_TAGGED to indicate
  // that their nominal addresses are tagged and outside of the code model. In
  // AArch64ExpandPseudo::expandMI we emit an additional instruction to set the
  // tag if necessary based on MO_TAGGED.
  if (AllowTaggedGlobals && !isa<FunctionType>(GV->getValueType()))
    return AArch64II::MO_NC | AArch64II::MO_TAGGED;

  return AArch64II::MO_NO_FLAG;
}

unsigned AArch64Subtarget::classifyGlobalFunctionReference(
    const GlobalValue *GV, const TargetMachine &TM) const {
  // The MachO large code model always goes via a GOT, because we don't have
  // the relocations available to do anything else.
  if (TM.getCodeModel() == CodeModel::Large && isTargetMachO() &&
      !GV->hasInternalLinkage())
    return AArch64II::MO_GOT;

  // NonLazyBind goes via the GOT unless we know it's available locally.
  auto *F = dyn_cast<Function>(GV);
  if ((!isTargetMachO() || MachOUseNonLazyBind) && F &&
      F->hasFnAttribute(Attribute::NonLazyBind) && !TM.shouldAssumeDSOLocal(GV))
    return AArch64II::MO_GOT;

  if (getTargetTriple().isOSWindows()) {
    if (isWindowsArm64EC() && GV->getValueType()->isFunctionTy()) {
      if (GV->hasDLLImportStorageClass()) {
        // On Arm64EC, if we're calling a symbol from the import table
        // directly, use MO_ARM64EC_CALLMANGLE.
        return AArch64II::MO_GOT | AArch64II::MO_DLLIMPORT |
               AArch64II::MO_ARM64EC_CALLMANGLE;
      }
      if (GV->hasExternalLinkage()) {
        // If we're calling a symbol directly, use the mangled form in the
        // call instruction.
        return AArch64II::MO_ARM64EC_CALLMANGLE;
      }
    }

    // Use ClassifyGlobalReference for setting MO_DLLIMPORT/MO_COFFSTUB.
    return ClassifyGlobalReference(GV, TM);
  }

  return AArch64II::MO_NO_FLAG;
}

void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                           unsigned NumRegionInstrs) const {
  // An LNT run (at least on Cyclone) showed reasonably significant gains for
  // bi-directional scheduling, e.g. on 253.perlbmk.
  Policy.OnlyTopDown = false;
  Policy.OnlyBottomUp = false;
  // Enabling or disabling the latency heuristic is a close call: it seems to
  // help nearly no benchmark on out-of-order architectures, while on the other
  // hand it regresses register pressure on a few benchmarks.
  Policy.DisableLatencyHeuristic = DisableLatencySchedHeuristic;
}

void AArch64Subtarget::adjustSchedDependency(
    SUnit *Def, int DefOpIdx, SUnit *Use, int UseOpIdx, SDep &Dep,
    const TargetSchedModel *SchedModel) const {
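  // Only data dependencies on a register where at least one endpoint is a
  // BUNDLE need adjusting; everything else keeps the default latency.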
  if (!SchedModel || Dep.getKind() != SDep::Kind::Data || !Dep.getReg() ||
      !Def->isInstr() || !Use->isInstr() ||
      (Def->getInstr()->getOpcode() != TargetOpcode::BUNDLE &&
       Use->getInstr()->getOpcode() != TargetOpcode::BUNDLE))
    return;

  // If the Def is a BUNDLE, find the last instruction in the bundle that defs
  // the register.
  const MachineInstr *DefMI = Def->getInstr();
  if (DefMI->getOpcode() == TargetOpcode::BUNDLE) {
    Register Reg = DefMI->getOperand(DefOpIdx).getReg();
    for (const auto &Op : const_mi_bundle_ops(*DefMI)) {
      if (Op.isReg() && Op.isDef() && Op.getReg() == Reg) {
        DefMI = Op.getParent();
        DefOpIdx = Op.getOperandNo();
      }
    }
  }

  // If the Use is a BUNDLE, find the first instruction that uses the Reg.
  const MachineInstr *UseMI = Use->getInstr();
  if (UseMI->getOpcode() == TargetOpcode::BUNDLE) {
    Register Reg = UseMI->getOperand(UseOpIdx).getReg();
    for (const auto &Op : const_mi_bundle_ops(*UseMI)) {
      if (Op.isReg() && Op.isUse() && Op.getReg() == Reg) {
        UseMI = Op.getParent();
        UseOpIdx = Op.getOperandNo();
        break;
      }
    }
  }

  Dep.setLatency(
      SchedModel->computeOperandLatency(DefMI, DefOpIdx, UseMI, UseOpIdx));
}

bool AArch64Subtarget::enableEarlyIfConversion() const {
  return EnableEarlyIfConvert;
}

bool AArch64Subtarget::supportsAddressTopByteIgnored() const {
  if (!UseAddressTopByteIgnored)
    return false;

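  // Only report TBI support on OSes where it is known to apply to user-space
  // addresses: DriverKit, and iOS starting with version 8.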
  if (TargetTriple.isDriverKit())
    return true;
  if (TargetTriple.isiOS()) {
    return TargetTriple.getiOSVersion() >= VersionTuple(8);
  }

  return false;
}

std::unique_ptr<PBQPRAConstraint>
AArch64Subtarget::getCustomPBQPConstraints() const {
  return balanceFPOps() ? std::make_unique<A57ChainingConstraint>() : nullptr;
}

void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
  // We usually compute the max call frame size after ISel. Do the computation
  // now if the .mir file didn't specify it. Note that this will probably give
  // you bogus values after PEI has eliminated the callframe setup/destroy
  // pseudo instructions; specify it explicitly if you need it to be correct.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!MFI.isMaxCallFrameSizeComputed())
    MFI.computeMaxCallFrameSize(MF);
}

bool AArch64Subtarget::useAA() const { return UseAA; }

// If return address signing is enabled, tail calls are emitted as follows:
//
// ```
// <authenticate LR>
// <check LR>
// TCRETURN          ; the callee may sign and spill the LR in its prologue
// ```
//
// LR may require explicit checking because, if FEAT_FPAC is not implemented
// and LR was tampered with, then `<authenticate LR>` will not generate an
// exception on its own. Later, if the callee spills the signed LR value and
// neither FEAT_PAuth2 nor FEAT_EPAC are implemented, the valid PAC replaces
// the higher bits of LR, thus hiding the authentication failure.
AArch64PAuth::AuthCheckMethod AArch64Subtarget::getAuthenticatedLRCheckMethod(
    const MachineFunction &MF) const {
  // TODO: Check the subtarget for the scheme. The present variant is the
  // default for the pauthtest ABI.
  if (MF.getFunction().hasFnAttribute("ptrauth-returns") &&
      MF.getFunction().hasFnAttribute("ptrauth-auth-traps"))
    return AArch64PAuth::AuthCheckMethod::HighBitsNoTBI;
  if (AuthenticatedLRCheckMethod.getNumOccurrences())
    return AuthenticatedLRCheckMethod;

  // For now, use None by default because checks may introduce an unexpected
  // performance regression or an incompatibility with execute-only mappings.
  return AArch64PAuth::AuthCheckMethod::None;
}

std::optional<uint16_t>
AArch64Subtarget::getPtrAuthBlockAddressDiscriminatorIfEnabled(
    const Function &ParentFn) const {
  if (!ParentFn.hasFnAttribute("ptrauth-indirect-gotos"))
    return std::nullopt;
  // We currently have one simple mechanism for all targets.
  // This isn't ABI, so we can always do better in the future.
  return getPointerAuthStableSipHash(
      (Twine(ParentFn.getName()) + " blockaddress").str());
}

bool AArch64Subtarget::enableMachinePipeliner() const {
  return getSchedModel().hasInstrSchedModel();
}