//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H

#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
#include <optional>

namespace llvm {

namespace yaml {
struct AArch64FunctionInfo;
} // end namespace yaml

class AArch64Subtarget;
class MachineInstr;

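/// Describes the TPIDR2 block used for lazy saves: the frame index of the
/// stack object holding it and the number of times it is used.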
struct TPIDR2Object {
  int FrameIndex = std::numeric_limits<int>::max();
  unsigned Uses = 0;
};

/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private AArch64-specific information for each MachineFunction.
class AArch64FunctionInfo final : public MachineFunctionInfo {
  /// Number of bytes of arguments this function has on the stack. If the
  /// callee is expected to restore the argument stack this should be a
  /// multiple of 16, all usable during a tail call.
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8 bytes of stack-argument
  /// space to a function with 16 bytes, then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;

  /// The number of bytes to restore to deallocate space for incoming
  /// arguments. Canonically 0 in the C calling convention, but non-zero when
  /// callee is expected to pop the args.
  unsigned ArgumentStackToRestore = 0;

  /// Space just below incoming stack pointer reserved for arguments being
  /// passed on the stack during a tail call. This will be the difference
  /// between the largest tail call argument space needed in this function and
  /// what's already available by reusing space of incoming arguments.
  unsigned TailCallReservedStack = 0;

  /// HasStackFrame - True if this function has a stack frame. Set by
  /// determineCalleeSaves().
  bool HasStackFrame = false;

  /// Size of the local stack frame, not including callee-saved registers.
  uint64_t LocalStackSize = 0;

  /// The start and end frame indices for the SVE callee saves.
  int MinSVECSFrameIndex = 0;
  int MaxSVECSFrameIndex = 0;

  /// Amount of stack frame used for saving callee-saved registers.
  unsigned CalleeSavedStackSize = 0;
  unsigned SVECalleeSavedStackSize = 0;
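  /// True once CalleeSavedStackSize has been computed via
  /// setCalleeSavedStackSize().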
  bool HasCalleeSavedStackSize = false;

  /// Number of TLS accesses using the special (combinable)
  /// _TLS_MODULE_BASE_ symbol.
  unsigned NumLocalDynamicTLSAccesses = 0;

  /// FrameIndex for start of varargs area for arguments passed on the
  /// stack.
  int VarArgsStackIndex = 0;

  /// Offset of start of varargs area for arguments passed on the stack.
  unsigned VarArgsStackOffset = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// general purpose registers.
  int VarArgsGPRIndex = 0;

  /// Size of the varargs area for arguments passed in general purpose
  /// registers.
  unsigned VarArgsGPRSize = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// floating-point registers.
  int VarArgsFPRIndex = 0;

  /// Size of the varargs area for arguments passed in floating-point
  /// registers.
  unsigned VarArgsFPRSize = 0;

  /// The stack slots used to add space between FPR and GPR accesses when using
  /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
  /// StackHazardSlotIndex is added between (sorted) stack objects.
  int StackHazardSlotIndex = std::numeric_limits<int>::max();
  int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();

  /// True if this function has a subset of CSRs that is handled explicitly via
  /// copies.
  bool IsSplitCSR = false;

  /// True when the stack gets realigned dynamically because the size of the
  /// stack frame is unknown at compile time, e.g. in the case of VLAs.
  bool StackRealigned = false;

  /// True when the callee-save stack area has unused gaps that may be used for
  /// other stack allocations.
  bool CalleeSaveStackHasFreeSpace = false;

  /// SRetReturnReg - sret lowering includes returning the value of the
  /// returned struct in a register. This field holds the virtual register into
  /// which the sret argument is passed.
  Register SRetReturnReg;

  /// The SVE stack size (for predicates and data vectors) is maintained here
  /// rather than in FrameInfo, as the placement and Stack IDs are
  /// target-specific.
  uint64_t StackSizeSVE = 0;

  /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
  bool HasCalculatedStackSizeSVE = false;

  /// Has a value when it is known whether or not the function uses a
  /// redzone, and no value otherwise.
  /// Initialized during frame lowering, unless the function has the noredzone
  /// attribute, in which case it is set to false at construction.
  std::optional<bool> HasRedZone;

  /// ForwardedMustTailRegParms - A list of virtual and physical registers
  /// that must be forwarded to every musttail call.
  SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;

  /// FrameIndex for the tagged base pointer.
  std::optional<int> TaggedBasePointerIndex;

  /// Offset from SP-at-entry to the tagged base pointer.
  /// Tagged base pointer is set up to point to the first (lowest address)
  /// tagged stack slot.
  unsigned TaggedBasePointerOffset;

  /// OutliningStyle denotes, if a function was outlined, how it was outlined,
  /// e.g. Tail Call, Thunk, or Function if none apply.
  std::optional<std::string> OutliningStyle;

  // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
  // CalleeSavedStackSize) to the address of the frame record.
  int CalleeSaveBaseToFrameRecordOffset = 0;

  /// SignReturnAddress is true if PAC-RET is enabled for the function, the
  /// default being to sign non-leaf functions only, with the A key.
  bool SignReturnAddress = false;

  /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
  /// functions as well.
  bool SignReturnAddressAll = false;

  /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
  bool SignWithBKey = false;

  /// SignInstrLabel marks the PAC-RET signing instruction in the prologue, so
  /// it can be re-used for authentication in the epilogue when using PC as a
  /// second salt (FEAT_PAuth_LR).
  MCSymbol *SignInstrLabel = nullptr;

  /// BranchTargetEnforcement enables placing BTI instructions at potential
  /// indirect branch destinations.
  bool BranchTargetEnforcement = false;

  /// Indicates that SP signing should be diversified with PC as per PAuthLR.
  /// This is set by -mbranch-protection and will emit NOP instructions unless
  /// the subtarget feature +pauthlr is also used (in which case non-NOP
  /// instructions are emitted).
  bool BranchProtectionPAuthLR = false;

  /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
  /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
  /// extended record.
  bool HasSwiftAsyncContext = false;

  /// The stack slot where the Swift asynchronous context is stored.
  int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();

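  /// True if the function uses MTE (Memory Tagging Extension) stack tagging.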
  bool IsMTETagged = false;

  /// The function has a Scalable Vector or Scalable Predicate register
  /// argument or return type.
  bool IsSVECC = false;

  /// The TPIDR2 object used for lazy saves.
  TPIDR2Object TPIDR2;

  /// Whether this function changes streaming mode within the function.
  bool HasStreamingModeChanges = false;

  /// True if the function needs unwind information.
  mutable std::optional<bool> NeedsDwarfUnwindInfo;

  /// True if the function needs asynchronous unwind information.
  mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;

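  /// The stack probe size; a value of zero means stack probing is disabled
  /// (see hasStackProbing()).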
  int64_t StackProbeSize = 0;

  // Holds a register containing pstate.sm. This is set
  // on function entry to record the initial pstate of a function.
  Register PStateSMReg = MCRegister::NoRegister;

  // The predicate register (PNReg) used to build PTRUE instructions.
  // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
  unsigned PredicateRegForFillSpill = 0;

  // The stack slots where VG values are stored.
  int64_t VGIdx = std::numeric_limits<int>::max();
  int64_t StreamingVGIdx = std::numeric_limits<int>::max();

public:
  AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI);

  MachineFunctionInfo *
  clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
        const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
      const override;

  void setPredicateRegForFillSpill(unsigned Reg) {
    PredicateRegForFillSpill = Reg;
  }
  unsigned getPredicateRegForFillSpill() const {
    return PredicateRegForFillSpill;
  }

  Register getPStateSMReg() const { return PStateSMReg; };
  void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };

  int64_t getVGIdx() const { return VGIdx; };
  void setVGIdx(unsigned Idx) { VGIdx = Idx; };

  int64_t getStreamingVGIdx() const { return StreamingVGIdx; };
  void setStreamingVGIdx(unsigned FrameIdx) { StreamingVGIdx = FrameIdx; };

  bool isSVECC() const { return IsSVECC; };
  void setIsSVECC(bool s) { IsSVECC = s; };

  TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }

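  /// Initialize fields from the serialized MIR representation (see
  /// yaml::AArch64FunctionInfo below).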
  void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);

  unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
  void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }

  unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
  void setArgumentStackToRestore(unsigned bytes) {
    ArgumentStackToRestore = bytes;
  }

  unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
  void setTailCallReservedStack(unsigned bytes) {
    TailCallReservedStack = bytes;
  }

  bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }

  void setStackSizeSVE(uint64_t S) {
    HasCalculatedStackSizeSVE = true;
    StackSizeSVE = S;
  }

  uint64_t getStackSizeSVE() const { return StackSizeSVE; }

  bool hasStackFrame() const { return HasStackFrame; }
  void setHasStackFrame(bool s) { HasStackFrame = s; }

  bool isStackRealigned() const { return StackRealigned; }
  void setStackRealigned(bool s) { StackRealigned = s; }

  bool hasCalleeSaveStackFreeSpace() const {
    return CalleeSaveStackHasFreeSpace;
  }
  void setCalleeSaveStackHasFreeSpace(bool s) {
    CalleeSaveStackHasFreeSpace = s;
  }
  bool isSplitCSR() const { return IsSplitCSR; }
  void setIsSplitCSR(bool s) { IsSplitCSR = s; }

  void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
  uint64_t getLocalStackSize() const { return LocalStackSize; }

  void setOutliningStyle(std::string Style) { OutliningStyle = Style; }
  std::optional<std::string> getOutliningStyle() const {
    return OutliningStyle;
  }

  void setCalleeSavedStackSize(unsigned Size) {
    CalleeSavedStackSize = Size;
    HasCalleeSavedStackSize = true;
  }

  // When CalleeSavedStackSize has not been set (for example when
  // some MachineIR pass is run in isolation), recalculate it directly
  // from the CalleeSavedInfo.
  // Note: This information can only be recalculated after PEI
  // has assigned offsets to the callee save objects.
  unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
    bool ValidateCalleeSavedStackSize = false;

#ifndef NDEBUG
    // Make sure the calculated size derived from the CalleeSavedInfo
    // equals the cached size that was calculated elsewhere (e.g. in
    // determineCalleeSaves).
    ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
#endif

    if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
      assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
      if (MFI.getCalleeSavedInfo().empty())
        return 0;

      int64_t MinOffset = std::numeric_limits<int64_t>::max();
      int64_t MaxOffset = std::numeric_limits<int64_t>::min();
      for (const auto &Info : MFI.getCalleeSavedInfo()) {
        int FrameIdx = Info.getFrameIdx();
        if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
          continue;
        int64_t Offset = MFI.getObjectOffset(FrameIdx);
        int64_t ObjSize = MFI.getObjectSize(FrameIdx);
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
        int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());
        int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
        int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
        int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      unsigned Size = alignTo(MaxOffset - MinOffset, 16);
      assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
             "Invalid size calculated for callee saves");
      return Size;
    }

    return getCalleeSavedStackSize();
  }

  unsigned getCalleeSavedStackSize() const {
    assert(HasCalleeSavedStackSize &&
           "CalleeSavedStackSize has not been calculated");
    return CalleeSavedStackSize;
  }

  // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'.
  void setSVECalleeSavedStackSize(unsigned Size) {
    SVECalleeSavedStackSize = Size;
  }
  unsigned getSVECalleeSavedStackSize() const {
    return SVECalleeSavedStackSize;
  }

  void setMinMaxSVECSFrameIndex(int Min, int Max) {
    MinSVECSFrameIndex = Min;
    MaxSVECSFrameIndex = Max;
  }

  int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
  int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }

  void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
  unsigned getNumLocalDynamicTLSAccesses() const {
    return NumLocalDynamicTLSAccesses;
  }

  std::optional<bool> hasRedZone() const { return HasRedZone; }
  void setHasRedZone(bool s) { HasRedZone = s; }

  int getVarArgsStackIndex() const { return VarArgsStackIndex; }
  void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }

  unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
  void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }

  int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
  void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }

  unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
  void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }

  int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
  void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }

  unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
  void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }

  bool hasStackHazardSlotIndex() const {
    return StackHazardSlotIndex != std::numeric_limits<int>::max();
  }
  int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
  void setStackHazardSlotIndex(int Index) {
    assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
    StackHazardSlotIndex = Index;
  }
  int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
  void setStackHazardCSRSlotIndex(int Index) {
    assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
    StackHazardCSRSlotIndex = Index;
  }

  unsigned getSRetReturnReg() const { return SRetReturnReg; }
  void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }

  unsigned getJumpTableEntrySize(int Idx) const {
    return JumpTableEntryInfo[Idx].first;
  }
  MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
    return JumpTableEntryInfo[Idx].second;
  }
  void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
    if ((unsigned)Idx >= JumpTableEntryInfo.size())
      JumpTableEntryInfo.resize(Idx+1);
    JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
  }

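  // Set type used to track the instructions referenced by Linker Optimization
  // Hints (LOHs); see getLOHRelated() and addLOHDirective().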
  using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;

  const SetOfInstructions &getLOHRelated() const { return LOHRelated; }

  // Shortcuts for LOH related types.
  class MILOHDirective {
    MCLOHType Kind;

    /// Arguments of this directive. Order matters.
    SmallVector<const MachineInstr *, 3> Args;

  public:
    using LOHArgs = ArrayRef<const MachineInstr *>;

    MILOHDirective(MCLOHType Kind, LOHArgs Args)
        : Kind(Kind), Args(Args.begin(), Args.end()) {
      assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
    }

    MCLOHType getKind() const { return Kind; }
    LOHArgs getArgs() const { return Args; }
  };

  using MILOHArgs = MILOHDirective::LOHArgs;
  using MILOHContainer = SmallVector<MILOHDirective, 32>;

  const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }

  /// Add an LOH directive of kind @p Kind with arguments @p Args.
  void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
    LOHContainerSet.push_back(MILOHDirective(Kind, Args));
    LOHRelated.insert(Args.begin(), Args.end());
  }

  SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
    return ForwardedMustTailRegParms;
  }

  std::optional<int> getTaggedBasePointerIndex() const {
    return TaggedBasePointerIndex;
  }
  void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }

  unsigned getTaggedBasePointerOffset() const {
    return TaggedBasePointerOffset;
  }
  void setTaggedBasePointerOffset(unsigned Offset) {
    TaggedBasePointerOffset = Offset;
  }

  int getCalleeSaveBaseToFrameRecordOffset() const {
    return CalleeSaveBaseToFrameRecordOffset;
  }
  void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
    CalleeSaveBaseToFrameRecordOffset = Offset;
  }

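  /// Whether PAC-RET return address signing should be applied to this
  /// function (see SignReturnAddress and SignReturnAddressAll above).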
  bool shouldSignReturnAddress(const MachineFunction &MF) const;
  bool shouldSignReturnAddress(bool SpillsLR) const;

  bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;

  bool shouldSignWithBKey() const { return SignWithBKey; }

  MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
  void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }

  bool isMTETagged() const { return IsMTETagged; }

  bool branchTargetEnforcement() const { return BranchTargetEnforcement; }

  bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }

  void setHasSwiftAsyncContext(bool HasContext) {
    HasSwiftAsyncContext = HasContext;
  }
  bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }

  void setSwiftAsyncContextFrameIdx(int FI) {
    SwiftAsyncContextFrameIdx = FI;
  }
  int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }

  bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
  bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;

  bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
  void setHasStreamingModeChanges(bool HasChanges) {
    HasStreamingModeChanges = HasChanges;
  }

  bool hasStackProbing() const { return StackProbeSize != 0; }

  int64_t getStackProbeSize() const { return StackProbeSize; }

private:
  // Hold the lists of LOHs.
  MILOHContainer LOHContainerSet;
  SetOfInstructions LOHRelated;

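  // Size and PC-relative symbol of each jump table entry, indexed by jump
  // table ID (see setJumpTableEntryInfo()).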
  SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
};

namespace yaml {
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
  std::optional<bool> HasRedZone;

  AArch64FunctionInfo() = default;
  AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~AArch64FunctionInfo() = default;
};

template <> struct MappingTraits<AArch64FunctionInfo> {
  static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
    YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
  }
};

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H