xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares AArch64-specific per-machine-function information.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
14 #define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
15 
16 #include "AArch64Subtarget.h"
17 #include "Utils/AArch64SMEAttributes.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/CodeGen/CallingConvLower.h"
22 #include "llvm/CodeGen/MIRYamlMapping.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/MC/MCLinkerOptimizationHint.h"
27 #include "llvm/MC/MCSymbol.h"
28 #include <cassert>
29 #include <optional>
30 
31 namespace llvm {
32 
33 namespace yaml {
34 struct AArch64FunctionInfo;
35 } // end namespace yaml
36 
37 class AArch64Subtarget;
38 class MachineInstr;
39 
/// Bookkeeping for the TPIDR2 block used by SME lazy-save sequences:
/// the stack slot holding the block and how often it is referenced.
struct TPIDR2Object {
  // Sentinel std::numeric_limits<int>::max() means "no frame index
  // has been allocated for the TPIDR2 object yet".
  int FrameIndex = std::numeric_limits<int>::max();
  // Number of uses of the TPIDR2 object within this function.
  unsigned Uses = 0;
};
44 
45 /// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
46 /// contains private AArch64-specific information for each MachineFunction.
47 class AArch64FunctionInfo final : public MachineFunctionInfo {
48   /// Number of bytes of arguments this function has on the stack. If the callee
49   /// is expected to restore the argument stack this should be a multiple of 16,
50   /// all usable during a tail call.
51   ///
52   /// The alternative would forbid tail call optimisation in some cases: if we
53   /// want to transfer control from a function with 8-bytes of stack-argument
54   /// space to a function with 16-bytes then misalignment of this value would
55   /// make a stack adjustment necessary, which could not be undone by the
56   /// callee.
57   unsigned BytesInStackArgArea = 0;
58 
59   /// The number of bytes to restore to deallocate space for incoming
60   /// arguments. Canonically 0 in the C calling convention, but non-zero when
61   /// callee is expected to pop the args.
62   unsigned ArgumentStackToRestore = 0;
63 
64   /// Space just below incoming stack pointer reserved for arguments being
65   /// passed on the stack during a tail call. This will be the difference
66   /// between the largest tail call argument space needed in this function and
67   /// what's already available by reusing space of incoming arguments.
68   unsigned TailCallReservedStack = 0;
69 
70   /// HasStackFrame - True if this function has a stack frame. Set by
71   /// determineCalleeSaves().
72   bool HasStackFrame = false;
73 
74   /// Amount of stack frame size, not including callee-saved registers.
75   uint64_t LocalStackSize = 0;
76 
77   /// The start and end frame indices for the SVE callee saves.
78   int MinSVECSFrameIndex = 0;
79   int MaxSVECSFrameIndex = 0;
80 
81   /// Amount of stack frame size used for saving callee-saved registers.
82   unsigned CalleeSavedStackSize = 0;
83   unsigned SVECalleeSavedStackSize = 0;
84   bool HasCalleeSavedStackSize = false;
85   bool HasSVECalleeSavedStackSize = false;
86 
87   /// Number of TLS accesses using the special (combinable)
88   /// _TLS_MODULE_BASE_ symbol.
89   unsigned NumLocalDynamicTLSAccesses = 0;
90 
91   /// FrameIndex for start of varargs area for arguments passed on the
92   /// stack.
93   int VarArgsStackIndex = 0;
94 
95   /// Offset of start of varargs area for arguments passed on the stack.
96   unsigned VarArgsStackOffset = 0;
97 
98   /// FrameIndex for start of varargs area for arguments passed in
99   /// general purpose registers.
100   int VarArgsGPRIndex = 0;
101 
102   /// Size of the varargs area for arguments passed in general purpose
103   /// registers.
104   unsigned VarArgsGPRSize = 0;
105 
106   /// FrameIndex for start of varargs area for arguments passed in
107   /// floating-point registers.
108   int VarArgsFPRIndex = 0;
109 
110   /// Size of the varargs area for arguments passed in floating-point
111   /// registers.
112   unsigned VarArgsFPRSize = 0;
113 
114   /// The stack slots used to add space between FPR and GPR accesses when using
115   /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
116   /// StackHazardSlotIndex is added between (sorted) stack objects.
117   int StackHazardSlotIndex = std::numeric_limits<int>::max();
118   int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();
119 
120   /// True if this function has a subset of CSRs that is handled explicitly via
121   /// copies.
122   bool IsSplitCSR = false;
123 
124   /// True when the stack gets realigned dynamically because the size of stack
125   /// frame is unknown at compile time. e.g., in case of VLAs.
126   bool StackRealigned = false;
127 
128   /// True when the callee-save stack area has unused gaps that may be used for
129   /// other stack allocations.
130   bool CalleeSaveStackHasFreeSpace = false;
131 
132   /// SRetReturnReg - sret lowering includes returning the value of the
133   /// returned struct in a register. This field holds the virtual register into
134   /// which the sret argument is passed.
135   Register SRetReturnReg;
136 
137   /// SVE stack size (for predicates and data vectors) are maintained here
138   /// rather than in FrameInfo, as the placement and Stack IDs are target
139   /// specific.
140   uint64_t StackSizeSVE = 0;
141 
142   /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
143   bool HasCalculatedStackSizeSVE = false;
144 
145   /// Has a value when it is known whether or not the function uses a
146   /// redzone, and no value otherwise.
147   /// Initialized during frame lowering, unless the function has the noredzone
148   /// attribute, in which case it is set to false at construction.
149   std::optional<bool> HasRedZone;
150 
151   /// ForwardedMustTailRegParms - A list of virtual and physical registers
152   /// that must be forwarded to every musttail call.
153   SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
154 
155   /// FrameIndex for the tagged base pointer.
156   std::optional<int> TaggedBasePointerIndex;
157 
158   /// Offset from SP-at-entry to the tagged base pointer.
159   /// Tagged base pointer is set up to point to the first (lowest address)
160   /// tagged stack slot.
161   unsigned TaggedBasePointerOffset;
162 
163   /// OutliningStyle denotes, if a function was outined, how it was outlined,
164   /// e.g. Tail Call, Thunk, or Function if none apply.
165   std::optional<std::string> OutliningStyle;
166 
167   // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
168   // CalleeSavedStackSize) to the address of the frame record.
169   int CalleeSaveBaseToFrameRecordOffset = 0;
170 
171   /// SignReturnAddress is true if PAC-RET is enabled for the function with
172   /// defaults being sign non-leaf functions only, with the B key.
173   bool SignReturnAddress = false;
174 
175   /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
176   /// functions as well.
177   bool SignReturnAddressAll = false;
178 
179   /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
180   bool SignWithBKey = false;
181 
182   /// HasELFSignedGOT is true if the target binary format is ELF and the IR
183   /// module containing the corresponding function has "ptrauth-elf-got" flag
184   /// set to 1.
185   bool HasELFSignedGOT = false;
186 
187   /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
188   /// within the prologue, so it can be re-used for authentication in the
189   /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
190   MCSymbol *SignInstrLabel = nullptr;
191 
192   /// BranchTargetEnforcement enables placing BTI instructions at potential
193   /// indirect branch destinations.
194   bool BranchTargetEnforcement = false;
195 
196   /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
197   /// This is set by -mbranch-protection and will emit NOP instructions unless
198   /// the subtarget feature +pauthlr is also used (in which case non-NOP
199   /// instructions are emitted).
200   bool BranchProtectionPAuthLR = false;
201 
202   /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
203   /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
204   /// extended record.
205   bool HasSwiftAsyncContext = false;
206 
207   /// The stack slot where the Swift asynchronous context is stored.
208   int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();
209 
210   bool IsMTETagged = false;
211 
212   /// The function has Scalable Vector or Scalable Predicate register argument
213   /// or return type
214   bool IsSVECC = false;
215 
216   /// The frame-index for the TPIDR2 object used for lazy saves.
217   TPIDR2Object TPIDR2;
218 
219   /// Whether this function changes streaming mode within the function.
220   bool HasStreamingModeChanges = false;
221 
222   /// True if the function need unwind information.
223   mutable std::optional<bool> NeedsDwarfUnwindInfo;
224 
225   /// True if the function need asynchronous unwind information.
226   mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
227 
228   int64_t StackProbeSize = 0;
229 
230   // Holds a register containing pstate.sm. This is set
231   // on function entry to record the initial pstate of a function.
232   Register PStateSMReg = MCRegister::NoRegister;
233 
234   // Holds a pointer to a buffer that is large enough to represent
235   // all SME ZA state and any additional state required by the
236   // __arm_sme_save/restore support routines.
237   Register SMESaveBufferAddr = MCRegister::NoRegister;
238 
239   // true if SMESaveBufferAddr is used.
240   bool SMESaveBufferUsed = false;
241 
242   // Has the PNReg used to build PTRUE instruction.
243   // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
244   unsigned PredicateRegForFillSpill = 0;
245 
246   // The stack slots where VG values are stored to.
247   int64_t VGIdx = std::numeric_limits<int>::max();
248   int64_t StreamingVGIdx = std::numeric_limits<int>::max();
249 
250   // Holds the SME function attributes (streaming mode, ZA/ZT0 state).
251   SMEAttrs SMEFnAttrs;
252 
253 public:
254   AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI);
255 
256   MachineFunctionInfo *
257   clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
258         const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
259       const override;
260 
setPredicateRegForFillSpill(unsigned Reg)261   void setPredicateRegForFillSpill(unsigned Reg) {
262     PredicateRegForFillSpill = Reg;
263   }
getPredicateRegForFillSpill()264   unsigned getPredicateRegForFillSpill() const {
265     return PredicateRegForFillSpill;
266   }
267 
getSMESaveBufferAddr()268   Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; };
setSMESaveBufferAddr(Register Reg)269   void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; };
270 
isSMESaveBufferUsed()271   unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; };
272   void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; };
273 
getPStateSMReg()274   Register getPStateSMReg() const { return PStateSMReg; };
setPStateSMReg(Register Reg)275   void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };
276 
getVGIdx()277   int64_t getVGIdx() const { return VGIdx; };
setVGIdx(unsigned Idx)278   void setVGIdx(unsigned Idx) { VGIdx = Idx; };
279 
getStreamingVGIdx()280   int64_t getStreamingVGIdx() const { return StreamingVGIdx; };
setStreamingVGIdx(unsigned FrameIdx)281   void setStreamingVGIdx(unsigned FrameIdx) { StreamingVGIdx = FrameIdx; };
282 
isSVECC()283   bool isSVECC() const { return IsSVECC; };
setIsSVECC(bool s)284   void setIsSVECC(bool s) { IsSVECC = s; };
285 
getTPIDR2Obj()286   TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
287 
288   void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);
289 
getBytesInStackArgArea()290   unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
setBytesInStackArgArea(unsigned bytes)291   void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
292 
getArgumentStackToRestore()293   unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
setArgumentStackToRestore(unsigned bytes)294   void setArgumentStackToRestore(unsigned bytes) {
295     ArgumentStackToRestore = bytes;
296   }
297 
getTailCallReservedStack()298   unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
setTailCallReservedStack(unsigned bytes)299   void setTailCallReservedStack(unsigned bytes) {
300     TailCallReservedStack = bytes;
301   }
302 
hasCalculatedStackSizeSVE()303   bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
304 
setStackSizeSVE(uint64_t S)305   void setStackSizeSVE(uint64_t S) {
306     HasCalculatedStackSizeSVE = true;
307     StackSizeSVE = S;
308   }
309 
getStackSizeSVE()310   uint64_t getStackSizeSVE() const {
311     assert(hasCalculatedStackSizeSVE());
312     return StackSizeSVE;
313   }
314 
hasStackFrame()315   bool hasStackFrame() const { return HasStackFrame; }
setHasStackFrame(bool s)316   void setHasStackFrame(bool s) { HasStackFrame = s; }
317 
isStackRealigned()318   bool isStackRealigned() const { return StackRealigned; }
setStackRealigned(bool s)319   void setStackRealigned(bool s) { StackRealigned = s; }
320 
hasCalleeSaveStackFreeSpace()321   bool hasCalleeSaveStackFreeSpace() const {
322     return CalleeSaveStackHasFreeSpace;
323   }
setCalleeSaveStackHasFreeSpace(bool s)324   void setCalleeSaveStackHasFreeSpace(bool s) {
325     CalleeSaveStackHasFreeSpace = s;
326   }
isSplitCSR()327   bool isSplitCSR() const { return IsSplitCSR; }
setIsSplitCSR(bool s)328   void setIsSplitCSR(bool s) { IsSplitCSR = s; }
329 
setLocalStackSize(uint64_t Size)330   void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
getLocalStackSize()331   uint64_t getLocalStackSize() const { return LocalStackSize; }
332 
setOutliningStyle(const std::string & Style)333   void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
getOutliningStyle()334   std::optional<std::string> getOutliningStyle() const {
335     return OutliningStyle;
336   }
337 
setCalleeSavedStackSize(unsigned Size)338   void setCalleeSavedStackSize(unsigned Size) {
339     CalleeSavedStackSize = Size;
340     HasCalleeSavedStackSize = true;
341   }
342 
343   // When CalleeSavedStackSize has not been set (for example when
344   // some MachineIR pass is run in isolation), then recalculate
345   // the CalleeSavedStackSize directly from the CalleeSavedInfo.
346   // Note: This information can only be recalculated after PEI
347   // has assigned offsets to the callee save objects.
getCalleeSavedStackSize(const MachineFrameInfo & MFI)348   unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
349     bool ValidateCalleeSavedStackSize = false;
350 
351 #ifndef NDEBUG
352     // Make sure the calculated size derived from the CalleeSavedInfo
353     // equals the cached size that was calculated elsewhere (e.g. in
354     // determineCalleeSaves).
355     ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
356 #endif
357 
358     if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
359       assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
360       if (MFI.getCalleeSavedInfo().empty())
361         return 0;
362 
363       int64_t MinOffset = std::numeric_limits<int64_t>::max();
364       int64_t MaxOffset = std::numeric_limits<int64_t>::min();
365       for (const auto &Info : MFI.getCalleeSavedInfo()) {
366         int FrameIdx = Info.getFrameIdx();
367         if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
368           continue;
369         int64_t Offset = MFI.getObjectOffset(FrameIdx);
370         int64_t ObjSize = MFI.getObjectSize(FrameIdx);
371         MinOffset = std::min<int64_t>(Offset, MinOffset);
372         MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
373       }
374 
375       if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
376         int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());
377         int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
378         MinOffset = std::min<int64_t>(Offset, MinOffset);
379         MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
380       }
381 
382       if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
383         int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
384         int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
385         MinOffset = std::min<int64_t>(Offset, MinOffset);
386         MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
387       }
388 
389       unsigned Size = alignTo(MaxOffset - MinOffset, 16);
390       assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
391              "Invalid size calculated for callee saves");
392       return Size;
393     }
394 
395     return getCalleeSavedStackSize();
396   }
397 
getCalleeSavedStackSize()398   unsigned getCalleeSavedStackSize() const {
399     assert(HasCalleeSavedStackSize &&
400            "CalleeSavedStackSize has not been calculated");
401     return CalleeSavedStackSize;
402   }
403 
404   // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
setSVECalleeSavedStackSize(unsigned Size)405   void setSVECalleeSavedStackSize(unsigned Size) {
406     SVECalleeSavedStackSize = Size;
407     HasSVECalleeSavedStackSize = true;
408   }
getSVECalleeSavedStackSize()409   unsigned getSVECalleeSavedStackSize() const {
410     assert(HasSVECalleeSavedStackSize &&
411            "SVECalleeSavedStackSize has not been calculated");
412     return SVECalleeSavedStackSize;
413   }
414 
setMinMaxSVECSFrameIndex(int Min,int Max)415   void setMinMaxSVECSFrameIndex(int Min, int Max) {
416     MinSVECSFrameIndex = Min;
417     MaxSVECSFrameIndex = Max;
418   }
419 
getMinSVECSFrameIndex()420   int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
getMaxSVECSFrameIndex()421   int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }
422 
incNumLocalDynamicTLSAccesses()423   void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
getNumLocalDynamicTLSAccesses()424   unsigned getNumLocalDynamicTLSAccesses() const {
425     return NumLocalDynamicTLSAccesses;
426   }
427 
hasRedZone()428   std::optional<bool> hasRedZone() const { return HasRedZone; }
setHasRedZone(bool s)429   void setHasRedZone(bool s) { HasRedZone = s; }
430 
getVarArgsStackIndex()431   int getVarArgsStackIndex() const { return VarArgsStackIndex; }
setVarArgsStackIndex(int Index)432   void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
433 
getVarArgsStackOffset()434   unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
setVarArgsStackOffset(unsigned Offset)435   void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }
436 
getVarArgsGPRIndex()437   int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
setVarArgsGPRIndex(int Index)438   void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
439 
getVarArgsGPRSize()440   unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
setVarArgsGPRSize(unsigned Size)441   void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
442 
getVarArgsFPRIndex()443   int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
setVarArgsFPRIndex(int Index)444   void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
445 
getVarArgsFPRSize()446   unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
setVarArgsFPRSize(unsigned Size)447   void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
448 
hasStackHazardSlotIndex()449   bool hasStackHazardSlotIndex() const {
450     return StackHazardSlotIndex != std::numeric_limits<int>::max();
451   }
getStackHazardSlotIndex()452   int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
setStackHazardSlotIndex(int Index)453   void setStackHazardSlotIndex(int Index) {
454     assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
455     StackHazardSlotIndex = Index;
456   }
getStackHazardCSRSlotIndex()457   int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
setStackHazardCSRSlotIndex(int Index)458   void setStackHazardCSRSlotIndex(int Index) {
459     assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
460     StackHazardCSRSlotIndex = Index;
461   }
462 
getSMEFnAttrs()463   SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; }
464 
getSRetReturnReg()465   unsigned getSRetReturnReg() const { return SRetReturnReg; }
setSRetReturnReg(unsigned Reg)466   void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
467 
getJumpTableEntrySize(int Idx)468   unsigned getJumpTableEntrySize(int Idx) const {
469     return JumpTableEntryInfo[Idx].first;
470   }
getJumpTableEntryPCRelSymbol(int Idx)471   MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
472     return JumpTableEntryInfo[Idx].second;
473   }
setJumpTableEntryInfo(int Idx,unsigned Size,MCSymbol * PCRelSym)474   void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
475     if ((unsigned)Idx >= JumpTableEntryInfo.size())
476       JumpTableEntryInfo.resize(Idx+1);
477     JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
478   }
479 
480   using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;
481 
getLOHRelated()482   const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
483 
484   // Shortcuts for LOH related types.
485   class MILOHDirective {
486     MCLOHType Kind;
487 
488     /// Arguments of this directive. Order matters.
489     SmallVector<const MachineInstr *, 3> Args;
490 
491   public:
492     using LOHArgs = ArrayRef<const MachineInstr *>;
493 
MILOHDirective(MCLOHType Kind,LOHArgs Args)494     MILOHDirective(MCLOHType Kind, LOHArgs Args)
495         : Kind(Kind), Args(Args.begin(), Args.end()) {
496       assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
497     }
498 
getKind()499     MCLOHType getKind() const { return Kind; }
getArgs()500     LOHArgs getArgs() const { return Args; }
501   };
502 
503   using MILOHArgs = MILOHDirective::LOHArgs;
504   using MILOHContainer = SmallVector<MILOHDirective, 32>;
505 
getLOHContainer()506   const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
507 
508   /// Add a LOH directive of this @p Kind and this @p Args.
addLOHDirective(MCLOHType Kind,MILOHArgs Args)509   void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
510     LOHContainerSet.push_back(MILOHDirective(Kind, Args));
511     LOHRelated.insert_range(Args);
512   }
513 
514   size_t
clearLinkerOptimizationHints(const SmallPtrSetImpl<MachineInstr * > & MIs)515   clearLinkerOptimizationHints(const SmallPtrSetImpl<MachineInstr *> &MIs) {
516     size_t InitialSize = LOHContainerSet.size();
517     erase_if(LOHContainerSet, [&](const auto &D) {
518       return any_of(D.getArgs(), [&](auto *Arg) { return MIs.contains(Arg); });
519     });
520     // In theory there could be an LOH with one label in MIs and another label
521     // outside MIs, however we don't know if the label outside MIs is used in
522     // any other LOHs, so we can't remove them from LOHRelated. In that case, we
523     // might produce a few extra labels, but it won't break anything.
524     LOHRelated.remove_if([&](auto *MI) { return MIs.contains(MI); });
525     return InitialSize - LOHContainerSet.size();
526   };
527 
getForwardedMustTailRegParms()528   SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
529     return ForwardedMustTailRegParms;
530   }
531 
getTaggedBasePointerIndex()532   std::optional<int> getTaggedBasePointerIndex() const {
533     return TaggedBasePointerIndex;
534   }
setTaggedBasePointerIndex(int Index)535   void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
536 
getTaggedBasePointerOffset()537   unsigned getTaggedBasePointerOffset() const {
538     return TaggedBasePointerOffset;
539   }
setTaggedBasePointerOffset(unsigned Offset)540   void setTaggedBasePointerOffset(unsigned Offset) {
541     TaggedBasePointerOffset = Offset;
542   }
543 
getCalleeSaveBaseToFrameRecordOffset()544   int getCalleeSaveBaseToFrameRecordOffset() const {
545     return CalleeSaveBaseToFrameRecordOffset;
546   }
setCalleeSaveBaseToFrameRecordOffset(int Offset)547   void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
548     CalleeSaveBaseToFrameRecordOffset = Offset;
549   }
550 
551   bool shouldSignReturnAddress(const MachineFunction &MF) const;
552   bool shouldSignReturnAddress(bool SpillsLR) const;
553 
554   bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;
555 
shouldSignWithBKey()556   bool shouldSignWithBKey() const { return SignWithBKey; }
557 
hasELFSignedGOT()558   bool hasELFSignedGOT() const { return HasELFSignedGOT; }
559 
getSigningInstrLabel()560   MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
setSigningInstrLabel(MCSymbol * Label)561   void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
562 
isMTETagged()563   bool isMTETagged() const { return IsMTETagged; }
564 
branchTargetEnforcement()565   bool branchTargetEnforcement() const { return BranchTargetEnforcement; }
566 
branchProtectionPAuthLR()567   bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }
568 
setHasSwiftAsyncContext(bool HasContext)569   void setHasSwiftAsyncContext(bool HasContext) {
570     HasSwiftAsyncContext = HasContext;
571   }
hasSwiftAsyncContext()572   bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }
573 
setSwiftAsyncContextFrameIdx(int FI)574   void setSwiftAsyncContextFrameIdx(int FI) {
575     SwiftAsyncContextFrameIdx = FI;
576   }
getSwiftAsyncContextFrameIdx()577   int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
578 
579   bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
580   bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;
581 
hasStreamingModeChanges()582   bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
setHasStreamingModeChanges(bool HasChanges)583   void setHasStreamingModeChanges(bool HasChanges) {
584     HasStreamingModeChanges = HasChanges;
585   }
586 
hasStackProbing()587   bool hasStackProbing() const { return StackProbeSize != 0; }
588 
getStackProbeSize()589   int64_t getStackProbeSize() const { return StackProbeSize; }
590 
591 private:
592   // Hold the lists of LOHs.
593   MILOHContainer LOHContainerSet;
594   SetOfInstructions LOHRelated;
595 
596   SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
597 };
598 
599 namespace yaml {
600 struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
601   std::optional<bool> HasRedZone;
602   std::optional<uint64_t> StackSizeSVE;
603 
604   AArch64FunctionInfo() = default;
605   AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);
606 
607   void mappingImpl(yaml::IO &YamlIO) override;
608   ~AArch64FunctionInfo() = default;
609 };
610 
611 template <> struct MappingTraits<AArch64FunctionInfo> {
612   static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
613     YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
614     YamlIO.mapOptional("stackSizeSVE", MFI.StackSizeSVE);
615   }
616 };
617 
618 } // end namespace yaml
619 
620 } // end namespace llvm
621 
622 #endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
623