//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include <cassert>
#include <optional>

namespace llvm {

namespace yaml {
struct AArch64FunctionInfo;
} // end namespace yaml

class AArch64Subtarget;
class MachineInstr;

/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private AArch64-specific information for each MachineFunction.
class AArch64FunctionInfo final : public MachineFunctionInfo {
  /// Number of bytes of arguments this function has on the stack. If the
  /// callee is expected to restore the argument stack this should be a
  /// multiple of 16, all of it usable during a tail call.
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8 bytes of stack-argument
  /// space to a function with 16 bytes, then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;

  /// The number of bytes to restore to deallocate space for incoming
  /// arguments. Canonically 0 in the C calling convention, but non-zero when
  /// callee is expected to pop the args.
  unsigned ArgumentStackToRestore = 0;

  /// Space just below incoming stack pointer reserved for arguments being
  /// passed on the stack during a tail call. This will be the difference
  /// between the largest tail call argument space needed in this function and
  /// what's already available by reusing space of incoming arguments.
  unsigned TailCallReservedStack = 0;

  /// HasStackFrame - True if this function has a stack frame. Set by
  /// determineCalleeSaves().
  bool HasStackFrame = false;

  /// Amount of stack frame size, not including callee-saved registers.
  uint64_t LocalStackSize = 0;

  /// The start and end frame indices for the SVE callee saves.
  int MinSVECSFrameIndex = 0;
  int MaxSVECSFrameIndex = 0;

  /// Amount of stack frame size used for saving callee-saved registers.
  unsigned CalleeSavedStackSize = 0;
  unsigned SVECalleeSavedStackSize = 0;
  bool HasCalleeSavedStackSize = false;

  /// Number of TLS accesses using the special (combinable)
  /// _TLS_MODULE_BASE_ symbol.
  unsigned NumLocalDynamicTLSAccesses = 0;

  /// FrameIndex for start of varargs area for arguments passed on the
  /// stack.
  int VarArgsStackIndex = 0;

  /// Offset of start of varargs area for arguments passed on the stack.
  unsigned VarArgsStackOffset = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// general purpose registers.
  int VarArgsGPRIndex = 0;

  /// Size of the varargs area for arguments passed in general purpose
  /// registers.
  unsigned VarArgsGPRSize = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// floating-point registers.
  int VarArgsFPRIndex = 0;

  /// Size of the varargs area for arguments passed in floating-point
  /// registers.
  unsigned VarArgsFPRSize = 0;

  /// True if this function has a subset of CSRs that is handled explicitly via
  /// copies.
  bool IsSplitCSR = false;

  /// True when the stack gets realigned dynamically because the size of the
  /// stack frame is unknown at compile time, e.g. in the case of VLAs.
  bool StackRealigned = false;

  /// True when the callee-save stack area has unused gaps that may be used for
  /// other stack allocations.
  bool CalleeSaveStackHasFreeSpace = false;

  /// SRetReturnReg - sret lowering includes returning the value of the
  /// returned struct in a register. This field holds the virtual register into
  /// which the sret argument is passed.
  Register SRetReturnReg;

  /// The SVE stack size (for predicates and data vectors) is maintained here
  /// rather than in MachineFrameInfo, as its placement and Stack IDs are
  /// target specific.
  uint64_t StackSizeSVE = 0;

  /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
  bool HasCalculatedStackSizeSVE = false;

  /// Has a value when it is known whether or not the function uses a
  /// redzone, and no value otherwise.
  /// Initialized during frame lowering, unless the function has the noredzone
  /// attribute, in which case it is set to false at construction.
  std::optional<bool> HasRedZone;

  /// ForwardedMustTailRegParms - A list of virtual and physical registers
  /// that must be forwarded to every musttail call.
  SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;

  /// FrameIndex for the tagged base pointer.
  std::optional<int> TaggedBasePointerIndex;

  /// Offset from SP-at-entry to the tagged base pointer.
  /// Tagged base pointer is set up to point to the first (lowest address)
  /// tagged stack slot.
  unsigned TaggedBasePointerOffset;

  /// OutliningStyle denotes, if a function was outlined, how it was outlined,
  /// e.g. Tail Call, Thunk, or Function if none of those apply.
  std::optional<std::string> OutliningStyle;

  // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
  // CalleeSavedStackSize) to the address of the frame record.
  int CalleeSaveBaseToFrameRecordOffset = 0;

  /// SignReturnAddress is true if PAC-RET is enabled for the function, the
  /// default being to sign non-leaf functions only, with the A key.
  bool SignReturnAddress = false;

  /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
  /// functions as well.
  bool SignReturnAddressAll = false;

  /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
  bool SignWithBKey = false;

  /// BranchTargetEnforcement enables placing BTI instructions at potential
  /// indirect branch destinations.
  bool BranchTargetEnforcement = false;

  /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
  /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
  /// extended record.
  bool HasSwiftAsyncContext = false;

  /// The stack slot where the Swift asynchronous context is stored.
  int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();

  bool IsMTETagged = false;

  /// True if the function has a scalable vector or scalable predicate
  /// register argument or return type.
  bool IsSVECC = false;

  /// The frame-index for the TPIDR2 object used for lazy saves.
  Register LazySaveTPIDR2Obj = 0;

  /// True if the function needs unwind information.
  mutable std::optional<bool> NeedsDwarfUnwindInfo;

  /// True if the function needs asynchronous unwind information.
  mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
public:
  AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI);

  MachineFunctionInfo *
  clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
        const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
      const override;

  bool isSVECC() const { return IsSVECC; }
  void setIsSVECC(bool s) { IsSVECC = s; }

  unsigned getLazySaveTPIDR2Obj() const { return LazySaveTPIDR2Obj; }
  void setLazySaveTPIDR2Obj(unsigned Reg) { LazySaveTPIDR2Obj = Reg; }

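  /// Initialize the fields above from the parsed MIR YAML representation; the
  /// only field currently carried by yaml::AArch64FunctionInfo (see below) is
  /// the red-zone flag.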
  void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);

  unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
  void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }

  unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
  void setArgumentStackToRestore(unsigned bytes) {
    ArgumentStackToRestore = bytes;
  }

  unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
  void setTailCallReservedStack(unsigned bytes) {
    TailCallReservedStack = bytes;
  }

  bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }

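  /// Record the stack size used by SVE objects and mark it as computed for
  /// hasCalculatedStackSizeSVE().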
  void setStackSizeSVE(uint64_t S) {
    HasCalculatedStackSizeSVE = true;
    StackSizeSVE = S;
  }

  uint64_t getStackSizeSVE() const { return StackSizeSVE; }

  bool hasStackFrame() const { return HasStackFrame; }
  void setHasStackFrame(bool s) { HasStackFrame = s; }

  bool isStackRealigned() const { return StackRealigned; }
  void setStackRealigned(bool s) { StackRealigned = s; }

  bool hasCalleeSaveStackFreeSpace() const {
    return CalleeSaveStackHasFreeSpace;
  }
  void setCalleeSaveStackHasFreeSpace(bool s) {
    CalleeSaveStackHasFreeSpace = s;
  }
  bool isSplitCSR() const { return IsSplitCSR; }
  void setIsSplitCSR(bool s) { IsSplitCSR = s; }

  void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
  uint64_t getLocalStackSize() const { return LocalStackSize; }

  void setOutliningStyle(std::string Style) { OutliningStyle = Style; }
  std::optional<std::string> getOutliningStyle() const {
    return OutliningStyle;
  }

  void setCalleeSavedStackSize(unsigned Size) {
    CalleeSavedStackSize = Size;
    HasCalleeSavedStackSize = true;
  }

  // When CalleeSavedStackSize has not been set (for example when
  // some MachineIR pass is run in isolation), then recalculate
  // the CalleeSavedStackSize directly from the CalleeSavedInfo.
  // Note: This information can only be recalculated after PEI
  // has assigned offsets to the callee save objects.
  unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
    bool ValidateCalleeSavedStackSize = false;

#ifndef NDEBUG
    // Make sure the calculated size derived from the CalleeSavedInfo
    // equals the cached size that was calculated elsewhere (e.g. in
    // determineCalleeSaves).
    ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
#endif

    if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
      assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
      if (MFI.getCalleeSavedInfo().empty())
        return 0;

      int64_t MinOffset = std::numeric_limits<int64_t>::max();
      int64_t MaxOffset = std::numeric_limits<int64_t>::min();
      for (const auto &Info : MFI.getCalleeSavedInfo()) {
        int FrameIdx = Info.getFrameIdx();
        if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
          continue;
        int64_t Offset = MFI.getObjectOffset(FrameIdx);
        int64_t ObjSize = MFI.getObjectSize(FrameIdx);
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
        int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());
        int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      unsigned Size = alignTo(MaxOffset - MinOffset, 16);
      assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
             "Invalid size calculated for callee saves");
      return Size;
    }

    return getCalleeSavedStackSize();
  }

  unsigned getCalleeSavedStackSize() const {
    assert(HasCalleeSavedStackSize &&
           "CalleeSavedStackSize has not been calculated");
    return CalleeSavedStackSize;
  }

  // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
  void setSVECalleeSavedStackSize(unsigned Size) {
    SVECalleeSavedStackSize = Size;
  }
  unsigned getSVECalleeSavedStackSize() const {
    return SVECalleeSavedStackSize;
  }

  void setMinMaxSVECSFrameIndex(int Min, int Max) {
    MinSVECSFrameIndex = Min;
    MaxSVECSFrameIndex = Max;
  }

  int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
  int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }

  void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
  unsigned getNumLocalDynamicTLSAccesses() const {
    return NumLocalDynamicTLSAccesses;
  }

  std::optional<bool> hasRedZone() const { return HasRedZone; }
  void setHasRedZone(bool s) { HasRedZone = s; }

  int getVarArgsStackIndex() const { return VarArgsStackIndex; }
  void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }

  unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
  void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }

  int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
  void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }

  unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
  void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }

  int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
  void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }

  unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
  void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }

  unsigned getSRetReturnReg() const { return SRetReturnReg; }
  void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }

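  // Accessors for per-jump-table info: the byte size of each entry and the
  // PC-relative anchor symbol, as recorded via setJumpTableEntryInfo().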
  unsigned getJumpTableEntrySize(int Idx) const {
    return JumpTableEntryInfo[Idx].first;
  }
  MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
    return JumpTableEntryInfo[Idx].second;
  }
  void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
    if ((unsigned)Idx >= JumpTableEntryInfo.size())
      JumpTableEntryInfo.resize(Idx+1);
    JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
  }

  using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;

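  /// Instructions that are referenced by at least one Linker Optimization
  /// Hint (LOH) directive in this function.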
  const SetOfInstructions &getLOHRelated() const { return LOHRelated; }

  // Shortcuts for LOH related types.
  class MILOHDirective {
    MCLOHType Kind;

    /// Arguments of this directive. Order matters.
    SmallVector<const MachineInstr *, 3> Args;

  public:
    using LOHArgs = ArrayRef<const MachineInstr *>;

    MILOHDirective(MCLOHType Kind, LOHArgs Args)
        : Kind(Kind), Args(Args.begin(), Args.end()) {
      assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
    }

    MCLOHType getKind() const { return Kind; }
    LOHArgs getArgs() const { return Args; }
  };

  using MILOHArgs = MILOHDirective::LOHArgs;
  using MILOHContainer = SmallVector<MILOHDirective, 32>;

  const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }

  /// Add a LOH directive with the given @p Kind and @p Args.
  void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
    LOHContainerSet.push_back(MILOHDirective(Kind, Args));
    LOHRelated.insert(Args.begin(), Args.end());
  }

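  /// Registers (virtual and physical) that must be forwarded to every
  /// musttail call in this function; see ForwardedMustTailRegParms above.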
  SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
    return ForwardedMustTailRegParms;
  }

  std::optional<int> getTaggedBasePointerIndex() const {
    return TaggedBasePointerIndex;
  }
  void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }

  unsigned getTaggedBasePointerOffset() const {
    return TaggedBasePointerOffset;
  }
  void setTaggedBasePointerOffset(unsigned Offset) {
    TaggedBasePointerOffset = Offset;
  }

  int getCalleeSaveBaseToFrameRecordOffset() const {
    return CalleeSaveBaseToFrameRecordOffset;
  }
  void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
    CalleeSaveBaseToFrameRecordOffset = Offset;
  }

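  /// Whether the return address should be signed with PAC-RET for this
  /// function. The bool overload takes whether LR is spilled, which decides
  /// the result under the non-leaf-only default signing mode.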
  bool shouldSignReturnAddress(const MachineFunction &MF) const;
  bool shouldSignReturnAddress(bool SpillsLR) const;

  bool shouldSignWithBKey() const { return SignWithBKey; }
  bool isMTETagged() const { return IsMTETagged; }

  bool branchTargetEnforcement() const { return BranchTargetEnforcement; }

  void setHasSwiftAsyncContext(bool HasContext) {
    HasSwiftAsyncContext = HasContext;
  }
  bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }

  void setSwiftAsyncContextFrameIdx(int FI) {
    SwiftAsyncContextFrameIdx = FI;
  }
  int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }

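  /// Whether (synchronous or asynchronous) DWARF unwind info must be emitted
  /// for this function; the result is computed on first use and cached in the
  /// mutable members above.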
  bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
  bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;

private:
  // Hold the lists of LOHs.
  MILOHContainer LOHContainerSet;
  SetOfInstructions LOHRelated;

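  // (entry size, PC-relative anchor symbol) pair for each jump table, indexed
  // by jump table index.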
  SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
};

namespace yaml {
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
  std::optional<bool> HasRedZone;

  AArch64FunctionInfo() = default;
  AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~AArch64FunctionInfo() = default;
};

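// YAML mapping for the AArch64-specific machine function info; only the
// red-zone flag is serialized at the moment, so in MIR this appears roughly
// as:
//
//   machineFunctionInfo:
//     hasRedZone: false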
template <> struct MappingTraits<AArch64FunctionInfo> {
  static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
    YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
  }
};

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H