xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp (revision 770cf0a5f02dc8983a89c6568d741fbc25baa999)
1 //=- AArch64MachineFunctionInfo.cpp - AArch64 Machine Function Info ---------=//
2 
3 //
4 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5 // See https://llvm.org/LICENSE.txt for license information.
6 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// \file
11 /// This file implements AArch64-specific per-machine-function
12 /// information.
13 ///
14 //===----------------------------------------------------------------------===//
15 
16 #include "AArch64MachineFunctionInfo.h"
17 #include "AArch64InstrInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "llvm/IR/Constants.h"
20 #include "llvm/IR/Metadata.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/MC/MCAsmInfo.h"
23 
24 using namespace llvm;
25 
26 yaml::AArch64FunctionInfo::AArch64FunctionInfo(
27     const llvm::AArch64FunctionInfo &MFI)
28     : HasRedZone(MFI.hasRedZone()),
29       StackSizeSVE(MFI.hasCalculatedStackSizeSVE()
30                        ? std::optional<uint64_t>(MFI.getStackSizeSVE())
31                        : std::nullopt) {}
32 
/// Dispatch YAML (de)serialization of this struct to its MappingTraits
/// specialization.
void yaml::AArch64FunctionInfo::mappingImpl(yaml::IO &YamlIO) {
  MappingTraits<AArch64FunctionInfo>::mapping(YamlIO, *this);
}
36 
37 void AArch64FunctionInfo::initializeBaseYamlFields(
38     const yaml::AArch64FunctionInfo &YamlMFI) {
39   if (YamlMFI.HasRedZone)
40     HasRedZone = YamlMFI.HasRedZone;
41   if (YamlMFI.StackSizeSVE)
42     setStackSizeSVE(*YamlMFI.StackSizeSVE);
43 }
44 
45 static std::pair<bool, bool> GetSignReturnAddress(const Function &F) {
46   if (F.hasFnAttribute("ptrauth-returns"))
47     return {true, false}; // non-leaf
48   // The function should be signed in the following situations:
49   // - sign-return-address=all
50   // - sign-return-address=non-leaf and the functions spills the LR
51   if (!F.hasFnAttribute("sign-return-address"))
52     return {false, false};
53 
54   StringRef Scope = F.getFnAttribute("sign-return-address").getValueAsString();
55   if (Scope == "none")
56     return {false, false};
57 
58   if (Scope == "all")
59     return {true, true};
60 
61   assert(Scope == "non-leaf");
62   return {true, false};
63 }
64 
65 static bool ShouldSignWithBKey(const Function &F, const AArch64Subtarget &STI) {
66   if (F.hasFnAttribute("ptrauth-returns"))
67     return true;
68   if (!F.hasFnAttribute("sign-return-address-key")) {
69     if (STI.getTargetTriple().isOSWindows())
70       return true;
71     return false;
72   }
73 
74   const StringRef Key =
75       F.getFnAttribute("sign-return-address-key").getValueAsString();
76   assert(Key == "a_key" || Key == "b_key");
77   return Key == "b_key";
78 }
79 
80 static bool hasELFSignedGOTHelper(const Function &F,
81                                   const AArch64Subtarget *STI) {
82   if (!STI->getTargetTriple().isOSBinFormatELF())
83     return false;
84   const Module *M = F.getParent();
85   const auto *Flag = mdconst::extract_or_null<ConstantInt>(
86       M->getModuleFlag("ptrauth-elf-got"));
87   if (Flag && Flag->getZExtValue() == 1)
88     return true;
89   return false;
90 }
91 
/// Construct the per-function AArch64 state from IR-level function
/// attributes, module flags, and the subtarget. Runs once when the
/// MachineFunction is created.
AArch64FunctionInfo::AArch64FunctionInfo(const Function &F,
                                         const AArch64Subtarget *STI) {
  // If we already know that the function doesn't have a redzone, set
  // HasRedZone here.
  if (F.hasFnAttribute(Attribute::NoRedZone))
    HasRedZone = false;
  // Pointer-authentication configuration (return-address signing scope,
  // signing key, and signed-GOT usage) is derived from attributes/flags.
  std::tie(SignReturnAddress, SignReturnAddressAll) = GetSignReturnAddress(F);
  SignWithBKey = ShouldSignWithBKey(F, *STI);
  HasELFSignedGOT = hasELFSignedGOTHelper(F, STI);
  // TODO: skip functions that have no instrumented allocas for optimization
  IsMTETagged = F.hasFnAttribute(Attribute::SanitizeMemTag);

  // BTI/PAuthLR are set on the function attribute.
  BranchTargetEnforcement = F.hasFnAttribute("branch-target-enforcement");
  BranchProtectionPAuthLR = F.hasFnAttribute("branch-protection-pauth-lr");

  // Parse the SME function attributes.
  SMEFnAttrs = SMEAttrs(F);

  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute. This is a safe default because it is the
  // smallest possible guard page size.
  uint64_t ProbeSize = 4096;
  // The function attribute takes precedence over the module-level flag.
  if (F.hasFnAttribute("stack-probe-size"))
    ProbeSize = F.getFnAttributeAsParsedInteger("stack-probe-size");
  else if (const auto *PS = mdconst::extract_or_null<ConstantInt>(
               F.getParent()->getModuleFlag("stack-probe-size")))
    ProbeSize = PS->getZExtValue();
  assert(int64_t(ProbeSize) > 0 && "Invalid stack probe size");

  if (STI->isTargetWindows()) {
    // On Windows, probing is on by default; "no-stack-arg-probe" opts out.
    if (!F.hasFnAttribute("no-stack-arg-probe"))
      StackProbeSize = ProbeSize;
  } else {
    // Round down to the stack alignment.
    uint64_t StackAlign =
        STI->getFrameLowering()->getTransientStackAlign().value();
    ProbeSize = std::max(StackAlign, ProbeSize & ~(StackAlign - 1U));
    // On non-Windows targets, probing is opt-in via "probe-stack" (function
    // attribute or module flag); only the "inline-asm" style is supported.
    StringRef ProbeKind;
    if (F.hasFnAttribute("probe-stack"))
      ProbeKind = F.getFnAttribute("probe-stack").getValueAsString();
    else if (const auto *PS = dyn_cast_or_null<MDString>(
                 F.getParent()->getModuleFlag("probe-stack")))
      ProbeKind = PS->getString();
    if (ProbeKind.size()) {
      if (ProbeKind != "inline-asm")
        report_fatal_error("Unsupported stack probing method");
      StackProbeSize = ProbeSize;
    }
  }
}
143 
/// Copy this function info into DestMF's allocator for a cloned machine
/// function. The MBB mapping is unused here since this clone does not
/// translate any basic-block references.
MachineFunctionInfo *AArch64FunctionInfo::clone(
    BumpPtrAllocator &Allocator, MachineFunction &DestMF,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
    const {
  return DestMF.cloneInfo<AArch64FunctionInfo>(*this);
}
150 
151 bool AArch64FunctionInfo::shouldSignReturnAddress(bool SpillsLR) const {
152   if (!SignReturnAddress)
153     return false;
154   if (SignReturnAddressAll)
155     return true;
156   return SpillsLR;
157 }
158 
159 static bool isLRSpilled(const MachineFunction &MF) {
160   return llvm::any_of(
161       MF.getFrameInfo().getCalleeSavedInfo(),
162       [](const auto &Info) { return Info.getReg() == AArch64::LR; });
163 }
164 
/// Convenience overload: derive the LR-spill state from MF's frame info
/// and forward to the bool overload.
bool AArch64FunctionInfo::shouldSignReturnAddress(
    const MachineFunction &MF) const {
  return shouldSignReturnAddress(isLRSpilled(MF));
}
169 
170 bool AArch64FunctionInfo::needsShadowCallStackPrologueEpilogue(
171     MachineFunction &MF) const {
172   if (!(isLRSpilled(MF) &&
173         MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)))
174     return false;
175 
176   if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18))
177     report_fatal_error("Must reserve x18 to use shadow call stack");
178 
179   return true;
180 }
181 
182 bool AArch64FunctionInfo::needsDwarfUnwindInfo(
183     const MachineFunction &MF) const {
184   if (!NeedsDwarfUnwindInfo)
185     NeedsDwarfUnwindInfo = MF.needsFrameMoves() &&
186                            !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
187 
188   return *NeedsDwarfUnwindInfo;
189 }
190 
191 bool AArch64FunctionInfo::needsAsyncDwarfUnwindInfo(
192     const MachineFunction &MF) const {
193   if (!NeedsAsyncDwarfUnwindInfo) {
194     const Function &F = MF.getFunction();
195     const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
196     //  The check got "minsize" is because epilogue unwind info is not emitted
197     //  (yet) for homogeneous epilogues, outlined functions, and functions
198     //  outlined from.
199     NeedsAsyncDwarfUnwindInfo =
200         needsDwarfUnwindInfo(MF) &&
201         ((F.getUWTableKind() == UWTableKind::Async && !F.hasMinSize()) ||
202          AFI->hasStreamingModeChanges());
203   }
204   return *NeedsAsyncDwarfUnwindInfo;
205 }
206