//===- AArch64SLSHardening.cpp - Harden Straight Line Misspeculation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass to insert code to mitigate against side channel
// vulnerabilities that may happen under straight line miss-speculation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/IndirectThunks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <climits>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "aarch64-sls-hardening"

#define AARCH64_SLS_HARDENING_NAME "AArch64 sls hardening pass"

// Common name prefix of all thunks generated by this pass.
//
// The generic form is
// __llvm_slsblr_thunk_xN            for BLR thunks
// __llvm_slsblr_thunk_(aaz|abz)_xN  for BLRAAZ and BLRABZ thunks
// __llvm_slsblr_thunk_(aa|ab)_xN_xM for BLRAA and BLRAB thunks
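//
// For example, the thunk called instead of "BLR x8" is named
// __llvm_slsblr_thunk_x8, and the one called instead of "BLRAA x1, x2" is
// named __llvm_slsblr_thunk_aa_x1_x2.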
static constexpr StringRef CommonNamePrefix = "__llvm_slsblr_thunk_";

namespace {

struct ThunkKind {
  enum ThunkKindId {
    ThunkBR,
    ThunkBRAA,
    ThunkBRAB,
    ThunkBRAAZ,
    ThunkBRABZ,
  };

  ThunkKindId Id;
  StringRef NameInfix;
  bool HasXmOperand;
  bool NeedsPAuth;

  // Opcode to perform indirect jump from inside the thunk.
  unsigned BROpcode;

  static const ThunkKind BR;
  static const ThunkKind BRAA;
  static const ThunkKind BRAB;
  static const ThunkKind BRAAZ;
  static const ThunkKind BRABZ;
};

// Set of inserted thunks.
class ThunksSet {
public:
  static constexpr unsigned NumXRegisters = 32;

  // Given Xn register, returns n.
  static unsigned indexOfXReg(Register Xn);
  // Given n, returns Xn register.
  static Register xRegByIndex(unsigned N);

  ThunksSet &operator|=(const ThunksSet &Other) {
    BLRThunks |= Other.BLRThunks;
    BLRAAZThunks |= Other.BLRAAZThunks;
    BLRABZThunks |= Other.BLRABZThunks;
    for (unsigned I = 0; I < NumXRegisters; ++I)
      BLRAAThunks[I] |= Other.BLRAAThunks[I];
    for (unsigned I = 0; I < NumXRegisters; ++I)
      BLRABThunks[I] |= Other.BLRABThunks[I];

    return *this;
  }

  bool get(ThunkKind::ThunkKindId Kind, Register Xn, Register Xm) {
    reg_bitmask_t XnBit = reg_bitmask_t(1) << indexOfXReg(Xn);
    return getBitmask(Kind, Xm) & XnBit;
  }

  void set(ThunkKind::ThunkKindId Kind, Register Xn, Register Xm) {
    reg_bitmask_t XnBit = reg_bitmask_t(1) << indexOfXReg(Xn);
    getBitmask(Kind, Xm) |= XnBit;
  }

private:
  typedef uint32_t reg_bitmask_t;
  static_assert(NumXRegisters <= sizeof(reg_bitmask_t) * CHAR_BIT,
                "Bitmask is not wide enough to hold all Xn registers");

  // Bitmasks representing operands used, with n-th bit corresponding to Xn
  // register operand. If the instruction has a second operand (Xm), an array
  // of bitmasks is used, indexed by m.
  // Indexes corresponding to the forbidden x16, x17 and x30 registers are
  // always unset; for simplicity, the numbering has no holes.
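  //
  // For example, creating the thunk for "BLRAA x1, x2" is recorded by setting
  // bit 1 of BLRAAThunks[2].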
  reg_bitmask_t BLRThunks = 0;
  reg_bitmask_t BLRAAZThunks = 0;
  reg_bitmask_t BLRABZThunks = 0;
  reg_bitmask_t BLRAAThunks[NumXRegisters] = {};
  reg_bitmask_t BLRABThunks[NumXRegisters] = {};

  reg_bitmask_t &getBitmask(ThunkKind::ThunkKindId Kind, Register Xm) {
    switch (Kind) {
    case ThunkKind::ThunkBR:
      return BLRThunks;
    case ThunkKind::ThunkBRAAZ:
      return BLRAAZThunks;
    case ThunkKind::ThunkBRABZ:
      return BLRABZThunks;
    case ThunkKind::ThunkBRAA:
      return BLRAAThunks[indexOfXReg(Xm)];
    case ThunkKind::ThunkBRAB:
      return BLRABThunks[indexOfXReg(Xm)];
    }
    llvm_unreachable("Unknown ThunkKindId enum");
  }
};

struct SLSHardeningInserter : ThunkInserter<SLSHardeningInserter, ThunksSet> {
public:
  const char *getThunkPrefix() { return CommonNamePrefix.data(); }
  bool mayUseThunk(const MachineFunction &MF) {
    ComdatThunks &= !MF.getSubtarget<AArch64Subtarget>().hardenSlsNoComdat();
    // We insert barriers in addition to thunk calls, so check
    // hardenSlsRetBr() as well.
    return MF.getSubtarget<AArch64Subtarget>().hardenSlsBlr() ||
           MF.getSubtarget<AArch64Subtarget>().hardenSlsRetBr();
  }
  ThunksSet insertThunks(MachineModuleInfo &MMI, MachineFunction &MF,
                         ThunksSet ExistingThunks);
  void populateThunk(MachineFunction &MF);

private:
  bool ComdatThunks = true;

  bool hardenReturnsAndBRs(MachineModuleInfo &MMI, MachineBasicBlock &MBB);
  bool hardenBLRs(MachineModuleInfo &MMI, MachineBasicBlock &MBB,
                  ThunksSet &Thunks);

  void convertBLRToBL(MachineModuleInfo &MMI, MachineBasicBlock &MBB,
                      MachineBasicBlock::instr_iterator MBBI,
                      ThunksSet &Thunks);
};

} // end anonymous namespace

const ThunkKind ThunkKind::BR = {ThunkBR, "", /*HasXmOperand=*/false,
                                 /*NeedsPAuth=*/false, AArch64::BR};
const ThunkKind ThunkKind::BRAA = {ThunkBRAA, "aa_", /*HasXmOperand=*/true,
                                   /*NeedsPAuth=*/true, AArch64::BRAA};
const ThunkKind ThunkKind::BRAB = {ThunkBRAB, "ab_", /*HasXmOperand=*/true,
                                   /*NeedsPAuth=*/true, AArch64::BRAB};
const ThunkKind ThunkKind::BRAAZ = {ThunkBRAAZ, "aaz_", /*HasXmOperand=*/false,
                                    /*NeedsPAuth=*/true, AArch64::BRAAZ};
const ThunkKind ThunkKind::BRABZ = {ThunkBRABZ, "abz_", /*HasXmOperand=*/false,
                                    /*NeedsPAuth=*/true, AArch64::BRABZ};

// Returns thunk kind to emit, or nullptr if not a BLR* instruction.
static const ThunkKind *getThunkKind(unsigned OriginalOpcode) {
  switch (OriginalOpcode) {
  case AArch64::BLR:
  case AArch64::BLRNoIP:
    return &ThunkKind::BR;
  case AArch64::BLRAA:
    return &ThunkKind::BRAA;
  case AArch64::BLRAB:
    return &ThunkKind::BRAB;
  case AArch64::BLRAAZ:
    return &ThunkKind::BRAAZ;
  case AArch64::BLRABZ:
    return &ThunkKind::BRABZ;
  }
  return nullptr;
}

static bool isBLR(const MachineInstr &MI) {
  return getThunkKind(MI.getOpcode()) != nullptr;
}

unsigned ThunksSet::indexOfXReg(Register Reg) {
  assert(AArch64::GPR64RegClass.contains(Reg));
  assert(Reg != AArch64::X16 && Reg != AArch64::X17 && Reg != AArch64::LR);

  // Most Xn registers have consecutive ids, except for FP and XZR.
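  // For example, X0 maps to index 0 and X1 to index 1, while FP (x29) and XZR
  // map to indexes 29 and 31, respectively.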
  unsigned Result = (unsigned)Reg - (unsigned)AArch64::X0;
  if (Reg == AArch64::FP)
    Result = 29;
  else if (Reg == AArch64::XZR)
    Result = 31;

  assert(Result < NumXRegisters && "Internal register numbering changed");
  assert(AArch64::GPR64RegClass.getRegister(Result).id() == Reg &&
         "Internal register numbering changed");

  return Result;
}

Register ThunksSet::xRegByIndex(unsigned N) {
  return AArch64::GPR64RegClass.getRegister(N);
}

static void insertSpeculationBarrier(const AArch64Subtarget *ST,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     DebugLoc DL,
                                     bool AlwaysUseISBDSB = false) {
  assert(MBBI != MBB.begin() &&
         "Must not insert SpeculationBarrierEndBB as only instruction in MBB.");
  assert(std::prev(MBBI)->isBarrier() &&
         "SpeculationBarrierEndBB must only follow unconditional control flow "
         "instructions.");
  assert(std::prev(MBBI)->isTerminator() &&
         "SpeculationBarrierEndBB must only follow terminators.");
  const TargetInstrInfo *TII = ST->getInstrInfo();
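  // Use the single-instruction SB barrier when the subtarget has the SB
  // extension and the caller did not request AlwaysUseISBDSB; otherwise fall
  // back to the ISB+DSB based barrier.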
  unsigned BarrierOpc = ST->hasSB() && !AlwaysUseISBDSB
                            ? AArch64::SpeculationBarrierSBEndBB
                            : AArch64::SpeculationBarrierISBDSBEndBB;
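  // Avoid inserting a duplicate barrier if a speculation barrier is already
  // present at this point in the block.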
  if (MBBI == MBB.end() ||
      (MBBI->getOpcode() != AArch64::SpeculationBarrierSBEndBB &&
       MBBI->getOpcode() != AArch64::SpeculationBarrierISBDSBEndBB))
    BuildMI(MBB, MBBI, DL, TII->get(BarrierOpc));
}

ThunksSet SLSHardeningInserter::insertThunks(MachineModuleInfo &MMI,
                                             MachineFunction &MF,
                                             ThunksSet ExistingThunks) {
  const AArch64Subtarget *ST = &MF.getSubtarget<AArch64Subtarget>();

  for (auto &MBB : MF) {
    if (ST->hardenSlsRetBr())
      hardenReturnsAndBRs(MMI, MBB);
    if (ST->hardenSlsBlr())
      hardenBLRs(MMI, MBB, ExistingThunks);
  }
  return ExistingThunks;
}

bool SLSHardeningInserter::hardenReturnsAndBRs(MachineModuleInfo &MMI,
                                               MachineBasicBlock &MBB) {
  const AArch64Subtarget *ST =
      &MBB.getParent()->getSubtarget<AArch64Subtarget>();
  bool Modified = false;
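  // Walk the terminators of this block and place a speculation barrier
  // immediately after every return and indirect branch.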
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(), E = MBB.end();
  MachineBasicBlock::iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (MI.isReturn() || isIndirectBranchOpcode(MI.getOpcode())) {
      assert(MI.isTerminator());
      insertSpeculationBarrier(ST, MBB, std::next(MBBI), MI.getDebugLoc());
      Modified = true;
    }
  }
  return Modified;
}

// Currently, the longest possible thunk name is
//   __llvm_slsblr_thunk_aa_xNN_xMM
// which is 31 characters (without the '\0' character).
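// For example, createThunkName(ThunkKind::BRAA, AArch64::X1, AArch64::X2)
// returns "__llvm_slsblr_thunk_aa_x1_x2".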
static SmallString<32> createThunkName(const ThunkKind &Kind, Register Xn,
                                       Register Xm) {
  unsigned N = ThunksSet::indexOfXReg(Xn);
  if (!Kind.HasXmOperand)
    return formatv("{0}{1}x{2}", CommonNamePrefix, Kind.NameInfix, N);

  unsigned M = ThunksSet::indexOfXReg(Xm);
  return formatv("{0}{1}x{2}_x{3}", CommonNamePrefix, Kind.NameInfix, N, M);
}

static std::tuple<const ThunkKind &, Register, Register>
parseThunkName(StringRef ThunkName) {
  assert(ThunkName.starts_with(CommonNamePrefix) &&
         "Should be filtered out by ThunkInserter");
  // Thunk name suffix, such as "x1" or "aa_x2_x3".
  StringRef NameSuffix = ThunkName.drop_front(CommonNamePrefix.size());

  // Parse thunk kind based on thunk name infix.
  const ThunkKind &Kind = *StringSwitch<const ThunkKind *>(NameSuffix)
                               .StartsWith("aa_", &ThunkKind::BRAA)
                               .StartsWith("ab_", &ThunkKind::BRAB)
                               .StartsWith("aaz_", &ThunkKind::BRAAZ)
                               .StartsWith("abz_", &ThunkKind::BRABZ)
                               .Default(&ThunkKind::BR);

  auto ParseRegName = [](StringRef Name) {
    unsigned N;

    assert(Name.starts_with("x") && "xN register name expected");
    bool Fail = Name.drop_front(1).getAsInteger(/*Radix=*/10, N);
    assert(!Fail && N < ThunksSet::NumXRegisters && "Unexpected register");
    (void)Fail;

    return ThunksSet::xRegByIndex(N);
  };

  // For example, "x1" or "x2_x3".
  StringRef RegsStr = NameSuffix.drop_front(Kind.NameInfix.size());
  StringRef XnStr, XmStr;
  std::tie(XnStr, XmStr) = RegsStr.split('_');

  // Parse register operands.
  Register Xn = ParseRegName(XnStr);
  Register Xm = Kind.HasXmOperand ? ParseRegName(XmStr) : AArch64::NoRegister;

  return std::make_tuple(std::ref(Kind), Xn, Xm);
}

void SLSHardeningInserter::populateThunk(MachineFunction &MF) {
  assert(MF.getFunction().hasComdat() == ComdatThunks &&
         "ComdatThunks value changed since MF creation");
  Register Xn, Xm;
  auto KindAndRegs = parseThunkName(MF.getName());
  const ThunkKind &Kind = std::get<0>(KindAndRegs);
  std::tie(std::ignore, Xn, Xm) = KindAndRegs;

  const TargetInstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();

  // Depending on whether this pass is in the same FunctionPassManager as the
  // IR->MIR conversion, the thunk may be completely empty, or contain a single
  // basic block with a single return instruction. Normalise it to contain a
  // single empty basic block.
  if (MF.size() == 1) {
    assert(MF.front().size() == 1);
    assert(MF.front().front().getOpcode() == AArch64::RET);
    MF.front().erase(MF.front().begin());
  } else {
    assert(MF.size() == 0);
    MF.push_back(MF.CreateMachineBasicBlock());
  }

  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  //  These thunks need to consist of the following instructions:
  //  __llvm_slsblr_thunk_...:
  //      MOV x16, xN     ; BR* instructions are not compatible with "BTI c"
  //                      ; branch target unless xN is x16 or x17.
  //      BR* ...         ; One of: BR        x16
  //                      ;         BRA(A|B)  x16, xM
  //                      ;         BRA(A|B)Z x16
  //      barrierInsts
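  //
  //  For example, the thunk __llvm_slsblr_thunk_aa_x1_x2 (reached from
  //  "BLRAA x1, x2") consists of:
  //      MOV  x16, x1
  //      BRAA x16, x2
  //  followed by the speculation barrier instructions.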
  Entry->addLiveIn(Xn);
  // MOV X16, Reg == ORR X16, XZR, Reg, LSL #0
  BuildMI(Entry, DebugLoc(), TII->get(AArch64::ORRXrs), AArch64::X16)
      .addReg(AArch64::XZR)
      .addReg(Xn)
      .addImm(0);
  MachineInstrBuilder Builder =
      BuildMI(Entry, DebugLoc(), TII->get(Kind.BROpcode)).addReg(AArch64::X16);
  if (Xm != AArch64::NoRegister) {
    Entry->addLiveIn(Xm);
    Builder.addReg(Xm);
  }

  // Make sure the thunks do not make use of the SB extension: a function that
  // calls the thunk may have the SB extension disabled locally, even though it
  // is enabled for the rest of the module. Therefore set AlwaysUseISBDSB to
  // true.
  insertSpeculationBarrier(&MF.getSubtarget<AArch64Subtarget>(), *Entry,
                           Entry->end(), DebugLoc(), true /*AlwaysUseISBDSB*/);
}

void SLSHardeningInserter::convertBLRToBL(
    MachineModuleInfo &MMI, MachineBasicBlock &MBB,
    MachineBasicBlock::instr_iterator MBBI, ThunksSet &Thunks) {
  // Transform a BLR* instruction (one of BLR, BLRAA/BLRAB or BLRAAZ/BLRABZ) to
  // a BL to the thunk containing BR, BRAA/BRAB or BRAAZ/BRABZ, respectively.
  //
  // Before:
  //   |-----------------------------|
  //   |      ...                    |
  //   |  instI                      |
  //   |  BLR* xN or BLR* xN, xM     |
  //   |  instJ                      |
  //   |      ...                    |
  //   |-----------------------------|
  //
  // After:
  //   |-----------------------------|
  //   |      ...                    |
  //   |  instI                      |
  //   |  BL __llvm_slsblr_thunk_... |
  //   |  instJ                      |
  //   |      ...                    |
  //   |-----------------------------|
  //
  //   __llvm_slsblr_thunk_...:
  //   |-----------------------------|
  //   |  MOV x16, xN                |
  //   |  BR* x16 or BR* x16, xM     |
  //   |  barrierInsts               |
  //   |-----------------------------|
  //
  // This function needs to transform the BLR* instruction into a BL with the
  // correct thunk name and lazily create the thunk if it does not exist yet.
  //
  // Since linkers are allowed to clobber X16 and X17 on function calls, the
  // above mitigation only works if the original BLR* instruction had neither
  // X16 nor X17 as one of its operands. Earlier code generation must make sure
  // that no such BLR* instruction is produced when the mitigation is enabled.

  MachineInstr &BLR = *MBBI;
  assert(isBLR(BLR));
  const ThunkKind &Kind = *getThunkKind(BLR.getOpcode());

  unsigned NumRegOperands = Kind.HasXmOperand ? 2 : 1;
  assert(BLR.getNumExplicitOperands() == NumRegOperands &&
         "Expected one or two register inputs");
  Register Xn = BLR.getOperand(0).getReg();
  Register Xm =
      Kind.HasXmOperand ? BLR.getOperand(1).getReg() : AArch64::NoRegister;

  DebugLoc DL = BLR.getDebugLoc();

  MachineFunction &MF = *MBBI->getMF();
  MCContext &Context = MBB.getParent()->getContext();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  auto ThunkName = createThunkName(Kind, Xn, Xm);
  MCSymbol *Sym = Context.getOrCreateSymbol(ThunkName);

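  // Lazily create the thunk function the first time this combination of kind
  // and register operands is encountered.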
  if (!Thunks.get(Kind.Id, Xn, Xm)) {
    StringRef TargetAttrs = Kind.NeedsPAuth ? "+pauth" : "";
    Thunks.set(Kind.Id, Xn, Xm);
    createThunkFunction(MMI, ThunkName, ComdatThunks, TargetAttrs);
  }

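  // Insert the BL to the thunk immediately before the BLR* instruction that is
  // being replaced.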
  MachineInstr *BL = BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL)).addSym(Sym);

  // Now copy the implicit operands from BLR to BL and copy other necessary
  // info.
  // However, both BLR and BL instructions implicitly use SP and implicitly
  // define LR. Blindly copying implicit operands would result in the SP and LR
  // operands being present multiple times. While this may not be too much of
  // an issue, let's avoid that for cleanliness, by removing those implicit
  // operands from the BL created above before we copy over all implicit
  // operands from the BLR.
  int ImpLROpIdx = -1;
  int ImpSPOpIdx = -1;
  for (unsigned OpIdx = BL->getNumExplicitOperands();
       OpIdx < BL->getNumOperands(); OpIdx++) {
    MachineOperand Op = BL->getOperand(OpIdx);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == AArch64::LR && Op.isDef())
      ImpLROpIdx = OpIdx;
    if (Op.getReg() == AArch64::SP && !Op.isDef())
      ImpSPOpIdx = OpIdx;
  }
  assert(ImpLROpIdx != -1);
  assert(ImpSPOpIdx != -1);
  int FirstOpIdxToRemove = std::max(ImpLROpIdx, ImpSPOpIdx);
  int SecondOpIdxToRemove = std::min(ImpLROpIdx, ImpSPOpIdx);
  BL->removeOperand(FirstOpIdxToRemove);
  BL->removeOperand(SecondOpIdxToRemove);
  // Now copy over the implicit operands from the original BLR
  BL->copyImplicitOps(MF, BLR);
  MF.moveCallSiteInfo(&BLR, BL);
  // Also add the register operands of the original BLR* instruction
  // as being used in the called thunk.
  for (unsigned OpIdx = 0; OpIdx < NumRegOperands; ++OpIdx) {
    MachineOperand &Op = BLR.getOperand(OpIdx);
    BL->addOperand(MachineOperand::CreateReg(Op.getReg(), /*isDef=*/false,
                                             /*isImp=*/true, Op.isKill()));
  }
  // Remove BLR instruction
  MBB.erase(MBBI);
}

bool SLSHardeningInserter::hardenBLRs(MachineModuleInfo &MMI,
                                      MachineBasicBlock &MBB,
                                      ThunksSet &Thunks) {
  bool Modified = false;
  MachineBasicBlock::instr_iterator MBBI = MBB.instr_begin(),
                                    E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (isBLR(MI)) {
      convertBLRToBL(MMI, MBB, MBBI, Thunks);
      Modified = true;
    }
  }
  return Modified;
}

namespace {
class AArch64SLSHardening : public ThunkInserterPass<SLSHardeningInserter> {
public:
  static char ID;

  AArch64SLSHardening() : ThunkInserterPass(ID) {}

  StringRef getPassName() const override { return AARCH64_SLS_HARDENING_NAME; }
};

} // end anonymous namespace

char AArch64SLSHardening::ID = 0;

INITIALIZE_PASS(AArch64SLSHardening, "aarch64-sls-hardening",
                AARCH64_SLS_HARDENING_NAME, false, false)

FunctionPass *llvm::createAArch64SLSHardeningPass() {
  return new AArch64SLSHardening();
}