//===- AArch64SLSHardening.cpp - Harden Straight Line Misspeculation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass to insert code to mitigate against side channel
// vulnerabilities that may happen under straight line miss-speculation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/IndirectThunks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "aarch64-sls-hardening"

#define AARCH64_SLS_HARDENING_NAME "AArch64 sls hardening pass"

namespace {

class AArch64SLSHardening : public MachineFunctionPass {
public:
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *ST;

  static char ID;

  AArch64SLSHardening() : MachineFunctionPass(ID) {
    initializeAArch64SLSHardeningPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override { return AARCH64_SLS_HARDENING_NAME; }

private:
  bool hardenReturnsAndBRs(MachineBasicBlock &MBB) const;
  bool hardenBLRs(MachineBasicBlock &MBB) const;
  MachineBasicBlock &ConvertBLRToBL(MachineBasicBlock &MBB,
                                    MachineBasicBlock::instr_iterator) const;
};

} // end anonymous namespace

char AArch64SLSHardening::ID = 0;

INITIALIZE_PASS(AArch64SLSHardening, "aarch64-sls-hardening",
                AARCH64_SLS_HARDENING_NAME, false, false)

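// Insert a speculation barrier at position MBBI, i.e. right after the
// unconditional control-flow instruction that must precede it, unless MBBI
// already points at such a barrier. The single SB instruction is used when the
// subtarget has it and AlwaysUseISBDSB is false; otherwise the ISB+DSB based
// barrier is used.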
static void insertSpeculationBarrier(const AArch64Subtarget *ST,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     DebugLoc DL,
                                     bool AlwaysUseISBDSB = false) {
  assert(MBBI != MBB.begin() &&
         "Must not insert SpeculationBarrierEndBB as only instruction in MBB.");
  assert(std::prev(MBBI)->isBarrier() &&
         "SpeculationBarrierEndBB must only follow unconditional control flow "
         "instructions.");
  assert(std::prev(MBBI)->isTerminator() &&
         "SpeculationBarrierEndBB must only follow terminators.");
  const TargetInstrInfo *TII = ST->getInstrInfo();
  unsigned BarrierOpc = ST->hasSB() && !AlwaysUseISBDSB
                            ? AArch64::SpeculationBarrierSBEndBB
                            : AArch64::SpeculationBarrierISBDSBEndBB;
  if (MBBI == MBB.end() ||
      (MBBI->getOpcode() != AArch64::SpeculationBarrierSBEndBB &&
       MBBI->getOpcode() != AArch64::SpeculationBarrierISBDSBEndBB))
    BuildMI(MBB, MBBI, DL, TII->get(BarrierOpc));
}

bool AArch64SLSHardening::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<AArch64Subtarget>();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();

  bool Modified = false;
  for (auto &MBB : MF) {
    Modified |= hardenReturnsAndBRs(MBB);
    Modified |= hardenBLRs(MBB);
  }

  return Modified;
}

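// Return true if MI is an indirect call that this pass knows how to harden.
// The BLRA* (pointer-authenticating) variants are rejected loudly, as LLVM's
// code generator does not produce them yet.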
static bool isBLR(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AArch64::BLR:
  case AArch64::BLRNoIP:
    return true;
  case AArch64::BLRAA:
  case AArch64::BLRAB:
  case AArch64::BLRAAZ:
  case AArch64::BLRABZ:
    llvm_unreachable("Currently, LLVM's code generator does not support "
                     "producing BLRA* instructions. Therefore, there's no "
                     "support in this pass for those instructions.");
  }
  return false;
}

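// Insert a speculation barrier after every return and indirect branch
// terminator in MBB.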
bool AArch64SLSHardening::hardenReturnsAndBRs(MachineBasicBlock &MBB) const {
  if (!ST->hardenSlsRetBr())
    return false;
  bool Modified = false;
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(), E = MBB.end();
  MachineBasicBlock::iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (MI.isReturn() || isIndirectBranchOpcode(MI.getOpcode())) {
      assert(MI.isTerminator());
      insertSpeculationBarrier(ST, MBB, std::next(MBBI), MI.getDebugLoc());
      Modified = true;
    }
  }
  return Modified;
}

static const char SLSBLRNamePrefix[] = "__llvm_slsblr_thunk_";

static const struct ThunkNameAndReg {
  const char *Name;
  Register Reg;
} SLSBLRThunks[] = {
  { "__llvm_slsblr_thunk_x0",  AArch64::X0},
  { "__llvm_slsblr_thunk_x1",  AArch64::X1},
  { "__llvm_slsblr_thunk_x2",  AArch64::X2},
  { "__llvm_slsblr_thunk_x3",  AArch64::X3},
  { "__llvm_slsblr_thunk_x4",  AArch64::X4},
  { "__llvm_slsblr_thunk_x5",  AArch64::X5},
  { "__llvm_slsblr_thunk_x6",  AArch64::X6},
  { "__llvm_slsblr_thunk_x7",  AArch64::X7},
  { "__llvm_slsblr_thunk_x8",  AArch64::X8},
  { "__llvm_slsblr_thunk_x9",  AArch64::X9},
  { "__llvm_slsblr_thunk_x10", AArch64::X10},
  { "__llvm_slsblr_thunk_x11", AArch64::X11},
  { "__llvm_slsblr_thunk_x12", AArch64::X12},
  { "__llvm_slsblr_thunk_x13", AArch64::X13},
  { "__llvm_slsblr_thunk_x14", AArch64::X14},
  { "__llvm_slsblr_thunk_x15", AArch64::X15},
  // X16 and X17 are deliberately missing, as the mitigation requires those
  // registers not to be used in BLR. See the comment in ConvertBLRToBL for
  // more details.
  { "__llvm_slsblr_thunk_x18", AArch64::X18},
  { "__llvm_slsblr_thunk_x19", AArch64::X19},
  { "__llvm_slsblr_thunk_x20", AArch64::X20},
  { "__llvm_slsblr_thunk_x21", AArch64::X21},
  { "__llvm_slsblr_thunk_x22", AArch64::X22},
  { "__llvm_slsblr_thunk_x23", AArch64::X23},
  { "__llvm_slsblr_thunk_x24", AArch64::X24},
  { "__llvm_slsblr_thunk_x25", AArch64::X25},
  { "__llvm_slsblr_thunk_x26", AArch64::X26},
  { "__llvm_slsblr_thunk_x27", AArch64::X27},
  { "__llvm_slsblr_thunk_x28", AArch64::X28},
  { "__llvm_slsblr_thunk_x29", AArch64::FP},
  // X30 is deliberately missing, for reasons similar to those for X16 and
  // X17.
  { "__llvm_slsblr_thunk_x31", AArch64::XZR},
};

namespace {
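// Thunk inserter for the __llvm_slsblr_thunk_xN helper functions; their bodies
// are emitted by populateThunk below.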
struct SLSBLRThunkInserter : ThunkInserter<SLSBLRThunkInserter> {
  const char *getThunkPrefix() { return SLSBLRNamePrefix; }
  bool mayUseThunk(const MachineFunction &MF, bool InsertedThunks) {
    if (InsertedThunks)
      return false;
    ComdatThunks &= !MF.getSubtarget<AArch64Subtarget>().hardenSlsNoComdat();
    // FIXME: This could also check if there are any BLRs in the function
    // to more accurately reflect if a thunk will be needed.
    return MF.getSubtarget<AArch64Subtarget>().hardenSlsBlr();
  }
  bool insertThunks(MachineModuleInfo &MMI, MachineFunction &MF);
  void populateThunk(MachineFunction &MF);

private:
  bool ComdatThunks = true;
};
} // namespace

bool SLSBLRThunkInserter::insertThunks(MachineModuleInfo &MMI,
                                       MachineFunction &MF) {
  // FIXME: It probably would be possible to filter which thunks to produce
  // based on which registers are actually used in BLR instructions in this
  // function. But would that be a worthwhile optimization?
  for (auto T : SLSBLRThunks)
    createThunkFunction(MMI, T.Name, ComdatThunks);
  return true;
}

void SLSBLRThunkInserter::populateThunk(MachineFunction &MF) {
  // FIXME: Is there a better way to communicate the register number than
  // through the thunk name and a lookup table?
  assert(MF.getName().starts_with(getThunkPrefix()));
  auto ThunkIt = llvm::find_if(
      SLSBLRThunks, [&MF](auto T) { return T.Name == MF.getName(); });
  assert(ThunkIt != std::end(SLSBLRThunks));
  Register ThunkReg = ThunkIt->Reg;

  const TargetInstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  assert(MF.size() == 1);
  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  //  These thunks need to consist of the following instructions:
  //  __llvm_slsblr_thunk_xN:
  //      BR xN
  //      barrierInsts
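  //
  // The branch below goes through X16 rather than branching to ThunkReg
  // directly. One architectural note on why that choice is safe: with BTI
  // enabled, "BR X16" (like the BLR it replaces) may target the "BTI c"
  // landing pad at a function entry, whereas a plain "BR xN" would require a
  // "BTI j" landing pad instead.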
  Entry->addLiveIn(ThunkReg);
  // MOV X16, ThunkReg == ORR X16, XZR, ThunkReg, LSL #0
  BuildMI(Entry, DebugLoc(), TII->get(AArch64::ORRXrs), AArch64::X16)
      .addReg(AArch64::XZR)
      .addReg(ThunkReg)
      .addImm(0);
  BuildMI(Entry, DebugLoc(), TII->get(AArch64::BR)).addReg(AArch64::X16);
  // Make sure the thunks do not make use of the SB extension: some function
  // in the module may have SB disabled locally (even though it is enabled for
  // the module otherwise) and still call the thunk. Therefore, set
  // AlwaysUseISBDSB to true.
  insertSpeculationBarrier(&MF.getSubtarget<AArch64Subtarget>(), *Entry,
                           Entry->end(), DebugLoc(), true /*AlwaysUseISBDSB*/);
}

MachineBasicBlock &AArch64SLSHardening::ConvertBLRToBL(
    MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator MBBI) const {
  // Transform a BLR to a BL as follows:
  // Before:
  //   |-----------------------------|
  //   |      ...                    |
  //   |  instI                      |
  //   |  BLR xN                     |
  //   |  instJ                      |
  //   |      ...                    |
  //   |-----------------------------|
  //
  // After:
  //   |-----------------------------|
  //   |      ...                    |
  //   |  instI                      |
  //   |  BL __llvm_slsblr_thunk_xN  |
  //   |  instJ                      |
  //   |      ...                    |
  //   |-----------------------------|
  //
  //   __llvm_slsblr_thunk_xN:
  //   |-----------------------------|
  //   |  BR xN                      |
  //   |  barrierInsts               |
  //   |-----------------------------|
  //
  // The __llvm_slsblr_thunk_xN thunks are created by the SLSBLRThunkInserter.
  // This function merely needs to transform BLR xN into BL
  // __llvm_slsblr_thunk_xN.
  //
  // Since linkers are allowed to clobber X16 and X17 on function calls, the
  // above mitigation only works if the original BLR instruction was neither
  // BLR X16 nor BLR X17. Earlier code generation must ensure that no BLR X16
  // or BLR X17 is produced when the mitigation is enabled.

  MachineInstr &BLR = *MBBI;
  assert(isBLR(BLR));
  unsigned BLOpcode;
  Register Reg;
  bool RegIsKilled;
  switch (BLR.getOpcode()) {
  case AArch64::BLR:
  case AArch64::BLRNoIP:
    BLOpcode = AArch64::BL;
    Reg = BLR.getOperand(0).getReg();
    assert(Reg != AArch64::X16 && Reg != AArch64::X17 && Reg != AArch64::LR);
    RegIsKilled = BLR.getOperand(0).isKill();
    break;
  case AArch64::BLRAA:
  case AArch64::BLRAB:
  case AArch64::BLRAAZ:
  case AArch64::BLRABZ:
    llvm_unreachable("BLRA instructions cannot yet be produced by LLVM, "
                     "therefore there is no need to support them for now.");
  default:
    llvm_unreachable("unhandled BLR");
  }
  DebugLoc DL = BLR.getDebugLoc();

  // If we'd like to also support BLRAA and BLRAB instructions, we'd need
  // many more kinds of thunks.
  // For example, a
  //
  // BLRAA xN, xM
  //
  // instruction would probably need to be transformed to something like:
  //
  // BL __llvm_slsblraa_thunk_x<N>_x<M>
  //
  // __llvm_slsblraa_thunk_x<N>_x<M>:
  //   BRAA x<N>, x<M>
  //   barrierInsts
  //
  // Given that about 30 different values of N and about 30 different values
  // of M are possible in the above, with the current way of producing
  // indirect thunks we'd be producing about 30 times 30, i.e. about 900
  // thunks (most of which might never actually be called). This would
  // multiply further by two to support both the BLRAA and BLRAB variants of
  // those instructions.
  // If we wanted to support this, we'd probably need a different way to
  // produce thunk functions, based on which variants are actually needed,
  // rather than producing all possible variants.
  // So far, LLVM never produces BLRA* instructions, so let's leave this for
  // the future, when LLVM can start producing them.
  MachineFunction &MF = *MBBI->getMF();
  MCContext &Context = MBB.getParent()->getContext();
  auto ThunkIt =
      llvm::find_if(SLSBLRThunks, [Reg](auto T) { return T.Reg == Reg; });
  assert(ThunkIt != std::end(SLSBLRThunks));
  MCSymbol *Sym = Context.getOrCreateSymbol(ThunkIt->Name);

  MachineInstr *BL = BuildMI(MBB, MBBI, DL, TII->get(BLOpcode)).addSym(Sym);

  // Now copy the implicit operands from BLR to BL and copy other necessary
  // info.
  // However, both BLR and BL instructions implicitly use SP and implicitly
  // define LR. Blindly copying implicit operands would result in the SP and
  // LR operands being present multiple times. While this may not be too much
  // of an issue, let's avoid that for cleanliness, by removing those implicit
  // operands from the BL created above before we copy over all implicit
  // operands from the BLR.
  int ImpLROpIdx = -1;
  int ImpSPOpIdx = -1;
  for (unsigned OpIdx = BL->getNumExplicitOperands();
       OpIdx < BL->getNumOperands(); OpIdx++) {
    const MachineOperand &Op = BL->getOperand(OpIdx);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == AArch64::LR && Op.isDef())
      ImpLROpIdx = OpIdx;
    if (Op.getReg() == AArch64::SP && !Op.isDef())
      ImpSPOpIdx = OpIdx;
  }
  assert(ImpLROpIdx != -1);
  assert(ImpSPOpIdx != -1);
  int FirstOpIdxToRemove = std::max(ImpLROpIdx, ImpSPOpIdx);
  int SecondOpIdxToRemove = std::min(ImpLROpIdx, ImpSPOpIdx);
  BL->removeOperand(FirstOpIdxToRemove);
  BL->removeOperand(SecondOpIdxToRemove);
  // Now copy over the implicit operands from the original BLR.
  BL->copyImplicitOps(MF, BLR);
  MF.moveCallSiteInfo(&BLR, BL);
  // Also add the register called in the BLR as being used in the called thunk.
  BL->addOperand(MachineOperand::CreateReg(Reg, false /*isDef*/, true /*isImp*/,
                                           RegIsKilled /*isKill*/));
  // Remove the BLR instruction.
  MBB.erase(MBBI);

  return MBB;
}

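// Rewrite every BLR in MBB into a BL to the corresponding
// __llvm_slsblr_thunk_xN helper.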
bool AArch64SLSHardening::hardenBLRs(MachineBasicBlock &MBB) const {
  if (!ST->hardenSlsBlr())
    return false;
  bool Modified = false;
  MachineBasicBlock::instr_iterator MBBI = MBB.instr_begin(),
                                    E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (isBLR(MI)) {
      ConvertBLRToBL(MBB, MBBI);
      Modified = true;
    }
  }
  return Modified;
}

FunctionPass *llvm::createAArch64SLSHardeningPass() {
  return new AArch64SLSHardening();
}

namespace {
class AArch64IndirectThunks : public MachineFunctionPass {
public:
  static char ID;

  AArch64IndirectThunks() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "AArch64 Indirect Thunks"; }

  bool doInitialization(Module &M) override;
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  std::tuple<SLSBLRThunkInserter> TIs;

  // FIXME: When LLVM moves to C++17, these can become folds
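  // (e.g. initTIs' expansion below could be written as the C++17 comma fold
  // "(std::get<ThunkInserterT>(ThunkInserters).init(M), ...);").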
  template <typename... ThunkInserterT>
  static void initTIs(Module &M,
                      std::tuple<ThunkInserterT...> &ThunkInserters) {
    (void)std::initializer_list<int>{
        (std::get<ThunkInserterT>(ThunkInserters).init(M), 0)...};
  }
  template <typename... ThunkInserterT>
  static bool runTIs(MachineModuleInfo &MMI, MachineFunction &MF,
                     std::tuple<ThunkInserterT...> &ThunkInserters) {
    bool Modified = false;
    (void)std::initializer_list<int>{
        Modified |= std::get<ThunkInserterT>(ThunkInserters).run(MMI, MF)...};
    return Modified;
  }
};

} // end anonymous namespace

char AArch64IndirectThunks::ID = 0;

FunctionPass *llvm::createAArch64IndirectThunks() {
  return new AArch64IndirectThunks();
}

bool AArch64IndirectThunks::doInitialization(Module &M) {
  initTIs(M, TIs);
  return false;
}

bool AArch64IndirectThunks::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << getPassName() << '\n');
  auto &MMI = getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
  return runTIs(MMI, MF, TIs);
}