//===- AArch64SLSHardening.cpp - Harden Straight Line Misspeculation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass to insert code to mitigate against side channel
// vulnerabilities that may happen under straight line misspeculation.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/IndirectThunks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "aarch64-sls-hardening"

#define AARCH64_SLS_HARDENING_NAME "AArch64 sls hardening pass"

namespace {

class AArch64SLSHardening : public MachineFunctionPass {
public:
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *ST;

  static char ID;

  AArch64SLSHardening() : MachineFunctionPass(ID) {
    initializeAArch64SLSHardeningPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override { return AARCH64_SLS_HARDENING_NAME; }

private:
  bool hardenReturnsAndBRs(MachineBasicBlock &MBB) const;
  bool hardenBLRs(MachineBasicBlock &MBB) const;
  MachineBasicBlock &ConvertBLRToBL(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator) const;
};

} // end anonymous namespace

char AArch64SLSHardening::ID = 0;

INITIALIZE_PASS(AArch64SLSHardening, "aarch64-sls-hardening",
                AARCH64_SLS_HARDENING_NAME, false, false)

static void insertSpeculationBarrier(const AArch64Subtarget *ST,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     DebugLoc DL,
                                     bool AlwaysUseISBDSB = false) {
  assert(MBBI != MBB.begin() &&
         "Must not insert SpeculationBarrierEndBB as only instruction in MBB.");
  assert(std::prev(MBBI)->isBarrier() &&
         "SpeculationBarrierEndBB must only follow unconditional control flow "
         "instructions.");
  assert(std::prev(MBBI)->isTerminator() &&
         "SpeculationBarrierEndBB must only follow terminators.");
  const TargetInstrInfo *TII = ST->getInstrInfo();
  unsigned BarrierOpc = ST->hasSB() && !AlwaysUseISBDSB
                            ? AArch64::SpeculationBarrierSBEndBB
                            : AArch64::SpeculationBarrierISBDSBEndBB;
  if (MBBI == MBB.end() ||
      (MBBI->getOpcode() != AArch64::SpeculationBarrierSBEndBB &&
       MBBI->getOpcode() != AArch64::SpeculationBarrierISBDSBEndBB))
    BuildMI(MBB, MBBI, DL, TII->get(BarrierOpc));
}

bool AArch64SLSHardening::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<AArch64Subtarget>();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();

  bool Modified = false;
  for (auto &MBB : MF) {
    Modified |= hardenReturnsAndBRs(MBB);
    Modified |= hardenBLRs(MBB);
  }

  return Modified;
}

static bool isBLR(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AArch64::BLR:
  case AArch64::BLRNoIP:
    return true;
  case AArch64::BLRAA:
  case AArch64::BLRAB:
  case AArch64::BLRAAZ:
  case AArch64::BLRABZ:
    llvm_unreachable("Currently, LLVM's code generator does not support "
                     "producing BLRA* instructions. Therefore, there's no "
                     "support in this pass for those instructions.");
  }
  return false;
}

bool AArch64SLSHardening::hardenReturnsAndBRs(MachineBasicBlock &MBB) const {
  if (!ST->hardenSlsRetBr())
    return false;
  bool Modified = false;
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(), E = MBB.end();
  MachineBasicBlock::iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (MI.isReturn() || isIndirectBranchOpcode(MI.getOpcode())) {
      assert(MI.isTerminator());
      insertSpeculationBarrier(ST, MBB, std::next(MBBI), MI.getDebugLoc());
      Modified = true;
    }
  }
  return Modified;
}

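// For illustration only (the exact barrier chosen depends on the subtarget):
// hardening a return with the pass above turns
//
//     ret
//
// into
//
//     ret
//     dsb sy
//     isb
//
// (or "ret" followed by a single "sb" instruction when the SB extension is
// available), so that straight-line speculation past the RET or BR runs into
// a speculation barrier instead of the instructions that follow it.
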
static const char SLSBLRNamePrefix[] = "__llvm_slsblr_thunk_";

static const struct ThunkNameAndReg {
  const char* Name;
  Register Reg;
} SLSBLRThunks[] = {
  { "__llvm_slsblr_thunk_x0", AArch64::X0},
  { "__llvm_slsblr_thunk_x1", AArch64::X1},
  { "__llvm_slsblr_thunk_x2", AArch64::X2},
  { "__llvm_slsblr_thunk_x3", AArch64::X3},
  { "__llvm_slsblr_thunk_x4", AArch64::X4},
  { "__llvm_slsblr_thunk_x5", AArch64::X5},
  { "__llvm_slsblr_thunk_x6", AArch64::X6},
  { "__llvm_slsblr_thunk_x7", AArch64::X7},
  { "__llvm_slsblr_thunk_x8", AArch64::X8},
  { "__llvm_slsblr_thunk_x9", AArch64::X9},
  { "__llvm_slsblr_thunk_x10", AArch64::X10},
  { "__llvm_slsblr_thunk_x11", AArch64::X11},
  { "__llvm_slsblr_thunk_x12", AArch64::X12},
  { "__llvm_slsblr_thunk_x13", AArch64::X13},
  { "__llvm_slsblr_thunk_x14", AArch64::X14},
  { "__llvm_slsblr_thunk_x15", AArch64::X15},
  // X16 and X17 are deliberately missing, as the mitigation requires those
  // registers to not be used in BLR. See comment in ConvertBLRToBL for more
  // details.
  { "__llvm_slsblr_thunk_x18", AArch64::X18},
  { "__llvm_slsblr_thunk_x19", AArch64::X19},
  { "__llvm_slsblr_thunk_x20", AArch64::X20},
  { "__llvm_slsblr_thunk_x21", AArch64::X21},
  { "__llvm_slsblr_thunk_x22", AArch64::X22},
  { "__llvm_slsblr_thunk_x23", AArch64::X23},
  { "__llvm_slsblr_thunk_x24", AArch64::X24},
  { "__llvm_slsblr_thunk_x25", AArch64::X25},
  { "__llvm_slsblr_thunk_x26", AArch64::X26},
  { "__llvm_slsblr_thunk_x27", AArch64::X27},
  { "__llvm_slsblr_thunk_x28", AArch64::X28},
  { "__llvm_slsblr_thunk_x29", AArch64::FP},
  // X30 is deliberately missing, for similar reasons as X16 and X17 are
  // missing.
  { "__llvm_slsblr_thunk_x31", AArch64::XZR},
};

namespace {
struct SLSBLRThunkInserter : ThunkInserter<SLSBLRThunkInserter> {
  const char *getThunkPrefix() { return SLSBLRNamePrefix; }
  bool mayUseThunk(const MachineFunction &MF) {
    // FIXME: This could also check if there are any BLRs in the function
    // to more accurately reflect if a thunk will be needed.
    return MF.getSubtarget<AArch64Subtarget>().hardenSlsBlr();
  }
  void insertThunks(MachineModuleInfo &MMI);
  void populateThunk(MachineFunction &MF);
};
} // namespace

void SLSBLRThunkInserter::insertThunks(MachineModuleInfo &MMI) {
  // FIXME: It probably would be possible to filter which thunks to produce
  // based on which registers are actually used in BLR instructions in this
  // function. But would that be a worthwhile optimization?
  for (auto T : SLSBLRThunks)
    createThunkFunction(MMI, T.Name);
}

void SLSBLRThunkInserter::populateThunk(MachineFunction &MF) {
  // FIXME: How to better communicate Register number, rather than through
  // name and lookup table?
  assert(MF.getName().startswith(getThunkPrefix()));
  auto ThunkIt = llvm::find_if(
      SLSBLRThunks, [&MF](auto T) { return T.Name == MF.getName(); });
  assert(ThunkIt != std::end(SLSBLRThunks));
  Register ThunkReg = ThunkIt->Reg;

  const TargetInstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  assert(MF.size() == 1);
  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  // These thunks need to consist of the following instructions:
  //  __llvm_slsblr_thunk_xN:
  //      BR xN
  //      barrierInsts
  Entry->addLiveIn(ThunkReg);
  // MOV X16, ThunkReg == ORR X16, XZR, ThunkReg, LSL #0
  BuildMI(Entry, DebugLoc(), TII->get(AArch64::ORRXrs), AArch64::X16)
      .addReg(AArch64::XZR)
      .addReg(ThunkReg)
      .addImm(0);
  BuildMI(Entry, DebugLoc(), TII->get(AArch64::BR)).addReg(AArch64::X16);
  // Make sure the thunks do not make use of the SB extension: a function that
  // calls a thunk may for some reason have the SB extension disabled locally,
  // even though it is enabled for the rest of the module. Therefore set
  // AlwaysUseISBDSB to true.
  insertSpeculationBarrier(&MF.getSubtarget<AArch64Subtarget>(), *Entry,
                           Entry->end(), DebugLoc(), true /*AlwaysUseISBDSB*/);
}

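// For illustration only: with populateThunk above, the thunk produced for x1
// would look roughly like
//
//  __llvm_slsblr_thunk_x1:
//      mov x16, x1
//      br  x16
//      dsb sy
//      isb
//
// i.e. the indirect branch goes through X16 and is always followed by the
// DSB SY; ISB barrier sequence (never SB), per the comment above.
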
MachineBasicBlock &
AArch64SLSHardening::ConvertBLRToBL(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI) const {
  // Transform a BLR to a BL as follows:
  // Before:
  //   |-----------------------------|
  //   |      ...                    |
  //   |  instI                      |
  //   |  BLR xN                     |
  //   |  instJ                      |
  //   |      ...                    |
  //   |-----------------------------|
  //
  // After:
  //   |-----------------------------|
  //   |      ...                    |
  //   |  instI                      |
  //   |  BL __llvm_slsblr_thunk_xN  |
  //   |  instJ                      |
  //   |      ...                    |
  //   |-----------------------------|
  //
  // __llvm_slsblr_thunk_xN:
  //   |-----------------------------|
  //   |  BR xN                      |
  //   |  barrierInsts               |
  //   |-----------------------------|
  //
  // The __llvm_slsblr_thunk_xN thunks are created by the SLSBLRThunkInserter.
  // This function merely needs to transform BLR xN into BL
  // __llvm_slsblr_thunk_xN.
  //
  // Since linkers are allowed to clobber X16 and X17 on function calls, the
  // above mitigation only works if the original BLR instruction was not
  // BLR X16 nor BLR X17. Code generation before this pass must make sure that
  // no BLR X16 or BLR X17 was produced if the mitigation is enabled.

  MachineInstr &BLR = *MBBI;
  assert(isBLR(BLR));
  unsigned BLOpcode;
  Register Reg;
  bool RegIsKilled;
  switch (BLR.getOpcode()) {
  case AArch64::BLR:
  case AArch64::BLRNoIP:
    BLOpcode = AArch64::BL;
    Reg = BLR.getOperand(0).getReg();
    assert(Reg != AArch64::X16 && Reg != AArch64::X17 && Reg != AArch64::LR);
    RegIsKilled = BLR.getOperand(0).isKill();
    break;
  case AArch64::BLRAA:
  case AArch64::BLRAB:
  case AArch64::BLRAAZ:
  case AArch64::BLRABZ:
    llvm_unreachable("BLRA instructions cannot yet be produced by LLVM, "
                     "therefore there is no need to support them for now.");
  default:
    llvm_unreachable("unhandled BLR");
  }
  DebugLoc DL = BLR.getDebugLoc();

  // If we'd also like to support BLRAA and BLRAB instructions, we'd need many
  // more different kinds of thunks.
  // For example, a
  //
  //   BLRAA xN, xM
  //
  // instruction probably would need to be transformed to something like:
  //
  //   BL __llvm_slsblraa_thunk_x<N>_x<M>
  //
  // __llvm_slsblraa_thunk_x<N>_x<M>:
  //   BRAA x<N>, x<M>
  //   barrierInsts
  //
  // Given that about 30 different values of N are possible and about 30
  // different values of M are possible in the above, with the current way
  // of producing indirect thunks, we'd be producing about 30 times 30, i.e.
  // about 900 thunks (where most might not actually be called). This number
  // would further double to support both the BLRAA and BLRAB variants of
  // those instructions.
  // If we wanted to support this, we'd probably need a different way to
  // produce thunk functions, based on which variants are actually needed,
  // rather than producing all possible variants.
  // So far, LLVM never produces BLRA* instructions, so let's leave this for
  // the future, when LLVM starts producing them.
  MachineFunction &MF = *MBBI->getMF();
  MCContext &Context = MBB.getParent()->getContext();
  auto ThunkIt =
      llvm::find_if(SLSBLRThunks, [Reg](auto T) { return T.Reg == Reg; });
  assert(ThunkIt != std::end(SLSBLRThunks));
  MCSymbol *Sym = Context.getOrCreateSymbol(ThunkIt->Name);

  MachineInstr *BL = BuildMI(MBB, MBBI, DL, TII->get(BLOpcode)).addSym(Sym);

  // Now copy the implicit operands from BLR to BL and copy other necessary
  // info.
  // However, both BLR and BL instructions implicitly use SP and implicitly
  // define LR. Blindly copying implicit operands would result in the SP and
  // LR operands being present multiple times. While this may not be too much
  // of an issue, let's avoid that for cleanliness by removing those implicit
  // operands from the BL created above before we copy over all implicit
  // operands from the BLR.
  int ImpLROpIdx = -1;
  int ImpSPOpIdx = -1;
  for (unsigned OpIdx = BL->getNumExplicitOperands();
       OpIdx < BL->getNumOperands(); OpIdx++) {
    MachineOperand Op = BL->getOperand(OpIdx);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == AArch64::LR && Op.isDef())
      ImpLROpIdx = OpIdx;
    if (Op.getReg() == AArch64::SP && !Op.isDef())
      ImpSPOpIdx = OpIdx;
  }
  assert(ImpLROpIdx != -1);
  assert(ImpSPOpIdx != -1);
  int FirstOpIdxToRemove = std::max(ImpLROpIdx, ImpSPOpIdx);
  int SecondOpIdxToRemove = std::min(ImpLROpIdx, ImpSPOpIdx);
  BL->RemoveOperand(FirstOpIdxToRemove);
  BL->RemoveOperand(SecondOpIdxToRemove);
  // Now copy over the implicit operands from the original BLR
  BL->copyImplicitOps(MF, BLR);
  MF.moveCallSiteInfo(&BLR, BL);
  // Also add the register called in the BLR as being used in the called thunk.
  BL->addOperand(MachineOperand::CreateReg(Reg, false /*isDef*/, true /*isImp*/,
                                           RegIsKilled /*isKill*/));
  // Remove BLR instruction
  MBB.erase(MBBI);

  return MBB;
}

bool AArch64SLSHardening::hardenBLRs(MachineBasicBlock &MBB) const {
  if (!ST->hardenSlsBlr())
    return false;
  bool Modified = false;
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  MachineBasicBlock::iterator NextMBBI;
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    if (isBLR(MI)) {
      ConvertBLRToBL(MBB, MBBI);
      Modified = true;
    }
  }
  return Modified;
}

FunctionPass *llvm::createAArch64SLSHardeningPass() {
  return new AArch64SLSHardening();
}

namespace {
class AArch64IndirectThunks : public MachineFunctionPass {
public:
  static char ID;

  AArch64IndirectThunks() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "AArch64 Indirect Thunks"; }

  bool doInitialization(Module &M) override;
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  std::tuple<SLSBLRThunkInserter> TIs;

  // FIXME: When LLVM moves to C++17, these can become folds
  template <typename... ThunkInserterT>
  static void initTIs(Module &M,
                      std::tuple<ThunkInserterT...> &ThunkInserters) {
    (void)std::initializer_list<int>{
        (std::get<ThunkInserterT>(ThunkInserters).init(M), 0)...};
  }
  template <typename... ThunkInserterT>
  static bool runTIs(MachineModuleInfo &MMI, MachineFunction &MF,
                     std::tuple<ThunkInserterT...> &ThunkInserters) {
    bool Modified = false;
    (void)std::initializer_list<int>{
        Modified |= std::get<ThunkInserterT>(ThunkInserters).run(MMI, MF)...};
    return Modified;
  }
};

} // end anonymous namespace

char AArch64IndirectThunks::ID = 0;

FunctionPass *llvm::createAArch64IndirectThunks() {
  return new AArch64IndirectThunks();
}

bool AArch64IndirectThunks::doInitialization(Module &M) {
  initTIs(M, TIs);
  return false;
}

bool AArch64IndirectThunks::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << getPassName() << '\n');
  auto &MMI = getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
  return runTIs(MMI, MF, TIs);
}