//===-- X86LoadValueInjectionRetHardening.cpp - LVI RET hardening for x86 --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Description: Replaces every `ret` instruction with the sequence:
/// ```
/// pop <scratch-reg>
/// lfence
/// jmp *<scratch-reg>
/// ```
/// where `<scratch-reg>` is some available scratch register, according to the
/// calling convention of the function being mitigated.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include <bitset>

using namespace llvm;

#define PASS_KEY "x86-lvi-ret"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumFences, "Number of LFENCEs inserted for LVI mitigation");
STATISTIC(NumFunctionsConsidered, "Number of functions analyzed");
STATISTIC(NumFunctionsMitigated, "Number of functions for which mitigations "
                                 "were deployed");

namespace {

class X86LoadValueInjectionRetHardeningPass : public MachineFunctionPass {
public:
  X86LoadValueInjectionRetHardeningPass() : MachineFunctionPass(ID) {}
  StringRef getPassName() const override {
    return "X86 Load Value Injection (LVI) Ret-Hardening";
  }
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;
};

} // end anonymous namespace

char X86LoadValueInjectionRetHardeningPass::ID = 0;

bool X86LoadValueInjectionRetHardeningPass::runOnMachineFunction(
    MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "***** " << getPassName() << " : " << MF.getName()
                    << " *****\n");
  const X86Subtarget *Subtarget = &MF.getSubtarget<X86Subtarget>();
  if (!Subtarget->useLVIControlFlowIntegrity() || !Subtarget->is64Bit())
    return false; // FIXME: support 32-bit

  // Don't skip functions with the "optnone" attribute, but do participate in
  // opt-bisect.
  const Function &F = MF.getFunction();
  if (!F.hasOptNone() && skipFunction(F))
    return false;

  ++NumFunctionsConsidered;
  const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const X86InstrInfo *TII = Subtarget->getInstrInfo();
  unsigned ClobberReg = X86::NoRegister;
  std::bitset<X86::NUM_TARGET_REGS> UnclobberableGR64s;
  UnclobberableGR64s.set(X86::RSP); // can't clobber stack pointer
  UnclobberableGR64s.set(X86::RIP); // can't clobber instruction pointer
  UnclobberableGR64s.set(X86::RAX); // used for function return
  UnclobberableGR64s.set(X86::RDX); // used for function return

  // We can clobber any register allowed by the function's calling convention.
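  // Callee-saved registers cannot be used as the scratch register: by the time
  // control reaches the `ret`, the epilogue has already restored them to the
  // caller's values, so clobbering one here would corrupt caller state.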
  for (const MCPhysReg *PR = TRI->getCalleeSavedRegs(&MF); auto Reg = *PR; ++PR)
    UnclobberableGR64s.set(Reg);
  for (auto &Reg : X86::GR64RegClass) {
    if (!UnclobberableGR64s.test(Reg)) {
      ClobberReg = Reg;
      break;
    }
  }

  if (ClobberReg != X86::NoRegister) {
    LLVM_DEBUG(dbgs() << "Selected register "
                      << Subtarget->getRegisterInfo()->getRegAsmName(ClobberReg)
                      << " to clobber\n");
  } else {
    LLVM_DEBUG(dbgs() << "Could not find a register to clobber\n");
  }

  bool Modified = false;
  for (auto &MBB : MF) {
    if (MBB.empty())
      continue;

    MachineInstr &MI = MBB.back();
    if (MI.getOpcode() != X86::RETQ)
      continue;

    if (ClobberReg != X86::NoRegister) {
      MBB.erase_instr(&MI);
      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::POP64r))
          .addReg(ClobberReg, RegState::Define)
          .setMIFlag(MachineInstr::FrameDestroy);
      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::LFENCE));
      BuildMI(MBB, MBB.end(), DebugLoc(), TII->get(X86::JMP64r))
          .addReg(ClobberReg);
    } else {
      // In case there is no available scratch register, we can still read from
      // RSP to assert that RSP points to a valid page. The write to RSP also
      // verifies that the stack's write permissions are intact.
      MachineInstr *Fence = BuildMI(MBB, MI, DebugLoc(), TII->get(X86::LFENCE));
      addRegOffset(BuildMI(MBB, Fence, DebugLoc(), TII->get(X86::SHL64mi)),
                   X86::RSP, false, 0)
          .addImm(0)
          ->addRegisterDead(X86::EFLAGS, TRI);
    }

    ++NumFences;
    Modified = true;
  }

  if (Modified)
    ++NumFunctionsMitigated;
  return Modified;
}

INITIALIZE_PASS(X86LoadValueInjectionRetHardeningPass, PASS_KEY,
                "X86 LVI ret hardener", false, false)

FunctionPass *llvm::createX86LoadValueInjectionRetHardeningPass() {
  return new X86LoadValueInjectionRetHardeningPass();
}
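// Illustrative note: when no scratch register is available, the fallback path
// above rewrites a terminating `retq` into the sketch below (AT&T syntax):
//   shlq  $0, (%rsp)   # dummy read-modify-write of the return-address slot
//   lfence
//   retq
// which faults early if RSP does not point to valid, writable stack memory.
// The mitigation as a whole is gated on the subtarget's LVI-CFI feature (see
// useLVIControlFlowIntegrity() above); front ends typically enable it with
// flags such as clang's -mlvi-cfi / -mlvi-hardening or llc's -mattr=+lvi-cfi.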