//==- X86IndirectThunks.cpp - Construct indirect call/jump thunks for x86 --=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Pass that injects an MI thunk that is used to lower indirect calls in a way
/// that prevents speculation on some x86 processors and can be used to mitigate
/// security vulnerabilities due to targeted speculative execution and side
/// channels such as CVE-2017-5715.
///
/// Currently supported thunks include:
/// - Retpoline -- A RET-implemented trampoline that lowers indirect calls
/// - LVI Thunk -- A CALL/JMP-implemented thunk that forces load serialization
///   before making an indirect call/jump
///
/// Note that the reason that this is implemented as a MachineFunctionPass and
/// not a ModulePass is that ModulePasses at this point in the LLVM X86 pipeline
/// serialize all transformations, which can consume lots of memory.
///
/// TODO(chandlerc): All of this code could use better comments and
/// documentation.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/IndirectThunks.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "x86-retpoline-thunks"

// Symbol names for the emitted thunk functions. 64-bit targets only ever need
// the r11-based retpoline; 32-bit targets get one retpoline per usable scratch
// register (EDI as the callee-saved fallback).
static const char RetpolineNamePrefix[] = "__llvm_retpoline_";
static const char R11RetpolineName[] = "__llvm_retpoline_r11";
static const char EAXRetpolineName[] = "__llvm_retpoline_eax";
static const char ECXRetpolineName[] = "__llvm_retpoline_ecx";
static const char EDXRetpolineName[] = "__llvm_retpoline_edx";
static const char EDIRetpolineName[] = "__llvm_retpoline_edi";

static const char LVIThunkNamePrefix[] = "__llvm_lvi_thunk_";
static const char R11LVIThunkName[] = "__llvm_lvi_thunk_r11";

namespace {
/// Inserter for the RET-based retpoline thunks that lower indirect
/// calls/jumps when the subtarget requests retpoline mitigation.
struct RetpolineThunkInserter : ThunkInserter<RetpolineThunkInserter> {
  const char *getThunkPrefix() { return RetpolineNamePrefix; }
  /// Thunks are needed only when the subtarget asks for retpoline-lowered
  /// indirect calls or branches AND does not supply an external thunk of its
  /// own (in which case we must not emit a conflicting definition).
  bool mayUseThunk(const MachineFunction &MF) {
    const auto &STI = MF.getSubtarget<X86Subtarget>();
    return (STI.useRetpolineIndirectCalls() ||
            STI.useRetpolineIndirectBranches()) &&
           !STI.useRetpolineExternalThunk();
  }
  void insertThunks(MachineModuleInfo &MMI);
  void populateThunk(MachineFunction &MF);
};

/// Inserter for the LVI (Load Value Injection) mitigation thunk: an LFENCE
/// followed by an indirect jump through %r11. Only emitted on targets with
/// LVI control-flow integrity enabled, and only in the r11 flavor.
struct LVIThunkInserter : ThunkInserter<LVIThunkInserter> {
  const char *getThunkPrefix() { return LVIThunkNamePrefix; }
  bool mayUseThunk(const MachineFunction &MF) {
    return MF.getSubtarget<X86Subtarget>().useLVIControlFlowIntegrity();
  }
  void insertThunks(MachineModuleInfo &MMI) {
    createThunkFunction(MMI, R11LVIThunkName);
  }
  /// Replaces the (single) entry block's contents with the LFENCE+JMP body.
  void populateThunk(MachineFunction &MF) {
    assert (MF.size() == 1);
    MachineBasicBlock *Entry = &MF.front();
    Entry->clear();

    // This code mitigates LVI by replacing each indirect call/jump with a
    // direct call/jump to a thunk that looks like:
    // ```
    // lfence
    // jmpq *%r11
    // ```
    // This ensures that if the value in register %r11 was loaded from memory,
    // then the value in %r11 is (architecturally) correct prior to the jump.
    const TargetInstrInfo *TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
    BuildMI(&MF.front(), DebugLoc(), TII->get(X86::LFENCE));
    BuildMI(&MF.front(), DebugLoc(), TII->get(X86::JMP64r)).addReg(X86::R11);
    // %r11 holds the jump target and is read by the thunk body, so it must be
    // marked live-in for the MIR verifier.
    MF.front().addLiveIn(X86::R11);
  }
};

/// The pass itself: runs every registered thunk inserter over each machine
/// function. Inserters are held in a tuple and driven generically by the
/// initTIs/runTIs helpers below.
class X86IndirectThunks : public MachineFunctionPass {
public:
  static char ID;

  X86IndirectThunks() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 Indirect Thunks"; }

  bool doInitialization(Module &M) override;
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  std::tuple<RetpolineThunkInserter, LVIThunkInserter> TIs;

  // FIXME: When LLVM moves to C++17, these can become folds
  // (The initializer_list trick below is the pre-C++17 idiom for expanding a
  // side-effecting expression over every element of a parameter pack.)
  template <typename... ThunkInserterT>
  static void initTIs(Module &M,
                      std::tuple<ThunkInserterT...> &ThunkInserters) {
    (void)std::initializer_list<int>{
        (std::get<ThunkInserterT>(ThunkInserters).init(M), 0)...};
  }
  // Runs every inserter on MF; returns true if any of them changed the code.
  template <typename... ThunkInserterT>
  static bool runTIs(MachineModuleInfo &MMI, MachineFunction &MF,
                     std::tuple<ThunkInserterT...> &ThunkInserters) {
    bool Modified = false;
    (void)std::initializer_list<int>{
        Modified |= std::get<ThunkInserterT>(ThunkInserters).run(MMI, MF)...};
    return Modified;
  }
};

} // end anonymous namespace

/// Creates the retpoline thunk function(s) for the target: a single r11 thunk
/// on x86-64, or the eax/ecx/edx/edi family on 32-bit targets.
void RetpolineThunkInserter::insertThunks(MachineModuleInfo &MMI) {
  if (MMI.getTarget().getTargetTriple().getArch() == Triple::x86_64)
    createThunkFunction(MMI, R11RetpolineName);
  else
    for (StringRef Name : {EAXRetpolineName, ECXRetpolineName, EDXRetpolineName,
                           EDIRetpolineName})
      createThunkFunction(MMI, Name);
}

/// Fills in the body of one retpoline thunk. The thunk CALLs past a
/// speculation-capturing PAUSE/LFENCE loop to a block that overwrites the
/// pushed return address with the real target (held in the thunk register)
/// and then RETs, so the indirect transfer is realized via a return and never
/// via a speculatable indirect branch.
void RetpolineThunkInserter::populateThunk(MachineFunction &MF) {
  bool Is64Bit = MF.getTarget().getTargetTriple().getArch() == Triple::x86_64;
  Register ThunkReg;
  if (Is64Bit) {
    assert(MF.getName() == "__llvm_retpoline_r11" &&
           "Should only have an r11 thunk on 64-bit targets");

    // __llvm_retpoline_r11:
    //   callq .Lr11_call_target
    // .Lr11_capture_spec:
    //   pause
    //   lfence
    //   jmp .Lr11_capture_spec
    // .align 16
    // .Lr11_call_target:
    //   movq %r11, (%rsp)
    //   retq
    ThunkReg = X86::R11;
  } else {
    // For 32-bit targets we need to emit a collection of thunks for various
    // possible scratch registers as well as a fallback that uses EDI, which is
    // normally callee saved.
    // __llvm_retpoline_eax:
    //   calll .Leax_call_target
    // .Leax_capture_spec:
    //   pause
    //   jmp .Leax_capture_spec
    // .align 16
    // .Leax_call_target:
    //   movl %eax, (%esp)  # Clobber return addr
    //   retl
    //
    // __llvm_retpoline_ecx:
    //   ... # Same setup
    //   movl %ecx, (%esp)
    //   retl
    //
    // __llvm_retpoline_edx:
    //   ... # Same setup
    //   movl %edx, (%esp)
    //   retl
    //
    // __llvm_retpoline_edi:
    //   ... # Same setup
    //   movl %edi, (%esp)
    //   retl
    if (MF.getName() == EAXRetpolineName)
      ThunkReg = X86::EAX;
    else if (MF.getName() == ECXRetpolineName)
      ThunkReg = X86::ECX;
    else if (MF.getName() == EDXRetpolineName)
      ThunkReg = X86::EDX;
    else if (MF.getName() == EDIRetpolineName)
      ThunkReg = X86::EDI;
    else
      llvm_unreachable("Invalid thunk name on x86-32!");
  }

  const TargetInstrInfo *TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  assert (MF.size() == 1);
  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  // Build the two blocks after the entry: the speculation-capture loop and
  // the architectural call target (identified by a temp symbol so the CALL
  // can reference it before the block is populated).
  MachineBasicBlock *CaptureSpec =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MachineBasicBlock *CallTarget =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MCSymbol *TargetSym = MF.getContext().createTempSymbol();
  MF.push_back(CaptureSpec);
  MF.push_back(CallTarget);

  const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;

  Entry->addLiveIn(ThunkReg);
  BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addSym(TargetSym);

  // The MIR verifier thinks that the CALL in the entry block will fall through
  // to CaptureSpec, so mark it as the successor. Technically, CallTarget is
  // the successor, but the MIR verifier doesn't know how to cope with that.
  Entry->addSuccessor(CaptureSpec);

  // In the capture loop for speculation, we want to stop the processor from
  // speculating as fast as possible. On Intel processors, the PAUSE instruction
  // will block speculation without consuming any execution resources. On AMD
  // processors, the PAUSE instruction is (essentially) a nop, so we also use an
  // LFENCE instruction which they have advised will stop speculation as well
  // with minimal resource utilization. We still end the capture with a jump to
  // form an infinite loop to fully guarantee that no matter what implementation
  // of the x86 ISA, speculating this code path never escapes.
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec);
  CaptureSpec->setHasAddressTaken();
  CaptureSpec->addSuccessor(CaptureSpec);

  CallTarget->addLiveIn(ThunkReg);
  CallTarget->setHasAddressTaken();
  CallTarget->setAlignment(Align(16));

  // Insert return address clobber
  const unsigned MovOpc = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
  const Register SPReg = Is64Bit ? X86::RSP : X86::ESP;
  addRegOffset(BuildMI(CallTarget, DebugLoc(), TII->get(MovOpc)), SPReg, false,
               0)
      .addReg(ThunkReg);

  // Attach the temp symbol to the store (the first instruction of the block)
  // so the entry block's CALL lands here.
  CallTarget->back().setPreInstrSymbol(MF, TargetSym);
  BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}

FunctionPass *llvm::createX86IndirectThunksPass() {
  return new X86IndirectThunks();
}

char X86IndirectThunks::ID = 0;

bool X86IndirectThunks::doInitialization(Module &M) {
  initTIs(M, TIs);
  // Creating thunk declarations does not count as modifying this module's IR.
  return false;
}

bool X86IndirectThunks::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << getPassName() << '\n');
  auto &MMI = getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
  return runTIs(MMI, MF, TIs);
}