//==- X86IndirectThunks.cpp - Construct indirect call/jump thunks for x86 --=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Pass that injects an MI thunk that is used to lower indirect calls in a way
/// that prevents speculation on some x86 processors and can be used to mitigate
/// security vulnerabilities due to targeted speculative execution and side
/// channels such as CVE-2017-5715.
///
/// Currently supported thunks include:
/// - Retpoline -- A RET-implemented trampoline that lowers indirect calls
/// - LVI Thunk -- A CALL/JMP-implemented thunk that forces load serialization
///   before making an indirect call/jump
///
/// Note that the reason that this is implemented as a MachineFunctionPass and
/// not a ModulePass is that ModulePasses at this point in the LLVM X86 pipeline
/// serialize all transformations, which can consume lots of memory.
///
/// TODO(chandlerc): All of this code could use better comments and
/// documentation.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/IndirectThunks.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "x86-retpoline-thunks"

static const char RetpolineNamePrefix[] = "__llvm_retpoline_";
static const char R11RetpolineName[] = "__llvm_retpoline_r11";
static const char EAXRetpolineName[] = "__llvm_retpoline_eax";
static const char ECXRetpolineName[] = "__llvm_retpoline_ecx";
static const char EDXRetpolineName[] = "__llvm_retpoline_edx";
static const char EDIRetpolineName[] = "__llvm_retpoline_edi";

static const char LVIThunkNamePrefix[] = "__llvm_lvi_thunk_";
static const char R11LVIThunkName[] = "__llvm_lvi_thunk_r11";

namespace {
struct RetpolineThunkInserter : ThunkInserter<RetpolineThunkInserter> {
  const char *getThunkPrefix() { return RetpolineNamePrefix; }
  bool mayUseThunk(const MachineFunction &MF) {
    const auto &STI = MF.getSubtarget<X86Subtarget>();
    return (STI.useRetpolineIndirectCalls() ||
            STI.useRetpolineIndirectBranches()) &&
           !STI.useRetpolineExternalThunk();
  }
  void insertThunks(MachineModuleInfo &MMI);
  void populateThunk(MachineFunction &MF);
};

struct LVIThunkInserter : ThunkInserter<LVIThunkInserter> {
  const char *getThunkPrefix() { return LVIThunkNamePrefix; }
  bool mayUseThunk(const MachineFunction &MF) {
    return MF.getSubtarget<X86Subtarget>().useLVIControlFlowIntegrity();
  }
  void insertThunks(MachineModuleInfo &MMI) {
    createThunkFunction(MMI, R11LVIThunkName);
  }
  void populateThunk(MachineFunction &MF) {
    assert(MF.size() == 1);
    MachineBasicBlock *Entry = &MF.front();
    Entry->clear();

    // This code mitigates LVI by replacing each indirect call/jump with a
    // direct call/jump to a thunk that looks like:
    // ```
    // lfence
    // jmpq *%r11
    // ```
    // This ensures that if the value in register %r11 was loaded from memory,
    // then the value in %r11 is (architecturally) correct prior to the jump.
    const TargetInstrInfo *TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
    BuildMI(&MF.front(), DebugLoc(), TII->get(X86::LFENCE));
    BuildMI(&MF.front(), DebugLoc(), TII->get(X86::JMP64r)).addReg(X86::R11);
    MF.front().addLiveIn(X86::R11);
    return;
  }
};

class X86IndirectThunks : public MachineFunctionPass {
public:
  static char ID;

  X86IndirectThunks() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 Indirect Thunks"; }

  bool doInitialization(Module &M) override;
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  std::tuple<RetpolineThunkInserter, LVIThunkInserter> TIs;

  // FIXME: When LLVM moves to C++17, these can become folds
  template <typename... ThunkInserterT>
  static void initTIs(Module &M,
                      std::tuple<ThunkInserterT...> &ThunkInserters) {
    (void)std::initializer_list<int>{
        (std::get<ThunkInserterT>(ThunkInserters).init(M), 0)...};
  }
  template <typename... ThunkInserterT>
  static bool runTIs(MachineModuleInfo &MMI, MachineFunction &MF,
                     std::tuple<ThunkInserterT...> &ThunkInserters) {
    bool Modified = false;
    (void)std::initializer_list<int>{
        Modified |= std::get<ThunkInserterT>(ThunkInserters).run(MMI, MF)...};
    return Modified;
  }
};

} // end anonymous namespace
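
// Note that this pass only materializes the thunk bodies; rewriting the
// indirect calls and jumps themselves to go through these thunks is done
// elsewhere in the X86 backend. As a rough sketch (the exact lowering lives
// outside this file), a 64-bit indirect call whose target has been moved into
// %r11, i.e.
//   callq *%r11
// is instead emitted as
//   callq __llvm_retpoline_r11
// leaving no unprotected indirect branch at the call site.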

void RetpolineThunkInserter::insertThunks(MachineModuleInfo &MMI) {
  if (MMI.getTarget().getTargetTriple().getArch() == Triple::x86_64)
    createThunkFunction(MMI, R11RetpolineName);
  else
    for (StringRef Name : {EAXRetpolineName, ECXRetpolineName,
                           EDXRetpolineName, EDIRetpolineName})
      createThunkFunction(MMI, Name);
}

void RetpolineThunkInserter::populateThunk(MachineFunction &MF) {
  bool Is64Bit = MF.getTarget().getTargetTriple().getArch() == Triple::x86_64;
  Register ThunkReg;
  if (Is64Bit) {
    assert(MF.getName() == "__llvm_retpoline_r11" &&
           "Should only have an r11 thunk on 64-bit targets");

    // __llvm_retpoline_r11:
    //   callq .Lr11_call_target
    // .Lr11_capture_spec:
    //   pause
    //   lfence
    //   jmp .Lr11_capture_spec
    // .align 16
    // .Lr11_call_target:
    //   movq %r11, (%rsp)
    //   retq
    ThunkReg = X86::R11;
  } else {
    // For 32-bit targets we need to emit a collection of thunks for various
    // possible scratch registers as well as a fallback that uses EDI, which is
    // normally callee saved.
    // __llvm_retpoline_eax:
    //   calll .Leax_call_target
    // .Leax_capture_spec:
    //   pause
    //   jmp .Leax_capture_spec
    // .align 16
    // .Leax_call_target:
    //   movl %eax, (%esp)  # Clobber return addr
    //   retl
    //
    // __llvm_retpoline_ecx:
    //   ...                # Same setup
    //   movl %ecx, (%esp)
    //   retl
    //
    // __llvm_retpoline_edx:
    //   ...                # Same setup
    //   movl %edx, (%esp)
    //   retl
    //
    // __llvm_retpoline_edi:
    //   ...                # Same setup
    //   movl %edi, (%esp)
    //   retl
    if (MF.getName() == EAXRetpolineName)
      ThunkReg = X86::EAX;
    else if (MF.getName() == ECXRetpolineName)
      ThunkReg = X86::ECX;
    else if (MF.getName() == EDXRetpolineName)
      ThunkReg = X86::EDX;
    else if (MF.getName() == EDIRetpolineName)
      ThunkReg = X86::EDI;
    else
      llvm_unreachable("Invalid thunk name on x86-32!");
  }

  const TargetInstrInfo *TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  assert(MF.size() == 1);
  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  MachineBasicBlock *CaptureSpec =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MachineBasicBlock *CallTarget =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MCSymbol *TargetSym = MF.getContext().createTempSymbol();
  MF.push_back(CaptureSpec);
  MF.push_back(CallTarget);

  const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;

  Entry->addLiveIn(ThunkReg);
  BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addSym(TargetSym);

  // The MIR verifier thinks that the CALL in the entry block will fall through
  // to CaptureSpec, so mark it as the successor. Technically, CallTarget is
  // the successor, but the MIR verifier doesn't know how to cope with that.
  Entry->addSuccessor(CaptureSpec);

  // In the capture loop for speculation, we want to stop the processor from
  // speculating as fast as possible. On Intel processors, the PAUSE instruction
  // will block speculation without consuming any execution resources. On AMD
  // processors, the PAUSE instruction is (essentially) a nop, so we also use an
  // LFENCE instruction which they have advised will stop speculation as well
  // with minimal resource utilization. We still end the capture with a jump to
  // form an infinite loop to fully guarantee that no matter what implementation
  // of the x86 ISA, speculating this code path never escapes.
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec);
  CaptureSpec->setHasAddressTaken();
  CaptureSpec->addSuccessor(CaptureSpec);

  CallTarget->addLiveIn(ThunkReg);
  CallTarget->setHasAddressTaken();
  CallTarget->setAlignment(Align(16));

  // Insert return address clobber
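  // The CALL in the entry block pushed the address of CaptureSpec as the
  // return address, so speculation of the RET below (for example via the
  // return predictor) is steered into the capture loop above. Overwriting that
  // return address on the stack with the value in ThunkReg makes the
  // architectural execution of the RET continue at the real call target.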
  const unsigned MovOpc = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
  const Register SPReg = Is64Bit ? X86::RSP : X86::ESP;
  addRegOffset(BuildMI(CallTarget, DebugLoc(), TII->get(MovOpc)), SPReg, false,
               0)
      .addReg(ThunkReg);

  CallTarget->back().setPreInstrSymbol(MF, TargetSym);
  BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}

FunctionPass *llvm::createX86IndirectThunksPass() {
  return new X86IndirectThunks();
}

char X86IndirectThunks::ID = 0;

bool X86IndirectThunks::doInitialization(Module &M) {
  initTIs(M, TIs);
  return false;
}

bool X86IndirectThunks::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << getPassName() << '\n');
  auto &MMI = getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
  return runTIs(MMI, MF, TIs);
}