//===- StackProtector.cpp - Stack Protector Insertion --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the function, the stored value is checked. If it
// has changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

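// TargetPassConfig is required so runOnFunction can reach the TargetMachine
// (and, through it, the TargetLowering for the subtarget). The dominator tree
// is only consulted and updated when it happens to be available, so it is
// preserved rather than required.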
void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
  HasPrologue = false;
  HasIRCheck = false;

  Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size");
  if (Attr.isStringAttribute() &&
      Attr.getValueAsString().getAsInteger(10, SSPBufferSize))
    return false; // Invalid integer string

  if (!RequiresStackProtector())
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  return InsertStackProtectors();
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" (>= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
                                              bool Strong,
                                              bool InStruct) const {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Trip.isOSDarwin()))
        return false;
    }

    // If an array has at least SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode.
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

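/// Check whether this instruction (an alloca, or a value derived from one)
/// has its address taken in a way that could let an access reach past
/// \p AllocSize bytes. Accesses and constant GEP offsets that provably stay
/// within the allocation do not count as taking the address.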
bool StackProtector::HasAddressTaken(const Instruction *AI,
                                     TypeSize AllocSize) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory, make sure it doesn't access beyond
    // the bounds of the allocated object.
    Optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc.hasValue() && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize,
                             TypeSize::getFixed(MemLoc->Size.getValue())))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds, meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::Fixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be an integer; so if a pointer is being
      // stored, we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return
/// it if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic will add a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, functions
/// with character buffers larger than SSPBufferSize, and functions with
/// aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic will add a guard variable to functions that call alloca
/// regardless of size, functions with any buffer regardless of type and size,
/// functions with aggregates that contain any buffer regardless of type and
/// size, and functions that contain stack-based variables that have had their
/// address taken.
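///
/// For example (illustrative only, assuming the default SSPBufferSize of 8):
///
///   char Buf[16];       // protected under both ssp and sspstrong
///   int Vals[2];        // protected only under sspstrong on non-Darwin
///                       // targets (non-character array)
///   int X; escape(&X);  // protected only under sspstrong (address taken)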
bool StackProtector::RequiresStackProtector() {
  bool Strong = false;
  bool NeedsProtector = false;

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              Layout.insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              Layout.insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            Layout.insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
          Layout.insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong && HasAddressTaken(AI, M->getDataLayout().getTypeAllocSize(
                                              AI->getAllocatedType()))) {
          ++NumAddrTaken;
          Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a load of the stack guard and report whether SelectionDAG-based
/// stack protection is supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
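  // Only the default, TLS-based guard mode can use the IR-level guard load;
  // an explicit non-TLS mode (e.g. one selected with clang's
  // -mstack-protector-guard= option) falls through to the SelectionDAG path
  // below.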
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat awkward, since we optionally report here whether
  // SelectionDAG SP should be performed. The reason is that the answer is
  // strictly defined as !TLI->getIRStackGuard(B), where getIRStackGuard is
  // also mutating. There is no way to get this bit without mutating the IR,
  // so getting this bit has to happen at this exact point.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // when it actually conveys the same information getIRStackGuard() already
  // gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate
/// pseudo node.
static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the
///    original value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.

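  // Visit each return block. The first one triggers creation of the prologue;
  // after that, either the epilogue check is deferred to SelectionDAG (and we
  // stop early) or an IR-level check is inserted in front of every return.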
  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, RI, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a musttail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a musttail call is either directly before the
    // return or separated from it by a single bitcast of the return value, so
    // we don't need to worry about other situations here.
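    //
    // For example (illustrative only), both of these forms are handled:
    //
    //   %ret = musttail call i32 @callee()
    //   ret i32 %ret
    //
    //   %ret = musttail call i8* @callee()
    //   %cast = bitcast i8* %ret to i32*
    //   ret i32* %cast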
    Instruction *CheckLoc = RI;
    Instruction *Prev = RI->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isMustTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isMustTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = cmp i1 %1, %2
      //     br i1 %3, label %SP_return, label %CallStackCheckFailBlk
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BBs into one,
      // including the fail BB generated by the stack protector pseudo
      // instruction.
      BasicBlock *FailBB = CreateFailBB();

      // Split the basic block before the return instruction.
      BasicBlock *NewBB =
          BB.splitBasicBlock(CheckLoc->getIterator(), "SP_return");

      // Update the dominator tree if we need to.
      if (DT && DT->isReachableFromEntry(&BB)) {
        DT->addNewBlock(NewBB, &BB);
        DT->addNewBlock(FailBB, &BB);
      }

      // Remove the default branch instruction to the new BB.
      BB.getTerminator()->eraseFromParent();

      // Move the newly created basic block to the point right after the old
      // basic block so that it's in the "fall through" position.
      NewBB->moveAfter(&BB);

      // Generate the stack protector instructions in the old basic block.
      IRBuilder<> B(&BB);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
      Value *Cmp = B.CreateICmpEQ(Guard, LI2);
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(SuccessProb.getNumerator(),
                                                 FailureProb.getNumerator());
      B.CreateCondBr(Cmp, NewBB, FailBB, Weights);
    }
  }

  // Return whether we modified any basic blocks; HasPrologue remains false
  // only when the function contains no return statements.
  return HasPrologue;
}

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
  if (Trip.isOSOpenBSD()) {
    FunctionCallee StackChkFail = M->getOrInsertFunction(
        "__stack_smash_handler", Type::getVoidTy(Context),
        Type::getInt8PtrTy(Context));

    B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    FunctionCallee StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));

    B.CreateCall(StackChkFail, {});
  }
  B.CreateUnreachable();
  return FailBB;
}

bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}