//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

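// When enabled (the default), defer the epilogue guard check to SelectionDAG
// rather than emitting it as IR; not used when FastISel or GlobalISel is
// selected (see InsertStackProtectors()).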
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);

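// Pass identification, replacement for typeid.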
char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
  HasPrologue = false;
  HasIRCheck = false;

  Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size");
  if (Attr.isStringAttribute() &&
      Attr.getValueAsString().getAsInteger(10, SSPBufferSize))
    return false; // Invalid integer string

  if (!RequiresStackProtector())
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported yet.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  return InsertStackProtectors();
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" (>= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
                                              bool Strong,
                                              bool InStruct) const {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Trip.isOSDarwin()))
        return false;
    }

    // If an array has at least SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (StructType::element_iterator I = ST->element_begin(),
                                    E = ST->element_end();
       I != E; ++I)
    if (ContainsProtectableArray(*I, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

bool StackProtector::HasAddressTaken(const Instruction *AI,
                                     uint64_t AllocSize) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory, make sure it doesn't access beyond
    // the bounds of the allocated object.
    Optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc.hasValue() && MemLoc->Size.getValue() > AllocSize)
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds, meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned TypeSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(TypeSize, 0);
      APInt MaxOffset(TypeSize, AllocSize);
      if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.ugt(MaxOffset))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      if (HasAddressTaken(I, AllocSize - Offset.getLimitedValue()))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be an integer; so if a pointer is being
      // stored, we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (CI->getCalledFunction() ==
            Intrinsic::getDeclaration(F.getParent(), Intrinsic::stackprotector))
          return CI;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic will add a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, to functions
/// with character buffers larger than SSPBufferSize, and to functions with
/// aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic will add a guard variable to functions that call alloca
/// regardless of size, to functions with any buffer regardless of type and
/// size, to functions with aggregates that contain any buffer regardless of
/// type and size, and to functions that contain stack-based variables that
/// have had their address taken.
bool StackProtector::RequiresStackProtector() {
  bool Strong = false;
  bool NeedsProtector = false;
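  // Note whether the IR already contains a stack protector prologue (a call
  // to llvm.stackprotector); if so, this pass only needs to add the checks.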
  HasPrologue = findStackProtectorIntrinsic(*F);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (HasPrologue)
    NeedsProtector = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
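            // getLimitedValue(SSPBufferSize) clamps larger constants to
            // SSPBufferSize itself, so allocation sizes that do not fit in
            // 64 bits still satisfy the comparison below.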
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              Layout.insert(std::make_pair(AI,
                                           MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              Layout.insert(std::make_pair(AI,
                                           MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            Layout.insert(std::make_pair(AI,
                                         MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
          Layout.insert(std::make_pair(AI, IsLarge
                                       ? MachineFrameInfo::SSPLK_LargeArray
                                       : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong && HasAddressTaken(AI, M->getDataLayout().getTypeAllocSize(
                                              AI->getAllocatedType()))) {
          ++NumAddrTaken;
          Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a load of the stack guard and report whether SelectionDAG-based
/// stack protection is supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  if (Value *Guard = TLI->getIRStackGuard(B))
    return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat awkward, since we optionally report here whether
  // SelectionDAG-based stack protection should be performed. The reason is
  // that the answer is defined exactly as !TLI->getIRStackGuard(B), and
  // getIRStackGuard may mutate the IR. There is no way to get this bit
  // without mutating the IR, so it has to be captured at this point.
  //
  // We could have defined a new hook, TLI::supportsSelectionDAGSP(), but that
  // would put more burden on the backends' overriding work, especially since
  // it would convey the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the original
///    value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel &&
       !TM->Options.EnableGlobalISel);
  AllocaInst *AI = nullptr;       // Place on stack that stores the stack guard.

  for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
    BasicBlock *BB = &*I++;
    ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
    if (!RI)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
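      // CreatePrologue() reports false when the target provides an IR-level
      // stack guard; in that case the SelectionDAG path is abandoned and the
      // IR check below is emitted instead.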
      SupportsSelectionDAGSP &= CreatePrologue(F, M, RI, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(RI);
      LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based tail calls, generate IR level
      // tail calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = cmp i1 %1, %2
      //     br i1 %3, label %SP_return, label %CallStackCheckFailBlk
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BBs into one,
      // including the fail BB generated by the stack protector pseudo
      // instruction.
      BasicBlock *FailBB = CreateFailBB();

      // Split the basic block before the return instruction.
      BasicBlock *NewBB = BB->splitBasicBlock(RI->getIterator(), "SP_return");

      // Update the dominator tree if we need to.
      if (DT && DT->isReachableFromEntry(BB)) {
        DT->addNewBlock(NewBB, BB);
        DT->addNewBlock(FailBB, BB);
      }

      // Remove the default branch instruction to the new BB.
      BB->getTerminator()->eraseFromParent();

      // Move the newly created basic block to the point right after the old
      // basic block so that it's in the "fall through" position.
      NewBB->moveAfter(BB);

      // Generate the stack protector instructions in the old basic block.
      IRBuilder<> B(BB);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
      Value *Cmp = B.CreateICmpEQ(Guard, LI2);
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(SuccessProb.getNumerator(),
                                                 FailureProb.getNumerator());
      B.CreateCondBr(Cmp, NewBB, FailBB, Weights);
    }
  }

  // Return whether we modified any basic blocks (we did not if there are no
  // return statements in the function).
  return HasPrologue;
}

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
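  // Give the failure block an artificial (line 0) debug location scoped to
  // this function, so the failure-handler call emitted below carries a
  // location when the function has debug info.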
  B.SetCurrentDebugLocation(DebugLoc::get(0, 0, F->getSubprogram()));
  if (Trip.isOSOpenBSD()) {
    FunctionCallee StackChkFail = M->getOrInsertFunction(
        "__stack_smash_handler", Type::getVoidTy(Context),
        Type::getInt8PtrTy(Context));

    B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    FunctionCallee StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));

    B.CreateCall(StackChkFail, {});
  }
  B.CreateUnreachable();
  return FailBB;
}

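// Called by SelectionDAG to decide whether it still needs to emit its own
// guard check for a return block; true only if this pass created the prologue
// but left the epilogue check to SelectionDAG.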
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}
599