xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/StackProtector.cpp (revision 734e82fe33aa764367791a7d603b383996c6b40b)
//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. When the function returns, the stored value is checked. If it
// has changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

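// When enabled (the default), the epilogue guard check is deferred to
// SelectionDAG whenever the target supports it; otherwise this pass emits the
// check directly in IR.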
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
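// When set, only blocks that end in a return are instrumented; no guard check
// is placed before noreturn calls that may unwind (e.g. __cxa_throw).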
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
  HasPrologue = false;
  HasIRCheck = false;

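  // The minimum buffer size that triggers protection may be overridden per
  // function with the "stack-protector-buffer-size" attribute; otherwise the
  // pass-wide default applies.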
  SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);
  if (!RequiresStackProtector())
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors();
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  DTU.reset();
  return Changed;
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size).  In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
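/// Returns true if \p Ty is an array, or a struct containing an array, that
/// the current heuristic treats as protectable: any array in strong mode,
/// otherwise a character array (or, at the top level on Darwin, any array)
/// of at least ssp-buffer-size bytes.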
bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
                                              bool Strong,
                                              bool InStruct) const {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Trip.isOSDarwin()))
        return false;
    }

    // If an array has SSPBufferSize or more bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done.  If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

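/// Check whether any use of \p AI (or of a pointer derived from it) could let
/// the allocation's address escape or be accessed out of bounds. Derived
/// pointers (GEPs, casts, selects, PHIs) are followed recursively, with
/// \p AllocSize tracking how much of the object is still known to be in
/// bounds.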
bool StackProtector::HasAddressTaken(const Instruction *AI,
                                     TypeSize AllocSize) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize,
                             TypeSize::getFixed(MemLoc->Size.getValue())))
      return true;
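    // Classify this user: uses that may write out of bounds or leak the
    // address force protection, while purely reading or otherwise innocuous
    // uses do not.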
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::Fixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be integer; so if a pointer is being stored,
      // we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic adds a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, to
/// functions with character buffers larger than SSPBufferSize, and to
/// functions with aggregates containing character buffers larger than
/// SSPBufferSize. The strong heuristic adds a guard variable to functions
/// that call alloca regardless of size, to functions with any buffer
/// regardless of type and size, to functions with aggregates that contain
/// any buffer regardless of type and size, and to functions that contain
/// stack-based variables that have had their address taken.
bool StackProtector::RequiresStackProtector() {
  bool Strong = false;
  bool NeedsProtector = false;

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

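  // Record an SSPLayout entry for each alloca that triggers protection. Under
  // the strong heuristic this additionally includes small arrays, arrays of
  // any element type, and locals whose address is taken.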
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              Layout.insert(std::make_pair(AI,
                                           MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              Layout.insert(std::make_pair(AI,
                                           MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            Layout.insert(std::make_pair(AI,
                                         MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
          Layout.insert(std::make_pair(AI, IsLarge
                                       ? MachineFrameInfo::SSPLK_LargeArray
                                       : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong && HasAddressTaken(AI, M->getDataLayout().getTypeAllocSize(
                                              AI->getAllocatedType()))) {
          ++NumAddrTaken;
          Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a stack guard loading and populate whether SelectionDAG SSP is
/// supported.
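/// When the target exposes an IR-level guard location (and the guard mode is
/// the default "tls"), the guard is loaded directly from it; otherwise the
/// llvm.stackguard intrinsic is emitted and SelectionDAG-based protection is
/// reported back to the caller.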
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // It is somewhat awkward that we optionally report from here whether
  // SelectionDAG stack protection should be used. The reason is that the
  // answer is strictly defined as !TLI->getIRStackGuard(B), and getIRStackGuard
  // may mutate the IR. There is no way to obtain this bit without mutating the
  // IR, so it has to be computed at exactly this point.
  //
  // We could define a new hook, TLI::supportsSelectionDAGSP(), but that would
  // put more overriding burden on the backends, especially since it would
  // convey the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = Type::getInt8PtrTy(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the original
///    value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

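  // Instrument every block that ends in a return and, unless disabled, every
  // block containing a potentially-throwing noreturn call.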
  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is the stack-protector-generated check block; skip it.
    if (&BB == FailBB)
      continue;
    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do stack check before noreturn calls that aren't nounwind (e.g.
          // __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call either immediately precedes the
    // return or is separated from it by at most a single bitcast of the return
    // value, so we never need to look back more than two instructions.
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BBs into one,
      // including the fail BB generated by the stack protector pseudo
      // instruction.
      if (!FailBB)
        FailBB = CreateFailBB();

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights,
                                DTU ? &*DTU : nullptr,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);

      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // Return whether we modified any basic blocks; HasPrologue stays false only
  // when there was nothing to instrument, i.e. the function contains no return
  // statements (and no qualifying noreturn calls).
  return HasPrologue;
}

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
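  // OpenBSD's runtime reports the name of the offending function through
  // __stack_smash_handler; all other targets call the standard
  // __stack_chk_fail.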
  if (Trip.isOSOpenBSD()) {
    FunctionCallee StackChkFail = M->getOrInsertFunction(
        "__stack_smash_handler", Type::getVoidTy(Context),
        Type::getInt8PtrTy(Context));

    B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    FunctionCallee StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));

    B.CreateCall(StackChkFail, {});
  }
  B.CreateUnreachable();
  return FailBB;
}

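/// Tell SelectionDAG whether to emit the guard check for this block: only when
/// a prologue exists, no IR-level check was emitted, and the block ends in a
/// return.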
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

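/// Propagate the SSP layout classification computed for each alloca onto the
/// corresponding frame objects so that frame layout can place protected
/// objects appropriately relative to the guard.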
void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}