//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

namespace llvm {
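// cl::opt declared here but defined in LLVM proper; when enabled, coverage
// instrumentation uses single-byte boolean counters rather than full counter
// increments (a summary of the flag's intent, inferred from its name and use).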
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

/// shouldEmitLifetimeMarkers - Decide whether we need to emit lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()),
      PGO(std::make_unique<CodeGenPGO>(cgm)),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "failed to deactivate a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

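// CGFPOptionsRAII installs the floating-point options in effect for a region
// and restores the previous builder state on destruction. A sketch of a
// typical call site:
//   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); // push E's FP env
//   ... emit the floating-point operation ...
//   // prior rounding/exception/fast-math state restored at scope exit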
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");
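  // These function-level attributes must hold for the entire function, so
  // they can only be narrowed: AND the region's value into the existing
  // attribute and rewrite it only when it actually changes.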
  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

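// Shared helper for the MakeNaturalAlign*AddrLValue entry points below.
// MightBeSigned indicates the incoming pointer may carry a pointer
// authentication signature and so must be resolved through
// makeNaturalAddressForPointer; the "Raw" variants pass false and build the
// Address directly (a reading of the signed/raw split suggested by the names).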
static LValue
makeNaturalAlignAddrLValue(llvm::Value *V, QualType T, bool ForPointeeType,
                           bool MightBeSigned, CodeGenFunction &CGF,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment =
      CGF.CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo, ForPointeeType);
  Address Addr =
      MightBeSigned
          ? CGF.makeNaturalAddressForPointer(V, T, Alignment, false, nullptr,
                                             nullptr, IsKnownNonNull)
          : Address(V, CGF.ConvertTypeForMem(T), Alignment, IsKnownNonNull);
  return CGF.MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

LValue
CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                                            KnownNonNull_t IsKnownNonNull) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ true, *this,
                                      IsKnownNonNull);
}

LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ true, *this);
}

LValue CodeGenFunction::MakeNaturalAlignRawAddrLValue(llvm::Value *V,
                                                      QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ false,
                                      /*MightBeSigned*/ false, *this);
}

LValue CodeGenFunction::MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V,
                                                             QualType T) {
  return ::makeNaturalAlignAddrLValue(V, T, /*ForPointeeType*/ true,
                                      /*MightBeSigned*/ false, *this);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

llvm::Type *CodeGenFunction::convertTypeForLoadStore(QualType ASTTy,
                                                     llvm::Type *LLVMTy) {
  return CGM.getTypes().convertTypeForLoadStore(ASTTy, LLVMTy);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");
  assert(LifetimeExtendedCleanupStack.empty() &&
         "mismatched push/pop of cleanups in EHStack!");
  assert(DeferredDeactivationCleanupStack.empty() &&
         "mismatched activate/deactivate of cleanups!");

  if (CGM.shouldEmitConvergenceTokens()) {
    ConvergenceTokenStack.pop_back();
    assert(ConvergenceTokenStack.empty() &&
           "mismatched push/pop in convergence stack!");
  }

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyNoopCleanups =
      HasCleanups && EHStack.containsOnlyNoopCleanups(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyNoopCleanups;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

446   // rather than that of the end of the function's scope '}'.
447   uint64_t RetKeyInstructionsAtomGroup = Loc ? Loc->getAtomGroup() : 0;
448   ApplyDebugLocation AL(*this, Loc);
449   EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc,
450                      RetKeyInstructionsAtomGroup);
451   EmitEndEHSpec(CurCodeDecl);
452 
453   assert(EHStack.empty() &&
454          "did not remove all scopes from cleanup stack!");
455 
456   // If someone did an indirect goto, emit the indirect goto block at the end of
457   // the function.
458   if (IndirectBranch) {
459     EmitBlock(IndirectBranch->getParent());
460     Builder.ClearInsertionPoint();
461   }
462 
463   // If some of our locals escaped, insert a call to llvm.localescape in the
464   // entry block.
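  // (llvm.localescape lets outlined helpers, e.g. SEH filter funclets, find
  // these allocas later via llvm.localrecover.)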
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt, if it exists, was created lazily when first required;
  // remove it now since it was just a convenience for us.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::PoisonValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

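  // Apply instruction replacements that were queued up during emission:
  // replace all uses of the old instruction, then erase it.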
  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do this for all
  // functions as it may result in a slight increase in the number of
  // instructions when compiled without optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca =
        dyn_cast<llvm::AllocaInst>(ReturnValue.emitRawPointer(*this));
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!Ty->isFunctionNoProtoType())
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
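  // Hash the canonical mangling of the type down to 32 bits; the function
  // sanitizer compares this value at indirect call sites against the hash
  // stored alongside the callee.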
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<DeviceKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!(getLangOpts().OpenCL ||
        (getLangOpts().CUDA &&
         getContext().getTargetInfo().getTriple().isSPIRV())))
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::PoisonValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    auto Eval = [&](Expr *E) {
      return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
    };
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    auto Eval = [&](Expr *E) {
      return E->EvaluateKnownConstInt(FD->getASTContext()).getExtValue();
    };
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getXDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getYDim()))),
        llvm::ConstantAsMetadata::get(Builder.getInt32(Eval(A->getZDim())))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

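/// Tell the TSan runtime, via a string attribute, to do no checking inside
/// this function, and drop the SanitizeThread IR attribute so the pass does
/// not instrument its memory accesses.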
void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
  return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
         getTarget().getCXXABI().isMicrosoft() &&
         llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
           return isInAllocaArgument(CGM.getCXXABI(), P->getType());
         });
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
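  // (The do/while(false) lets the per-sanitizer checks expanded below break
  // out early once every sanitizer has been dropped from SanOpts.)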
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.has(SanitizerKind::Type))
      Fn->addFnAttr(llvm::Attribute::SanitizeType);
    if (SanOpts.has(SanitizerKind::NumericalStability))
      Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  if (SanOpts.has(SanitizerKind::Realtime))
    if (FD && FD->getASTContext().hasAnyFunctionEffects())
      for (const FunctionEffectWithCondition &Fe : FD->getFunctionEffects()) {
        if (Fe.Effect.kind() == FunctionEffect::Kind::NonBlocking)
          Fn->addFnAttr(llvm::Attribute::SanitizeRealtime);
        else if (Fe.Effect.kind() == FunctionEffect::Kind::Blocking)
          Fn->addFnAttr(llvm::Attribute::SanitizeRealtimeBlocking);
      }

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      const IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on the
  // namespace because not all allocators are in std::
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutine passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

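    // When the build shards XRay instrumentation into more than one function
    // group, deterministically assign this function to a group by hashing its
    // name, and instrument it only if it falls in the selected group (or was
    // explicitly marked xray-always above).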
    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() !=
      llvm::driver::ProfileInstrKind::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  StringRef Section;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
    Section = Attr->getSection();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Section.empty())
    Section = CGM.getCodeGenOpts().PatchableFunctionEntrySection;
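  // Per -fpatchable-function-entry=N,M semantics, Offset (M) NOPs go before
  // the entry point and Count-Offset (N-M) after it, so the entry attribute
  // carries the post-entry count and the prefix attribute the pre-entry count.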
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
    if (!Section.empty())
      Fn->addFnAttr("patchable-function-entry-section", Section);
  }
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D && D->hasAttr<HybridPatchableAttr>())
    Fn->addFnAttr(llvm::Attribute::HybridPatchable);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().CUDA &&
              getContext().getTargetInfo().getTriple().isSPIRV()) ||
             ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
              getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  if (FD && FD->hasAttr<ClspvLibclcBuiltinAttr>()) {
    Fn->setMetadata("clspv_libclc_builtin",
                    llvm::MDNode::get(getLLVMContext(), {}));
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull &&
        !FnRetTy->isRecordType()) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // HLSL
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD &&
      ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
       getLangOpts().HLSL || getLangOpts().SYCLIsDevice ||
       (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function, which will certainly need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  // Add vscale_range attribute if appropriate.
  llvm::StringMap<bool> FeatureMap;
  auto IsArmStreaming = TargetInfo::ArmStreamingKind::NotStreaming;
  if (FD) {
    getContext().getFunctionFeatureMap(FeatureMap, FD);
    if (const auto *T = FD->getType()->getAs<FunctionProtoType>())
      if (T->getAArch64SMEAttributes() &
          FunctionType::SME_PStateSMCompatibleMask)
        IsArmStreaming = TargetInfo::ArmStreamingKind::StreamingCompatible;

    if (IsArmStreamingFunction(FD, true))
      IsArmStreaming = TargetInfo::ArmStreamingKind::Streaming;
  }
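  // vscale_range bounds the value of vscale for scalable vector types; it
  // depends on the target's features and, on AArch64, on the function's
  // streaming mode.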
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts(), IsArmStreaming,
                                                  &FeatureMap);
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Poison = llvm::PoisonValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Poison, Int32Ty, "allocapt", EntryBB);
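  // The bitcast of poison is dead and side-effect free; it exists only as a
  // stable insertion point and is erased again in FinishFunction().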

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here would impact optimizations such as
  // function inlining, we just add an attribute that tells the backend to
  // insert an mcount call. The attribute value is set to the mcount function
  // name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
            << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
            << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
        << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = makeNaturalAddressForPointer(
        &*AI, RetTy, CurFnInfo->getReturnInfo().getIndirectAlign(), false,
        nullptr, nullptr, KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(ReturnValue.getType(), "result.ptr");
      Builder.CreateStore(ReturnValue.emitRawPointer(*this),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(
        CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, ConvertType(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  if (FD && getLangOpts().HLSL) {
    // Handle emitting HLSL entry functions.
    if (FD->hasAttr<HLSLShaderAttr>()) {
      CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
    }
  }

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
      MD && !MD->isStatic()) {
    bool IsInLambda =
        MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
    if (MD->isImplicitObjectMemberFunction())
      CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    if (IsInLambda) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this', either by
        // value or by reference, make sure CXXThisValue points to the correct
1293         // object.
1294 
1295         // Get the lvalue for the field (which is a copy of the enclosing object
1296         // or contains the address of the enclosing object).
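        // Sketch of the two cases (illustrative only):
        //   [*this]() { ... }  // by-value capture: the field holds a copy
        //   [this]()  { ... }  // by-reference: the field holds a pointer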
1297         LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1298         if (!LambdaThisCaptureField->getType()->isPointerType()) {
1299           // If the enclosing object was captured by value, just use its
1300           // address. Sign this pointer.
1301           CXXThisValue = ThisFieldLValue.getPointer(*this);
1302         } else {
1303           // Load the lvalue pointed to by the field, since '*this' was captured
1304           // by reference.
1305           CXXThisValue =
1306               EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1307         }
1308       }
1309       for (auto *FD : MD->getParent()->fields()) {
1310         if (FD->hasCapturedVLAType()) {
1311           auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1312                                            SourceLocation()).getScalarVal();
1313           auto VAT = FD->getCapturedVLAType();
1314           VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1315         }
1316       }
1317     } else if (MD->isImplicitObjectMemberFunction()) {
1318       // Not in a lambda; just use 'this' from the method.
1319       // FIXME: Should we generate a new load for each use of 'this'?  The
1320       // fast register allocator would be happier...
1321       CXXThisValue = CXXABIThisValue;
1322     }
1323 
1324     // Check the 'this' pointer once per function, if it's available.
1325     if (CXXABIThisValue) {
1326       SanitizerSet SkippedChecks;
1327       SkippedChecks.set(SanitizerKind::ObjectSize, true);
1328       QualType ThisTy = MD->getThisType();
1329 
1330       // If this is the call operator of a lambda with no captures, it
1331       // may have a static invoker function, which may call this operator with
1332       // a null 'this' pointer.
1333       if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
1334         SkippedChecks.set(SanitizerKind::Null, true);
1335 
1336       EmitTypeCheck(
1337           isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
1338           Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1339     }
1340   }
1341 
1342   // If any of the arguments have a variably modified type, make sure to
1343   // evaluate the type's size expression, but only if the function is not
1344   // naked. Naked functions have no prologue in which to run this evaluation.
1345   if (!FD || !FD->hasAttr<NakedAttr>()) {
1346     for (const VarDecl *VD : Args) {
1347       // Dig out the type as written from ParmVarDecls; it's unclear whether
1348       // the standard (C99 6.9.1p10) requires this, but we're following the
1349       // precedent set by gcc.
1350       QualType Ty;
1351       if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1352         Ty = PVD->getOriginalType();
1353       else
1354         Ty = VD->getType();
1355 
1356       if (Ty->isVariablyModifiedType())
1357         EmitVariablyModifiedType(Ty);
1358     }
1359   }
1360   // Emit a location at the end of the prologue.
1361   if (CGDebugInfo *DI = getDebugInfo())
1362     DI->EmitLocation(Builder, StartLoc);
1363   // TODO: Do we need to handle this in two places like we do with
1364   // target-features/target-cpu?
1365   if (CurFuncDecl)
1366     if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1367       LargestVectorWidth = VecWidth->getVectorWidth();
1368 
1369   if (CGM.shouldEmitConvergenceTokens())
1370     ConvergenceTokenStack.push_back(getOrEmitConvergenceEntryToken(CurFn));
1371 }
1372 
1373 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1374   incrementProfileCounter(Body);
1375   maybeCreateMCDCCondBitmap();
1376   if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1377     EmitCompoundStmtWithoutScope(*S);
1378   else
1379     EmitStmt(Body);
1380 }
1381 
1382 /// When instrumenting to collect profile data, the counts for some blocks
1383 /// such as switch cases must not include the fall-through counts, so
1384 /// emit a branch around the instrumentation code. When not instrumenting,
1385 /// this just calls EmitBlock().
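/// Layout sketch (illustrative), in the instrumented case:
///   br label %skipcount   ; fall-through edge bypasses the counter bump
/// BB:                     ; explicit branches (e.g. case labels) land here
///   <increment profile counter>
/// skipcount:              ; both paths continue here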
1386 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1387                                                const Stmt *S) {
1388   llvm::BasicBlock *SkipCountBB = nullptr;
1389   // Do not skip over the instrumentation when single-byte coverage mode is
1390   // enabled.
1391   if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1392       !llvm::EnableSingleByteCoverage) {
1393     // When instrumenting for profiling, the fallthrough to certain
1394     // statements needs to skip over the instrumentation code so that we
1395     // get an accurate count.
1396     SkipCountBB = createBasicBlock("skipcount");
1397     EmitBranch(SkipCountBB);
1398   }
1399   EmitBlock(BB);
1400   uint64_t CurrentCount = getCurrentProfileCount();
1401   incrementProfileCounter(S);
1402   setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1403   if (SkipCountBB)
1404     EmitBlock(SkipCountBB);
1405 }
1406 
1407 /// Tries to mark the given function nounwind based on the
1408 /// non-existence of any throwing calls within it.  We believe this is
1409 /// lightweight enough to do at -O0.
1410 static void TryMarkNoThrow(llvm::Function *F) {
1411   // Inferring 'nounwind' from the body is only sound if this body is the
1412   // one that will actually run, so skip functions that can be interposed.
1413   if (F->isInterposable()) return;
1414 
1415   for (llvm::BasicBlock &BB : *F)
1416     for (llvm::Instruction &I : BB)
1417       if (I.mayThrow())
1418         return;
1419 
1420   F->setDoesNotThrow();
1421 }
1422 
1423 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1424                                                FunctionArgList &Args) {
1425   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1426   QualType ResTy = FD->getReturnType();
1427 
1428   const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1429   if (MD && MD->isImplicitObjectMemberFunction()) {
1430     if (CGM.getCXXABI().HasThisReturn(GD))
1431       ResTy = MD->getThisType();
1432     else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1433       ResTy = CGM.getContext().VoidPtrTy;
1434     CGM.getCXXABI().buildThisParam(*this, Args);
1435   }
1436 
1437   // The base version of an inheriting constructor whose constructed base is a
1438   // virtual base is not passed any arguments (because it doesn't actually call
1439   // the inherited constructor).
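  // Illustrative case:
  //   struct B { B(int); };
  //   struct D : virtual B { using B::B; };
  // The base-object variant of D's inheriting constructor receives no int
  // argument, because the most-derived object's constructor builds B.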
1440   bool PassedParams = true;
1441   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1442     if (auto Inherited = CD->getInheritedConstructor())
1443       PassedParams =
1444           getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1445 
1446   if (PassedParams) {
1447     for (auto *Param : FD->parameters()) {
1448       Args.push_back(Param);
1449       if (!Param->hasAttr<PassObjectSizeAttr>())
1450         continue;
1451 
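      // E.g. (illustrative): a parameter declared as
      //   void fill(char *buf __attribute__((pass_object_size(0))));
      // is followed by an implicit size_t argument carrying the value of
      //   __builtin_object_size(buf, 0)
      // as evaluated at each call site.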
1452       auto *Implicit = ImplicitParamDecl::Create(
1453           getContext(), Param->getDeclContext(), Param->getLocation(),
1454           /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
1455       SizeArguments[Param] = Implicit;
1456       Args.push_back(Implicit);
1457     }
1458   }
1459 
1460   if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1461     CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1462 
1463   return ResTy;
1464 }
1465 
1466 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1467                                    const CGFunctionInfo &FnInfo) {
1468   assert(Fn && "generating code for null Function");
1469   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1470   CurGD = GD;
1471 
1472   FunctionArgList Args;
1473   QualType ResTy = BuildFunctionArgList(GD, Args);
1474 
1475   CGM.getTargetCodeGenInfo().checkFunctionABI(CGM, FD);
1476 
1477   if (FD->isInlineBuiltinDeclaration()) {
1478     // When generating code for a builtin with an inline declaration, use a
1479     // mangled name to hold the actual body, while keeping an external
1480     // definition in case the function pointer is referenced somewhere.
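    // Typical trigger (illustrative): a gnu_inline forwarder such as
    //   extern inline __attribute__((always_inline, gnu_inline))
    //   void *memcpy(void *d, const void *s, unsigned long n) { ... }
    // whose body is emitted under the internal name "memcpy.inline" while
    // the externally visible "memcpy" symbol remains for address-taken
    // uses.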
1481     std::string FDInlineName = (Fn->getName() + ".inline").str();
1482     llvm::Module *M = Fn->getParent();
1483     llvm::Function *Clone = M->getFunction(FDInlineName);
1484     if (!Clone) {
1485       Clone = llvm::Function::Create(Fn->getFunctionType(),
1486                                      llvm::GlobalValue::InternalLinkage,
1487                                      Fn->getAddressSpace(), FDInlineName, M);
1488       Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1489     }
1490     Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1491     Fn = Clone;
1492   } else {
1493     // Detect the unusual situation where an inline version is shadowed by a
1494     // non-inline version. In that case we should pick the external one
1495     // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1496     // to detect that situation before we reach codegen, so do some late
1497     // replacement.
1498     for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1499          PD = PD->getPreviousDecl()) {
1500       if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1501         std::string FDInlineName = (Fn->getName() + ".inline").str();
1502         llvm::Module *M = Fn->getParent();
1503         if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1504           Clone->replaceAllUsesWith(Fn);
1505           Clone->eraseFromParent();
1506         }
1507         break;
1508       }
1509     }
1510   }
1511 
1512   // Check if we should generate debug info for this function.
1513   if (FD->hasAttr<NoDebugAttr>()) {
1514     // Clear non-distinct debug info that was possibly attached to the function
1515     // due to an earlier declaration without the nodebug attribute.
1516     Fn->setSubprogram(nullptr);
1517     // Disable debug info indefinitely for this function.
1518     DebugInfo = nullptr;
1519   }
1520   // Finalize function debug info on exit.
1521   auto Cleanup = llvm::make_scope_exit([this] {
1522     if (CGDebugInfo *DI = getDebugInfo())
1523       DI->completeFunction();
1524   });
1525 
1526   // The function might not have a body if we're generating thunks for a
1527   // function declaration.
1528   SourceRange BodyRange;
1529   if (Stmt *Body = FD->getBody())
1530     BodyRange = Body->getSourceRange();
1531   else
1532     BodyRange = FD->getLocation();
1533   CurEHLocation = BodyRange.getEnd();
1534 
1535   // Use the location of the start of the function to determine where
1536   // the function definition is located. By default, use the location
1537   // of the declaration as the location for the subprogram. A function
1538   // may lack a declaration in the source code if it is created by
1539   // codegen (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1540   SourceLocation Loc = FD->getLocation();
1541 
1542   // If this is a function specialization, then use the pattern body
1543   // as the location for the function.
1544   if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1545     if (SpecDecl->hasBody(SpecDecl))
1546       Loc = SpecDecl->getLocation();
1547 
1548   Stmt *Body = FD->getBody();
1549 
1550   if (Body) {
1551     // Coroutines always emit lifetime markers.
1552     if (isa<CoroutineBodyStmt>(Body))
1553       ShouldEmitLifetimeMarkers = true;
1554 
1555     // Initialize the helper that detects jumps that could invalidate the
1556     // lifetime markers.
1557     if (ShouldEmitLifetimeMarkers)
1558       Bypasses.Init(CGM, Body);
1559   }
1560 
1561   // Emit the standard function prologue.
1562   StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1563 
1564   // Save parameters for coroutine function.
1565   if (Body && isa<CoroutineBodyStmt>(Body))
1566     llvm::append_range(FnArgs, FD->parameters());
1567 
1568   // Ensure that the function adheres to the forward progress guarantee, which
1569   // is required by certain optimizations.
1570   // In C++11 and up, the attribute will be removed if the body contains a
1571   // trivial empty loop.
1572   if (checkIfFunctionMustProgress())
1573     CurFn->addFnAttr(llvm::Attribute::MustProgress);
1574 
1575   // Generate the body of the function.
1576   PGO->assignRegionCounters(GD, CurFn);
1577   if (isa<CXXDestructorDecl>(FD))
1578     EmitDestructorBody(Args);
1579   else if (isa<CXXConstructorDecl>(FD))
1580     EmitConstructorBody(Args);
1581   else if (getLangOpts().CUDA &&
1582            !getLangOpts().CUDAIsDevice &&
1583            FD->hasAttr<CUDAGlobalAttr>())
1584     CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1585   else if (isa<CXXMethodDecl>(FD) &&
1586            cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1587     // The lambda static invoker function is special, because it forwards or
1588     // clones the body of the function call operator (but is actually static).
1589     EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1590   } else if (isa<CXXMethodDecl>(FD) &&
1591              isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
1592              !FnInfo.isDelegateCall() &&
1593              cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
1594              hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
1595     // If emitting a lambda with a static invoker on x86 Windows, rewrite
1596     // the call operator body rather than emitting it directly.
1597     // The conditions above ensure this is a call operator with an inalloca
1598     // argument, and the delegate-call check ensures it is the original call
1599     // operator and not the new forwarding function for the static invoker.
1600     EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
1601   } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1602              (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1603               cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1604     // Implicit copy-assignment gets the same special treatment as implicit
1605     // copy-constructors.
1606     emitImplicitAssignmentOperatorBody(Args);
1607   } else if (DeviceKernelAttr::isOpenCLSpelling(
1608                  FD->getAttr<DeviceKernelAttr>()) &&
1609              GD.getKernelReferenceKind() == KernelReferenceKind::Kernel) {
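    // Illustrative shape of the result: the OpenCL kernel entry point
    // becomes a thin wrapper that forwards its arguments to an internal
    // "stub" function carrying the real body.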
1610     CallArgList CallArgs;
1611     for (unsigned i = 0; i < Args.size(); ++i) {
1612       Address ArgAddr = GetAddrOfLocalVar(Args[i]);
1613       QualType ArgQualType = Args[i]->getType();
1614       RValue ArgRValue = convertTempToRValue(ArgAddr, ArgQualType, Loc);
1615       CallArgs.add(ArgRValue, ArgQualType);
1616     }
1617     GlobalDecl GDStub = GlobalDecl(FD, KernelReferenceKind::Stub);
1618     const FunctionType *FT = cast<FunctionType>(FD->getType());
1619     CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FT);
1620     const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
1621         CallArgs, FT, /*ChainCall=*/false);
1622     llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FnInfo);
1623     llvm::Constant *GDStubFunctionPointer =
1624         CGM.getRawFunctionPointer(GDStub, FTy);
1625     CGCallee GDStubCallee = CGCallee::forDirect(GDStubFunctionPointer, GDStub);
1626     EmitCall(FnInfo, GDStubCallee, ReturnValueSlot(), CallArgs, nullptr, false,
1627              Loc);
1628   } else if (Body) {
1629     EmitFunctionBody(Body);
1630   } else
1631     llvm_unreachable("no definition for emitted function");
1632 
1633   // C++11 [stmt.return]p2:
1634   //   Flowing off the end of a function [...] results in undefined behavior in
1635   //   a value-returning function.
1636   // C11 6.9.1p12:
1637   //   If the '}' that terminates a function is reached, and the value of the
1638   //   function call is used by the caller, the behavior is undefined.
1639   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1640       !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1641     bool ShouldEmitUnreachable =
1642         CGM.getCodeGenOpts().StrictReturn ||
1643         !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1644     if (SanOpts.has(SanitizerKind::Return)) {
1645       auto CheckOrdinal = SanitizerKind::SO_Return;
1646       auto CheckHandler = SanitizerHandler::MissingReturn;
1647       SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
1648       llvm::Value *IsFalse = Builder.getFalse();
1649       EmitCheck(std::make_pair(IsFalse, CheckOrdinal), CheckHandler,
1650                 EmitCheckSourceLocation(FD->getLocation()), {});
1651     } else if (ShouldEmitUnreachable) {
1652       if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1653         EmitTrapCall(llvm::Intrinsic::trap);
1654     }
1655     if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1656       Builder.CreateUnreachable();
1657       Builder.ClearInsertionPoint();
1658     }
1659   }
1660 
1661   // Emit the standard function epilogue.
1662   FinishFunction(BodyRange.getEnd());
1663 
1664   PGO->verifyCounterMap();
1665 
1666   // If we haven't marked the function nothrow through other means, do
1667   // a quick pass now to see if we can.
1668   if (!CurFn->doesNotThrow())
1669     TryMarkNoThrow(CurFn);
1670 }
1671 
1672 /// ContainsLabel - Return true if the statement contains a label.  If this
1673 /// statement is not executed normally, the absence of a label means that we
1674 /// can just remove the code.
1675 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1676   // Null statement, not a label!
1677   if (!S) return false;
1678 
1679   // If this is a label, we have to emit the code; consider something like:
1680   // if (0) {  ...  foo:  bar(); }  goto foo;
1681   //
1682   // TODO: If anyone cared, we could track __label__'s, since we know that you
1683   // can't jump to one from outside their declared region.
1684   if (isa<LabelStmt>(S))
1685     return true;
1686 
1687   // If this is a case/default statement, and we haven't seen a switch, we have
1688   // to emit the code.
1689   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1690     return true;
1691 
1692   // If this is a switch statement, we want to ignore cases below it.
1693   if (isa<SwitchStmt>(S))
1694     IgnoreCaseStmts = true;
1695 
1696   // Scan subexpressions for verboten labels.
1697   for (const Stmt *SubStmt : S->children())
1698     if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1699       return true;
1700 
1701   return false;
1702 }
1703 
1704 /// containsBreak - Return true if the statement contains a break out of it.
1705 /// A break nested inside a switch or loop within the statement targets that
1706 /// construct rather than the statement itself, so it does not count.
1707 bool CodeGenFunction::containsBreak(const Stmt *S) {
1708   // Null statement, no break!
1709   if (!S) return false;
1710 
1711   // If this is a switch or loop that defines its own break scope, then we can
1712   // include it and anything inside of it.
1713   if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1714       isa<ForStmt>(S))
1715     return false;
1716 
1717   if (isa<BreakStmt>(S))
1718     return true;
1719 
1720   // Scan subexpressions for verboten breaks.
1721   for (const Stmt *SubStmt : S->children())
1722     if (containsBreak(SubStmt))
1723       return true;
1724 
1725   return false;
1726 }
1727 
1728 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1729   if (!S) return false;
1730 
1731   // Some statement kinds add a scope and thus never add a decl to the current
1732   // scope. Note, this list is longer than the list of statements that might
1733   // have an unscoped decl nested within them, but this way is conservatively
1734   // correct even if more statement kinds are added.
1735   if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1736       isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1737       isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1738       isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1739     return false;
1740 
1741   if (isa<DeclStmt>(S))
1742     return true;
1743 
1744   for (const Stmt *SubStmt : S->children())
1745     if (mightAddDeclToScope(SubStmt))
1746       return true;
1747 
1748   return false;
1749 }
1750 
1751 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1752 /// to a constant, or if it does but contains a label, return false.  If it
1753 /// constant folds, return true and set the boolean result in ResultBool.
1754 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1755                                                    bool &ResultBool,
1756                                                    bool AllowLabels) {
1757   // If MC/DC is enabled, disable folding so that we can instrument all
1758   // conditions to yield complete test vectors. We still keep track of
1759   // folded conditions during region mapping and visualization.
1760   if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
1761       CGM.getCodeGenOpts().MCDCCoverage)
1762     return false;
1763 
1764   llvm::APSInt ResultInt;
1765   if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1766     return false;
1767 
1768   ResultBool = ResultInt.getBoolValue();
1769   return true;
1770 }
1771 
1772 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1773 /// to a constant, or if it does but contains a label, return false.  If it
1774 /// constant folds, return true and set the folded value in ResultInt.
1775 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1776                                                    llvm::APSInt &ResultInt,
1777                                                    bool AllowLabels) {
1778   // FIXME: Rename and handle conversion of other evaluatable things
1779   // to bool.
1780   Expr::EvalResult Result;
1781   if (!Cond->EvaluateAsInt(Result, getContext()))
1782     return false;  // Not foldable, not integer or not fully evaluatable.
1783 
1784   llvm::APSInt Int = Result.Val.getInt();
1785   if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1786     return false;  // Contains a label.
1787 
1788   PGO->markStmtMaybeUsed(Cond);
1789   ResultInt = Int;
1790   return true;
1791 }
1792 
1793 /// Strip parentheses and simplistic logical-NOT operators.
1794 const Expr *CodeGenFunction::stripCond(const Expr *C) {
1795   while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(C->IgnoreParens())) {
1796     if (Op->getOpcode() != UO_LNot)
1797       break;
1798     C = Op->getSubExpr();
1799   }
1800   return C->IgnoreParens();
1801 }
1802 
1803 /// Determine whether the given condition is an instrumentable condition
1804 /// (i.e. no "&&" or "||").
1805 bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1806   const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
1807   return (!BOp || !BOp->isLogicalOp());
1808 }
1809 
1810 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1811 /// increments a profile counter based on the semantics of the given logical
1812 /// operator opcode.  This is used to instrument branch condition coverage for
1813 /// logical operators.
1814 void CodeGenFunction::EmitBranchToCounterBlock(
1815     const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1816     llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1817     Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1818   // If not instrumenting, just emit a branch.
1819   bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1820   if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1821     return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1822 
1823   const Stmt *CntrStmt = (CntrIdx ? CntrIdx : Cond);
1824 
1825   llvm::BasicBlock *ThenBlock = nullptr;
1826   llvm::BasicBlock *ElseBlock = nullptr;
1827   llvm::BasicBlock *NextBlock = nullptr;
1828 
1829   // Create the block we'll use to increment the appropriate counter.
1830   llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1831 
1832   // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1833   // means we need to evaluate the condition and increment the counter on TRUE:
1834   //
1835   // if (Cond)
1836   //   goto CounterIncrBlock;
1837   // else
1838   //   goto FalseBlock;
1839   //
1840   // CounterIncrBlock:
1841   //   Counter++;
1842   //   goto TrueBlock;
1843 
1844   if (LOp == BO_LAnd) {
1845     ThenBlock = CounterIncrBlock;
1846     ElseBlock = FalseBlock;
1847     NextBlock = TrueBlock;
1848   }
1849 
1850   // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1851   // we need to evaluate the condition and increment the counter on FALSE:
1852   //
1853   // if (Cond)
1854   //   goto TrueBlock;
1855   // else
1856   //   goto CounterIncrBlock;
1857   //
1858   // CounterIncrBlock:
1859   //   Counter++;
1860   //   goto FalseBlock;
1861 
1862   else if (LOp == BO_LOr) {
1863     ThenBlock = TrueBlock;
1864     ElseBlock = CounterIncrBlock;
1865     NextBlock = FalseBlock;
1866   } else {
1867     llvm_unreachable("expected a logical operator opcode");
1868   }
1869 
1870   // Emit Branch based on condition.
1871   EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1872 
1873   // Emit the block containing the counter increment(s).
1874   EmitBlock(CounterIncrBlock);
1875 
1876   // Increment corresponding counter; if index not provided, use Cond as index.
1877   incrementProfileCounter(CntrStmt);
1878 
1879   // Go to the next block.
1880   EmitBranch(NextBlock);
1881 }
1882 
1883 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1884 /// statement) to the specified blocks.  Based on the condition, this might try
1885 /// to simplify the codegen of the conditional based on the branch.
1886 /// \param LH The value of the likelihood attribute on the True branch.
1887 /// \param ConditionalOp Used by MC/DC code coverage to track the result of the
1888 /// ConditionalOperator (ternary) through a recursive call for the operator's
1889 /// LHS and RHS nodes.
1890 void CodeGenFunction::EmitBranchOnBoolExpr(
1891     const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
1892     uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp,
1893     const VarDecl *ConditionalDecl) {
1894   Cond = Cond->IgnoreParens();
1895 
1896   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1897     // Handle X && Y in a condition.
1898     if (CondBOp->getOpcode() == BO_LAnd) {
1899       MCDCLogOpStack.push_back(CondBOp);
1900 
1901       // If we have "1 && X", simplify the code.  "0 && X" would have been
1902       // constant folded if the case was simple enough.
1903       bool ConstantBool = false;
1904       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1905           ConstantBool) {
1906         // br(1 && X) -> br(X).
1907         incrementProfileCounter(CondBOp);
1908         EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1909                                  FalseBlock, TrueCount, LH);
1910         MCDCLogOpStack.pop_back();
1911         return;
1912       }
1913 
1914       // If we have "X && 1", simplify the code to use an uncond branch.
1915       // "X && 0" would have been constant folded to 0.
1916       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1917           ConstantBool) {
1918         // br(X && 1) -> br(X).
1919         EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1920                                  FalseBlock, TrueCount, LH, CondBOp);
1921         MCDCLogOpStack.pop_back();
1922         return;
1923       }
1924 
1925       // Emit the LHS as a conditional.  If the LHS conditional is false, we
1926       // want to jump to the FalseBlock.
1927       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1928       // The counter tells us how often we evaluate RHS, and all of TrueCount
1929       // can be propagated to that branch.
1930       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1931 
1932       ConditionalEvaluation eval(*this);
1933       {
1934         ApplyDebugLocation DL(*this, Cond);
1935         // Propagate the likelihood attribute like __builtin_expect
1936         // __builtin_expect(X && Y, 1) -> X and Y are likely
1937         // __builtin_expect(X && Y, 0) -> only Y is unlikely
1938         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1939                              LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1940         EmitBlock(LHSTrue);
1941       }
1942 
1943       incrementProfileCounter(CondBOp);
1944       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1945 
1946       // Any temporaries created here are conditional.
1947       eval.begin(*this);
1948       EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1949                                FalseBlock, TrueCount, LH);
1950       eval.end(*this);
1951       MCDCLogOpStack.pop_back();
1952       return;
1953     }
1954 
1955     if (CondBOp->getOpcode() == BO_LOr) {
1956       MCDCLogOpStack.push_back(CondBOp);
1957 
1958       // If we have "0 || X", simplify the code.  "1 || X" would have been
1959       // constant folded if the case was simple enough.
1960       bool ConstantBool = false;
1961       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1962           !ConstantBool) {
1963         // br(0 || X) -> br(X).
1964         incrementProfileCounter(CondBOp);
1965         EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1966                                  FalseBlock, TrueCount, LH);
1967         MCDCLogOpStack.pop_back();
1968         return;
1969       }
1970 
1971       // If we have "X || 0", simplify the code to use an uncond branch.
1972       // "X || 1" would have been constant folded to 1.
1973       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1974           !ConstantBool) {
1975         // br(X || 0) -> br(X).
1976         EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1977                                  FalseBlock, TrueCount, LH, CondBOp);
1978         MCDCLogOpStack.pop_back();
1979         return;
1980       }
1981       // Emit the LHS as a conditional.  If the LHS conditional is true, we
1982       // want to jump to the TrueBlock.
1983       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1984       // We have the count for entry to the RHS and for the whole expression
1985       // being true, so we can divvy up the true count between the short
1986       // circuit and the RHS.
1987       uint64_t LHSCount =
1988           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1989       uint64_t RHSCount = TrueCount - LHSCount;
1990 
1991       ConditionalEvaluation eval(*this);
1992       {
1993         // Propagate the likelihood attribute like __builtin_expect
1994         // __builtin_expect(X || Y, 1) -> only Y is likely
1995         // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1996         ApplyDebugLocation DL(*this, Cond);
1997         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1998                              LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1999         EmitBlock(LHSFalse);
2000       }
2001 
2002       incrementProfileCounter(CondBOp);
2003       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
2004 
2005       // Any temporaries created here are conditional.
2006       eval.begin(*this);
2007       EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
2008                                RHSCount, LH);
2009 
2010       eval.end(*this);
2011       MCDCLogOpStack.pop_back();
2012       return;
2013     }
2014   }
2015 
2016   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
2017     // br(!x, t, f) -> br(x, f, t)
2018     // Avoid doing this optimization when instrumenting a condition for MC/DC.
2019     // LNot is taken as part of the condition for simplicity, and changing its
2020     // sense negatively impacts test vector tracking.
2021     bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
2022                          CGM.getCodeGenOpts().MCDCCoverage &&
2023                          isInstrumentedCondition(Cond);
2024     if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
2025       // Negate the count.
2026       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
2027       // The values of the enum are chosen to make this negation possible.
2028       LH = static_cast<Stmt::Likelihood>(-LH);
2029       // Negate the condition and swap the destination blocks.
2030       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
2031                                   FalseCount, LH);
2032     }
2033   }
2034 
2035   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
2036     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
2037     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
2038     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
2039 
2040     // The ConditionalOperator itself has no likelihood information for its
2041     // true and false branches. This matches the behavior of __builtin_expect.
2042     ConditionalEvaluation cond(*this);
2043     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
2044                          getProfileCount(CondOp), Stmt::LH_None);
2045 
2046     // When computing PGO branch weights, we only know the overall count for
2047     // the true block. This code is essentially doing tail duplication of the
2048     // naive code-gen, introducing new edges for which counts are not
2049     // available. Divide the counts proportionally between the LHS and RHS of
2050     // the conditional operator.
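    // Worked example (illustrative): if this condition region was entered
    // 100 times with TrueCount == 80, and the LHS arm ran 60 of those
    // times, then LHSScaledTrueCount = 80 * (60 / 100.0) = 48 and the RHS
    // arm receives the remaining 32.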
2051     uint64_t LHSScaledTrueCount = 0;
2052     if (TrueCount) {
2053       double LHSRatio =
2054           getProfileCount(CondOp) / (double)getCurrentProfileCount();
2055       LHSScaledTrueCount = TrueCount * LHSRatio;
2056     }
2057 
2058     cond.begin(*this);
2059     EmitBlock(LHSBlock);
2060     incrementProfileCounter(CondOp);
2061     {
2062       ApplyDebugLocation DL(*this, Cond);
2063       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
2064                            LHSScaledTrueCount, LH, CondOp);
2065     }
2066     cond.end(*this);
2067 
2068     cond.begin(*this);
2069     EmitBlock(RHSBlock);
2070     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
2071                          TrueCount - LHSScaledTrueCount, LH, CondOp);
2072     cond.end(*this);
2073 
2074     return;
2075   }
2076 
2077   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
2078     // Conditional operator handling can give us a throw expression as a
2079     // condition for a case like:
2080     //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
2081     // Fold this to:
2082     //   br(c, throw x, br(y, t, f))
2083     EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
2084     return;
2085   }
2086 
2087   // Emit the code with the fully general case.
2088   llvm::Value *CondV;
2089   {
2090     ApplyDebugLocation DL(*this, Cond);
2091     CondV = EvaluateExprAsBool(Cond);
2092   }
2093 
2094   MaybeEmitDeferredVarDeclInit(ConditionalDecl);
2095 
2096   // If not at the top of the logical operator nest, update MCDC temp with the
2097   // boolean result of the evaluated condition.
2098   if (!MCDCLogOpStack.empty()) {
2099     const Expr *MCDCBaseExpr = Cond;
2100     // When a nested ConditionalOperator (ternary) is encountered in a boolean
2101     // expression, MC/DC tracks the result of the ternary, and this is tied to
2102     // the ConditionalOperator expression and not the ternary's LHS or RHS. If
2103     // this is the case, the ConditionalOperator expression is passed through
2104     // the ConditionalOp parameter and then used as the MCDC base expression.
2105     if (ConditionalOp)
2106       MCDCBaseExpr = ConditionalOp;
2107 
2108     maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
2109   }
2110 
2111   llvm::MDNode *Weights = nullptr;
2112   llvm::MDNode *Unpredictable = nullptr;
2113 
2114   // If the branch has a condition wrapped by __builtin_unpredictable,
2115   // create metadata that specifies that the branch is unpredictable.
2116   // Don't bother if not optimizing because that metadata would not be used.
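  // E.g. (illustrative):
  //   if (__builtin_unpredictable(x > y)) { ... }
  // lowers to a conditional branch annotated with !unpredictable metadata.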
2117   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
2118   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2119     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2120     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2121       llvm::MDBuilder MDHelper(getLLVMContext());
2122       Unpredictable = MDHelper.createUnpredictable();
2123     }
2124   }
2125 
2126   // If there is likelihood knowledge for the condition, lower it.
2127   // Note that, if not optimizing, this won't emit anything.
2128   llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
2129   if (CondV != NewCondV)
2130     CondV = NewCondV;
2131   else {
2132     // Otherwise, lower profile counts. Note that we do this even at -O0.
2133     uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
2134     Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
2135   }
2136 
2137   llvm::Instruction *BrInst = Builder.CreateCondBr(CondV, TrueBlock, FalseBlock,
2138                                                    Weights, Unpredictable);
2139   addInstToNewSourceAtom(BrInst, CondV);
2140 
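  // HLSL note (illustrative): source-level [branch] / [flatten] attributes
  // arrive here as Microsoft_branch / Microsoft_flatten and are encoded in
  // "hlsl.controlflow.hint" metadata as the constants 1 and 2, respectively.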
2141   switch (HLSLControlFlowAttr) {
2142   case HLSLControlFlowHintAttr::Microsoft_branch:
2143   case HLSLControlFlowHintAttr::Microsoft_flatten: {
2144     llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2145 
2146     llvm::ConstantInt *BranchHintConstant =
2147         HLSLControlFlowAttr ==
2148                 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2149             ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2150             : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2151 
2152     SmallVector<llvm::Metadata *, 2> Vals(
2153         {MDHelper.createString("hlsl.controlflow.hint"),
2154          MDHelper.createConstant(BranchHintConstant)});
2155     BrInst->setMetadata("hlsl.controlflow.hint",
2156                         llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2157     break;
2158   }
2159   // This is required to avoid warnings during compilation.
2160   case HLSLControlFlowHintAttr::SpellingNotCalculated:
2161     break;
2162   }
2163 }
2164 
2165 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2166 /// specified stmt yet.
2167 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
2168   CGM.ErrorUnsupported(S, Type);
2169 }
2170 
2171 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
2172 /// variable-length array whose elements have a non-zero bit-pattern.
2173 ///
2174 /// \param baseType the inner-most element type of the array
2175 /// \param src - a char* pointing to the bit-pattern for a single
2176 /// base element of the array
2177 /// \param sizeInChars - the total size of the VLA, in chars
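///
/// Illustrative caller scenario: null-initializing `T arr[n]` where T
/// contains a pointer to data member, whose null representation under the
/// Itanium C++ ABI is all-ones rather than all-zero, so each element must
/// be copied from a prototype value instead of being memset to zero.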
2178 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
2179                                Address dest, Address src,
2180                                llvm::Value *sizeInChars) {
2181   CGBuilderTy &Builder = CGF.Builder;
2182 
2183   CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
2184   llvm::Value *baseSizeInChars
2185     = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
2186 
2187   Address begin = dest.withElementType(CGF.Int8Ty);
2188   llvm::Value *end = Builder.CreateInBoundsGEP(begin.getElementType(),
2189                                                begin.emitRawPointer(CGF),
2190                                                sizeInChars, "vla.end");
2191 
2192   llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
2193   llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
2194   llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
2195 
2196   // Make a loop over the VLA.  C99 guarantees that the VLA element
2197   // count is nonzero.
2198   CGF.EmitBlock(loopBB);
2199 
2200   llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
2201   cur->addIncoming(begin.emitRawPointer(CGF), originBB);
2202 
2203   CharUnits curAlign =
2204     dest.getAlignment().alignmentOfArrayElement(baseSize);
2205 
2206   // memcpy the individual element bit-pattern.
2207   Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
2208                        /*volatile*/ false);
2209 
2210   // Go to the next element.
2211   llvm::Value *next =
2212     Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
2213 
2214   // Leave if that's the end of the VLA.
2215   llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
2216   Builder.CreateCondBr(done, contBB, loopBB);
2217   cur->addIncoming(next, loopBB);
2218 
2219   CGF.EmitBlock(contBB);
2220 }
2221 
2222 void
2223 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
2224   // Ignore empty classes in C++.
2225   if (getLangOpts().CPlusPlus) {
2226     if (const RecordType *RT = Ty->getAs<RecordType>()) {
2227       if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
2228         return;
2229     }
2230   }
2231 
2232   if (DestPtr.getElementType() != Int8Ty)
2233     DestPtr = DestPtr.withElementType(Int8Ty);
2234 
2235   // Get size and alignment info for this aggregate.
2236   CharUnits size = getContext().getTypeSizeInChars(Ty);
2237 
2238   llvm::Value *SizeVal;
2239   const VariableArrayType *vla;
2240 
2241   // Don't bother emitting a zero-byte memset.
2242   if (size.isZero()) {
2243     // But note that getTypeInfo returns 0 for a VLA.
2244     if (const VariableArrayType *vlaType =
2245           dyn_cast_or_null<VariableArrayType>(
2246                                           getContext().getAsArrayType(Ty))) {
2247       auto VlaSize = getVLASize(vlaType);
2248       SizeVal = VlaSize.NumElts;
2249       CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2250       if (!eltSize.isOne())
2251         SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2252       vla = vlaType;
2253     } else {
2254       return;
2255     }
2256   } else {
2257     SizeVal = CGM.getSize(size);
2258     vla = nullptr;
2259   }
2260 
2261   // If the type contains a pointer to data member, we can't memset it to zero.
2262   // Instead, create a null constant and copy it to the destination.
2263   // TODO: there are other patterns besides zero that we can usefully memset,
2264   // like -1, which happens to be the pattern used by member-pointers.
2265   if (!CGM.getTypes().isZeroInitializable(Ty)) {
2266     // For a VLA, emit a single element, then splat that over the VLA.
2267     if (vla) Ty = getContext().getBaseElementType(vla);
2268 
2269     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2270 
2271     llvm::GlobalVariable *NullVariable =
2272       new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2273                                /*isConstant=*/true,
2274                                llvm::GlobalVariable::PrivateLinkage,
2275                                NullConstant, Twine());
2276     CharUnits NullAlign = DestPtr.getAlignment();
2277     NullVariable->setAlignment(NullAlign.getAsAlign());
2278     Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
2279 
2280     if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2281 
2282     // Get and call the appropriate llvm.memcpy overload.
2283     Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2284     return;
2285   }
2286 
2287   // Otherwise, just memset the whole thing to zero.  This is legal
2288   // because in LLVM, all default initializers (other than the ones we just
2289   // handled above) are guaranteed to have a bit pattern of all zeros.
2290   Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2291 }
2292 
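// GNU C example (illustrative) of what reaches GetAddrOfLabel:
//   void *p = &&done;   // address-of-label extension
//   goto *p;            // indirect goto
//   done: ;
// Every label whose address is taken must be registered as a destination
// of the single shared 'indirectbr' created in GetIndirectGotoBlock().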
2293 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2294   // Make sure that there is a block for the indirect goto.
2295   if (!IndirectBranch)
2296     GetIndirectGotoBlock();
2297 
2298   llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2299 
2300   // Make sure the indirect branch includes all of the address-taken blocks.
2301   IndirectBranch->addDestination(BB);
2302   return llvm::BlockAddress::get(CurFn->getType(), BB);
2303 }
2304 
2305 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2306   // If we already made the indirect branch for indirect goto, return its block.
2307   if (IndirectBranch) return IndirectBranch->getParent();
2308 
2309   CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2310 
2311   // Create the PHI node that indirect gotos will add entries to.
2312   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2313                                               "indirect.goto.dest");
2314 
2315   // Create the indirect branch instruction.
2316   IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2317   return IndirectBranch->getParent();
2318 }
2319 
2320 /// Computes the length of an array in elements, as well as the base
2321 /// element type and a properly-typed first element pointer.
2322 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2323                                               QualType &baseType,
2324                                               Address &addr) {
2325   const ArrayType *arrayType = origArrayType;
2326 
2327   // If it's a VLA, we have to load the stored size.  Note that
2328   // this is the size of the VLA in bytes, not its size in elements.
2329   llvm::Value *numVLAElements = nullptr;
2330   if (isa<VariableArrayType>(arrayType)) {
2331     numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2332 
2333     // Walk into all VLAs.  This doesn't require changes to addr,
2334     // which has type T* where T is the first non-VLA element type.
2335     do {
2336       QualType elementType = arrayType->getElementType();
2337       arrayType = getContext().getAsArrayType(elementType);
2338 
2339       // If we only have VLA components, 'addr' requires no adjustment.
2340       if (!arrayType) {
2341         baseType = elementType;
2342         return numVLAElements;
2343       }
2344     } while (isa<VariableArrayType>(arrayType));
2345 
2346     // We get out here only if we find a constant array type
2347     // inside the VLA.
2348   }
2349 
2350   // We have some number of constant-length arrays, so addr should
2351   // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
2352   // down to the first element of addr.
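  // E.g. (illustrative): for `int a[2][3]`, addr has LLVM type
  // [2 x [3 x i32]]*, the GEP indices become (0, 0, 0), and the element
  // count computed below is 2 * 3 = 6.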
2353   SmallVector<llvm::Value*, 8> gepIndices;
2354 
2355   // GEP down to the array type.
2356   llvm::ConstantInt *zero = Builder.getInt32(0);
2357   gepIndices.push_back(zero);
2358 
2359   uint64_t countFromCLAs = 1;
2360   QualType eltType;
2361 
2362   llvm::ArrayType *llvmArrayType =
2363     dyn_cast<llvm::ArrayType>(addr.getElementType());
2364   while (llvmArrayType) {
2365     assert(isa<ConstantArrayType>(arrayType));
2366     assert(cast<ConstantArrayType>(arrayType)->getZExtSize() ==
2367            llvmArrayType->getNumElements());
2368 
2369     gepIndices.push_back(zero);
2370     countFromCLAs *= llvmArrayType->getNumElements();
2371     eltType = arrayType->getElementType();
2372 
2373     llvmArrayType =
2374       dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2375     arrayType = getContext().getAsArrayType(arrayType->getElementType());
2376     assert((!llvmArrayType || arrayType) &&
2377            "LLVM and Clang types are out of sync");
2378   }
2379 
2380   if (arrayType) {
2381     // From this point onwards, the Clang array type has been emitted
2382     // as some other type (probably a packed struct). Compute the array
2383     // size, and just emit the 'begin' expression as a bitcast.
2384     while (arrayType) {
2385       countFromCLAs *= cast<ConstantArrayType>(arrayType)->getZExtSize();
2386       eltType = arrayType->getElementType();
2387       arrayType = getContext().getAsArrayType(eltType);
2388     }
2389 
2390     llvm::Type *baseType = ConvertType(eltType);
2391     addr = addr.withElementType(baseType);
2392   } else {
2393     // Create the actual GEP.
2394     addr = Address(Builder.CreateInBoundsGEP(addr.getElementType(),
2395                                              addr.emitRawPointer(*this),
2396                                              gepIndices, "array.begin"),
2397                    ConvertTypeForMem(eltType), addr.getAlignment());
2398   }
2399 
2400   baseType = eltType;
2401 
2402   llvm::Value *numElements
2403     = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2404 
2405   // If we had any VLA dimensions, factor them in.
2406   if (numVLAElements)
2407     numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2408 
2409   return numElements;
2410 }
2411 
2412 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2413   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2414   assert(vla && "type was not a variable array type!");
2415   return getVLASize(vla);
2416 }
2417 
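// E.g. (illustrative): for `int a[n][m]`, this walks both VLA levels and
// returns { n * m (combined with NUW multiplies), int }, using the sizes
// previously cached in VLASizeMap.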
2418 CodeGenFunction::VlaSizePair
2419 CodeGenFunction::getVLASize(const VariableArrayType *type) {
2420   // The number of elements so far; always size_t.
2421   llvm::Value *numElements = nullptr;
2422 
2423   QualType elementType;
2424   do {
2425     elementType = type->getElementType();
2426     llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2427     assert(vlaSize && "no size for VLA!");
2428     assert(vlaSize->getType() == SizeTy);
2429 
2430     if (!numElements) {
2431       numElements = vlaSize;
2432     } else {
2433       // It's undefined behavior if this wraps around, so mark it that way.
2434       // FIXME: Teach -fsanitize=undefined to trap this.
2435       numElements = Builder.CreateNUWMul(numElements, vlaSize);
2436     }
2437   } while ((type = getContext().getAsVariableArrayType(elementType)));
2438 
2439   return { numElements, elementType };
2440 }
2441 
2442 CodeGenFunction::VlaSizePair
2443 CodeGenFunction::getVLAElements1D(QualType type) {
2444   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2445   assert(vla && "type was not a variable array type!");
2446   return getVLAElements1D(vla);
2447 }
2448 
2449 CodeGenFunction::VlaSizePair
2450 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2451   llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2452   assert(VlaSize && "no size for VLA!");
2453   assert(VlaSize->getType() == SizeTy);
2454   return { VlaSize, Vla->getElementType() };
2455 }
2456 
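// Illustrative trigger: in
//   void f(int n) { typedef int T[n + 1]; T *p; }
// walking T's underlying VariableArrayType evaluates `n + 1` once, checks
// it is positive under -fsanitize=vla-bound, and caches the extent in
// VLASizeMap for later uses of T.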
2457 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2458   assert(type->isVariablyModifiedType() &&
2459          "Must pass variably modified type to EmitVariablyModifiedType!");
2460 
2461   EnsureInsertPoint();
2462 
2463   // We're going to walk down into the type and look for VLA
2464   // expressions.
2465   do {
2466     assert(type->isVariablyModifiedType());
2467 
2468     const Type *ty = type.getTypePtr();
2469     switch (ty->getTypeClass()) {
2470 
2471 #define TYPE(Class, Base)
2472 #define ABSTRACT_TYPE(Class, Base)
2473 #define NON_CANONICAL_TYPE(Class, Base)
2474 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2475 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2476 #include "clang/AST/TypeNodes.inc"
2477       llvm_unreachable("unexpected dependent type!");
2478 
2479     // These types are never variably-modified.
2480     case Type::Builtin:
2481     case Type::Complex:
2482     case Type::Vector:
2483     case Type::ExtVector:
2484     case Type::ConstantMatrix:
2485     case Type::Record:
2486     case Type::Enum:
2487     case Type::Using:
2488     case Type::TemplateSpecialization:
2489     case Type::ObjCTypeParam:
2490     case Type::ObjCObject:
2491     case Type::ObjCInterface:
2492     case Type::ObjCObjectPointer:
2493     case Type::BitInt:
2494     case Type::HLSLInlineSpirv:
2495       llvm_unreachable("type class is never variably-modified!");
2496 
2497     case Type::Elaborated:
2498       type = cast<ElaboratedType>(ty)->getNamedType();
2499       break;
2500 
2501     case Type::Adjusted:
2502       type = cast<AdjustedType>(ty)->getAdjustedType();
2503       break;
2504 
2505     case Type::Decayed:
2506       type = cast<DecayedType>(ty)->getPointeeType();
2507       break;
2508 
2509     case Type::Pointer:
2510       type = cast<PointerType>(ty)->getPointeeType();
2511       break;
2512 
2513     case Type::BlockPointer:
2514       type = cast<BlockPointerType>(ty)->getPointeeType();
2515       break;
2516 
2517     case Type::LValueReference:
2518     case Type::RValueReference:
2519       type = cast<ReferenceType>(ty)->getPointeeType();
2520       break;
2521 
2522     case Type::MemberPointer:
2523       type = cast<MemberPointerType>(ty)->getPointeeType();
2524       break;
2525 
2526     case Type::ArrayParameter:
2527     case Type::ConstantArray:
2528     case Type::IncompleteArray:
2529       // Losing element qualification here is fine.
2530       type = cast<ArrayType>(ty)->getElementType();
2531       break;
2532 
2533     case Type::VariableArray: {
2534       // Losing element qualification here is fine.
2535       const VariableArrayType *vat = cast<VariableArrayType>(ty);
2536 
2537       // Unknown size indication requires no size computation.
2538       // Otherwise, evaluate and record it.
2539       if (const Expr *sizeExpr = vat->getSizeExpr()) {
2540         // It's possible that we might have emitted this already,
2541         // e.g. with a typedef and a pointer to it.
2542         llvm::Value *&entry = VLASizeMap[sizeExpr];
2543         if (!entry) {
2544           llvm::Value *size = EmitScalarExpr(sizeExpr);
2545 
2546           // C11 6.7.6.2p5:
2547           //   If the size is an expression that is not an integer constant
2548           //   expression [...] each time it is evaluated it shall have a value
2549           //   greater than zero.
2550           if (SanOpts.has(SanitizerKind::VLABound)) {
2551             auto CheckOrdinal = SanitizerKind::SO_VLABound;
2552             auto CheckHandler = SanitizerHandler::VLABoundNotPositive;
2553             SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2554             llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2555             clang::QualType SEType = sizeExpr->getType();
2556             llvm::Value *CheckCondition =
2557                 SEType->isSignedIntegerType()
2558                     ? Builder.CreateICmpSGT(size, Zero)
2559                     : Builder.CreateICmpUGT(size, Zero);
2560             llvm::Constant *StaticArgs[] = {
2561                 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2562                 EmitCheckTypeDescriptor(SEType)};
2563             EmitCheck(std::make_pair(CheckCondition, CheckOrdinal),
2564                       CheckHandler, StaticArgs, size);
2565           }
2566 
2567           // Always zexting here would be wrong if it weren't
2568           // undefined behavior to have a negative bound.
2569           // FIXME: What about when size's type is larger than size_t?
2570           entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2571         }
2572       }
2573       type = vat->getElementType();
2574       break;
2575     }
2576 
2577     case Type::FunctionProto:
2578     case Type::FunctionNoProto:
2579       type = cast<FunctionType>(ty)->getReturnType();
2580       break;
2581 
2582     case Type::Paren:
2583     case Type::TypeOf:
2584     case Type::UnaryTransform:
2585     case Type::Attributed:
2586     case Type::BTFTagAttributed:
2587     case Type::HLSLAttributedResource:
2588     case Type::SubstTemplateTypeParm:
2589     case Type::MacroQualified:
2590     case Type::CountAttributed:
2591       // Keep walking after single level desugaring.
2592       type = type.getSingleStepDesugaredType(getContext());
2593       break;
2594 
2595     case Type::Typedef:
2596     case Type::Decltype:
2597     case Type::Auto:
2598     case Type::DeducedTemplateSpecialization:
2599     case Type::PackIndexing:
2600       // Stop walking: nothing to do.
2601       return;
2602 
2603     case Type::TypeOfExpr:
2604       // Stop walking: emit typeof expression.
2605       EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2606       return;
2607 
2608     case Type::Atomic:
2609       type = cast<AtomicType>(ty)->getValueType();
2610       break;
2611 
2612     case Type::Pipe:
2613       type = cast<PipeType>(ty)->getElementType();
2614       break;
2615     }
2616   } while (type->isVariablyModifiedType());
2617 }
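// Illustrative only (a hedged sketch, not part of the original source): C code
// that reaches the VLABound check above when built with -fsanitize=vla-bound.
// The function and variable names are hypothetical.
//
//   void use_vla(int n) {
//     int buf[n];   // "n" is the sizeExpr; if n <= 0 at runtime, the
//   }               // VLABoundNotPositive handler emitted above fires.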
2618 
2619 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2620   if (getContext().getBuiltinVaListType()->isArrayType())
2621     return EmitPointerWithAlignment(E);
2622   return EmitLValue(E).getAddress();
2623 }
2624 
2625 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2626   return EmitLValue(E).getAddress();
2627 }
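// Illustrative only (hedged, not part of the original source): on SysV x86-64,
// va_list is the array type "struct __va_list_tag [1]", so a va_list lvalue
// decays when used and EmitVAListRef takes the EmitPointerWithAlignment path;
// on targets where va_list is a pointer or struct type, the plain lvalue
// address suffices.
//
//   #include <stdarg.h>
//   int sum(int n, ...) {
//     va_list ap;          // array-typed on SysV x86-64
//     va_start(ap, n);     // the va_arg builtins receive EmitVAListRef(ap)
//     int s = 0;
//     for (int i = 0; i < n; ++i) s += va_arg(ap, int);
//     va_end(ap);
//     return s;
//   }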
2628 
2629 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2630                                               const APValue &Init) {
2631   assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2632   if (CGDebugInfo *Dbg = getDebugInfo())
2633     if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2634       Dbg->EmitGlobalVariable(E->getDecl(), Init);
2635 }
2636 
2637 CodeGenFunction::PeepholeProtection
2638 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2639   // At the moment, the only aggressive peephole we do in IR gen
2640   // is trunc(zext) folding, but if we add more, we can easily
2641   // extend this protection.
2642 
2643   if (!rvalue.isScalar()) return PeepholeProtection();
2644   llvm::Value *value = rvalue.getScalarVal();
2645   if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2646 
2647   // Just make an extra bitcast.
2648   assert(HaveInsertPoint());
2649   llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2650                                                   Builder.GetInsertBlock());
2651 
2652   PeepholeProtection protection;
2653   protection.Inst = inst;
2654   return protection;
2655 }
2656 
2657 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2658   if (!protection.Inst) return;
2659 
2660   // In theory, we could try to duplicate the peepholes now, but it's not worth it.
2661   protection.Inst->eraseFromParent();
2662 }
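// Illustrative IR sketch (hedged, not part of the original source) of the
// peephole being defended against: IR gen's trunc(zext x) fold bails out when
// the zext has extra uses, so the no-op bitcast supplies a second use.
//
//   %z = zext i8 %x to i32      ; the protected scalar rvalue
//   %p = bitcast i32 %z to i32  ; extra use inserted by protectFromPeepholes
//
// unprotectFromPeepholes erases %p once the hazard has passed.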
2663 
2664 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2665                                               QualType Ty, SourceLocation Loc,
2666                                               SourceLocation AssumptionLoc,
2667                                               llvm::Value *Alignment,
2668                                               llvm::Value *OffsetValue) {
2669   if (Alignment->getType() != IntPtrTy)
2670     Alignment =
2671         Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2672   if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2673     OffsetValue =
2674         Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2675   llvm::Value *TheCheck = nullptr;
2676   if (SanOpts.has(SanitizerKind::Alignment)) {
2677     llvm::Value *PtrIntValue =
2678         Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2679 
2680     if (OffsetValue) {
2681       bool IsOffsetZero = false;
2682       if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2683         IsOffsetZero = CI->isZero();
2684 
2685       if (!IsOffsetZero)
2686         PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2687     }
2688 
2689     llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2690     llvm::Value *Mask =
2691         Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2692     llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2693     TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2694   }
2695   llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2696       CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2697 
2698   if (!SanOpts.has(SanitizerKind::Alignment))
2699     return;
2700   emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2701                                OffsetValue, TheCheck, Assumption);
2702 }
2703 
2704 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2705                                               const Expr *E,
2706                                               SourceLocation AssumptionLoc,
2707                                               llvm::Value *Alignment,
2708                                               llvm::Value *OffsetValue) {
2709   QualType Ty = E->getType();
2710   SourceLocation Loc = E->getExprLoc();
2711 
2712   emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2713                           OffsetValue);
2714 }
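// Illustrative only (hedged, not part of the original source): C code that
// reaches the overloads above.
//
//   float *q = (float *)__builtin_assume_aligned(p, 64, 8);
//
// Here Alignment is 64 and OffsetValue is 8; with -fsanitize=alignment the
// emitted check verifies ((uintptr_t)p - 8) & 63 == 0 before keeping the
// llvm.assume.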
2715 
2716 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2717                                                  llvm::Value *AnnotatedVal,
2718                                                  StringRef AnnotationStr,
2719                                                  SourceLocation Location,
2720                                                  const AnnotateAttr *Attr) {
2721   SmallVector<llvm::Value *, 5> Args = {
2722       AnnotatedVal,
2723       CGM.EmitAnnotationString(AnnotationStr),
2724       CGM.EmitAnnotationUnit(Location),
2725       CGM.EmitAnnotationLineNo(Location),
2726   };
2727   if (Attr)
2728     Args.push_back(CGM.EmitAnnotationArgs(Attr));
2729   return Builder.CreateCall(AnnotationFn, Args);
2730 }
2731 
2732 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2733   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2734   for (const auto *I : D->specific_attrs<AnnotateAttr>())
2735     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2736                                         {V->getType(), CGM.ConstGlobalsPtrTy}),
2737                        V, I->getAnnotation(), D->getLocation(), I);
2738 }
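// Illustrative only (hedged, not part of the original source): a local
// variable that triggers EmitVarAnnotations.
//
//   void f(void) {
//     __attribute__((annotate("watched"))) int counter = 0;
//   }
//
// Each annotate attribute becomes one llvm.var.annotation call carrying the
// string, translation unit, and line number from EmitAnnotationCall.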
2739 
2740 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2741                                               Address Addr) {
2742   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2743   llvm::Value *V = Addr.emitRawPointer(*this);
2744   llvm::Type *VTy = V->getType();
2745   auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2746   unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2747   llvm::PointerType *IntrinTy =
2748       llvm::PointerType::get(CGM.getLLVMContext(), AS);
2749   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2750                                        {IntrinTy, CGM.ConstGlobalsPtrTy});
2751 
2752   for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2753     // FIXME Always emit the cast inst so we can differentiate between
2754     // annotation on the first field of a struct and annotation on the struct
2755     // itself.
2756     if (VTy != IntrinTy)
2757       V = Builder.CreateBitCast(V, IntrinTy);
2758     V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2759     V = Builder.CreateBitCast(V, VTy);
2760   }
2761 
2762   return Address(V, Addr.getElementType(), Addr.getAlignment());
2763 }
2764 
2765 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2766 
2767 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2768     : CGF(CGF) {
2769   assert(!CGF->IsSanitizerScope);
2770   CGF->IsSanitizerScope = true;
2771 }
2772 
2773 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2774   CGF->IsSanitizerScope = false;
2775 }
2776 
2777 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2778                                    const llvm::Twine &Name,
2779                                    llvm::BasicBlock::iterator InsertPt) const {
2780   LoopStack.InsertHelper(I);
2781   if (IsSanitizerScope)
2782     I->setNoSanitizeMetadata();
2783 }
2784 
2785 void CGBuilderInserter::InsertHelper(
2786     llvm::Instruction *I, const llvm::Twine &Name,
2787     llvm::BasicBlock::iterator InsertPt) const {
2788   llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, InsertPt);
2789   if (CGF)
2790     CGF->InsertHelper(I, Name, InsertPt);
2791 }
2792 
2793 // Emits an error if we don't have a valid set of target features for the
2794 // called function.
2795 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2796                                           const FunctionDecl *TargetDecl) {
2797   // SemaChecking cannot handle the x86 builtins below because their parameter
2798   // ranges depend on the caller's TargetAttribute.
2799   if (CGM.getContext().getTargetInfo().getTriple().isX86()) {
2800     unsigned BuiltinID = TargetDecl->getBuiltinID();
2801     if (BuiltinID == X86::BI__builtin_ia32_cmpps ||
2802         BuiltinID == X86::BI__builtin_ia32_cmpss ||
2803         BuiltinID == X86::BI__builtin_ia32_cmppd ||
2804         BuiltinID == X86::BI__builtin_ia32_cmpsd) {
2805       const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2806       llvm::StringMap<bool> TargetFeatureMap;
2807       CGM.getContext().getFunctionFeatureMap(TargetFeatureMap, FD);
2808       llvm::APSInt Result =
2809           *(E->getArg(2)->getIntegerConstantExpr(CGM.getContext()));
2810       if (Result.getSExtValue() > 7 && !TargetFeatureMap.lookup("avx"))
2811         CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
2812             << TargetDecl->getDeclName() << "avx";
2813     }
2814   }
2815   return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2816 }
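// Illustrative only (hedged, not part of the original source; operands a and b
// are hypothetical): comparison predicates above 7 require AVX, e.g.
//
//   __m128 r = __builtin_ia32_cmpps(a, b, 13);   // 13 == _CMP_GE_OS
//
// If the caller's feature map lacks "avx", the code above reports
// err_builtin_needs_feature.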
2817 
2818 // Emits an error if we don't have a valid set of target features for the
2819 // called function.
2820 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2821                                           const FunctionDecl *TargetDecl) {
2822   // Early exit if this is an indirect call.
2823   if (!TargetDecl)
2824     return;
2825 
2826   // Get the current enclosing function if it exists. If it doesn't,
2827   // we can't check the target features anyway.
2828   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2829   if (!FD)
2830     return;
2831 
2832   // Grab the required features for the call. For a builtin these are listed
2833   // in the .td file with the default CPU; for an always_inline function they
2834   // are any listed CPU and any listed features.
2835   unsigned BuiltinID = TargetDecl->getBuiltinID();
2836   std::string MissingFeature;
2837   llvm::StringMap<bool> CallerFeatureMap;
2838   CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2839   // When compiling in HipStdPar mode we have to be conservative in rejecting
2840   // target-specific features in the FE, and defer the possible error to the
2841   // AcceleratorCodeSelection pass, which emits an error if and only if an
2842   // unsupported target builtin is referenced by an accelerator executable function.
2843   bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2844   if (BuiltinID) {
2845     StringRef FeatureList(
2846         CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2847     if (!Builtin::evaluateRequiredTargetFeatures(FeatureList, CallerFeatureMap) &&
2848         !IsHipStdPar) {
2849       CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2850           << TargetDecl->getDeclName() << FeatureList;
2851     }
2852   } else if (!TargetDecl->isMultiVersion() &&
2853              TargetDecl->hasAttr<TargetAttr>()) {
2854     // Get the required features for the callee.
2855 
2856     const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2857     ParsedTargetAttr ParsedAttr =
2858         CGM.getContext().filterFunctionTargetAttrs(TD);
2859 
2860     SmallVector<StringRef, 1> ReqFeatures;
2861     llvm::StringMap<bool> CalleeFeatureMap;
2862     CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2863 
2864     for (const auto &F : ParsedAttr.Features) {
2865       if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2866         ReqFeatures.push_back(StringRef(F).substr(1));
2867     }
2868 
2869     for (const auto &F : CalleeFeatureMap) {
2870       // Only positive features are "required".
2871       if (F.getValue())
2872         ReqFeatures.push_back(F.getKey());
2873     }
2874     if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2875       if (!CallerFeatureMap.lookup(Feature)) {
2876         MissingFeature = Feature.str();
2877         return false;
2878       }
2879       return true;
2880     }) && !IsHipStdPar)
2881       CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2882           << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2883   } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2884     llvm::StringMap<bool> CalleeFeatureMap;
2885     CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2886 
2887     for (const auto &F : CalleeFeatureMap) {
2888       // StringMap::lookup returns false for missing keys, so a single lookup
2889       // covers both "feature absent" and "feature disabled".
2890       if (F.getValue() && !CallerFeatureMap.lookup(F.getKey()) && !IsHipStdPar)
2891         CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2892             << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2893     }
2894   }
2895 }
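// Illustrative only (hedged, not part of the original source): the
// always_inline path above rejects code such as
//
//   __attribute__((target("avx2"), always_inline)) static inline void g() {}
//   void h() { g(); }   // error: always_inline 'g' requires feature 'avx2'
//
// because a required callee feature is missing from the caller's feature map.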
2896 
2897 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2898   if (!CGM.getCodeGenOpts().SanitizeStats)
2899     return;
2900 
2901   llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2902   IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2903   CGM.getSanStats().create(IRB, SSK);
2904 }
2905 
2906 void CodeGenFunction::EmitKCFIOperandBundle(
2907     const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2908   const FunctionProtoType *FP =
2909       Callee.getAbstractInfo().getCalleeFunctionProtoType();
2910   if (FP)
2911     Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2912 }
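// Illustrative IR sketch (hedged, not part of the original source): with
// -fsanitize=kcfi an indirect call carries the bundle emitted above,
//
//   call void %fp(i32 %x) [ "kcfi"(i32 <type-id>) ]
//
// where <type-id> is derived from the callee's desugared FunctionProtoType.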
2913 
2914 llvm::Value *
2915 CodeGenFunction::FormAArch64ResolverCondition(const FMVResolverOption &RO) {
2916   return RO.Features.empty() ? nullptr : EmitAArch64CpuSupports(RO.Features);
2917 }
2918 
2919 llvm::Value *
2920 CodeGenFunction::FormX86ResolverCondition(const FMVResolverOption &RO) {
2921   llvm::Value *Condition = nullptr;
2922 
2923   if (RO.Architecture) {
2924     StringRef Arch = *RO.Architecture;
2925     // If arch= specifies an x86-64 micro-architecture level, test the feature
2926     // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
2927     if (Arch.starts_with("x86-64"))
2928       Condition = EmitX86CpuSupports({Arch});
2929     else
2930       Condition = EmitX86CpuIs(Arch);
2931   }
2932 
2933   if (!RO.Features.empty()) {
2934     llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Features);
2935     Condition =
2936         Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2937   }
2938   return Condition;
2939 }
2940 
2941 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2942                                              llvm::Function *Resolver,
2943                                              CGBuilderTy &Builder,
2944                                              llvm::Function *FuncToReturn,
2945                                              bool SupportsIFunc) {
2946   if (SupportsIFunc) {
2947     Builder.CreateRet(FuncToReturn);
2948     return;
2949   }
2950 
2951   llvm::SmallVector<llvm::Value *, 10> Args(
2952       llvm::make_pointer_range(Resolver->args()));
2953 
2954   llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2955   Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2956 
2957   if (Resolver->getReturnType()->isVoidTy())
2958     Builder.CreateRetVoid();
2959   else
2960     Builder.CreateRet(Result);
2961 }
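// Illustrative IR sketch (hedged, not part of the original source; names are
// hypothetical) of the non-IFUNC forwarding path above:
//
//   define i32 @work.resolver(i32 %0) {
//     %1 = musttail call i32 @work.avx2(i32 %0)
//     ret i32 %1
//   }
//
// With IFUNC support the resolver instead returns the chosen function pointer.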
2962 
2963 void CodeGenFunction::EmitMultiVersionResolver(
2964     llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
2965 
2966   llvm::Triple::ArchType ArchType =
2967       getContext().getTargetInfo().getTriple().getArch();
2968 
2969   switch (ArchType) {
2970   case llvm::Triple::x86:
2971   case llvm::Triple::x86_64:
2972     EmitX86MultiVersionResolver(Resolver, Options);
2973     return;
2974   case llvm::Triple::aarch64:
2975     EmitAArch64MultiVersionResolver(Resolver, Options);
2976     return;
2977   case llvm::Triple::riscv32:
2978   case llvm::Triple::riscv64:
2979     EmitRISCVMultiVersionResolver(Resolver, Options);
2980     return;
2981 
2982   default:
2983     llvm_unreachable("Only implemented for x86, AArch64 and RISC-V targets");
2984   }
2985 }
2986 
2987 void CodeGenFunction::EmitRISCVMultiVersionResolver(
2988     llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
2989 
2990   if (getContext().getTargetInfo().getTriple().getOS() !=
2991       llvm::Triple::OSType::Linux) {
2992     CGM.getDiags().Report(diag::err_os_unsupport_riscv_fmv);
2993     return;
2994   }
2995 
2996   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2997   Builder.SetInsertPoint(CurBlock);
2998   EmitRISCVCpuInit();
2999 
3000   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3001   bool HasDefault = false;
3002   unsigned DefaultIndex = 0;
3003 
3004   // Check each candidate function.
3005   for (unsigned Index = 0; Index < Options.size(); Index++) {
3006 
3007     if (Options[Index].Features.empty()) {
3008       HasDefault = true;
3009       DefaultIndex = Index;
3010       continue;
3011     }
3012 
3013     Builder.SetInsertPoint(CurBlock);
3014 
3015     // FeaturesCondition: the bitmask of the required extensions has been
3016     // enabled by the runtime object:
3017     //   (__riscv_feature_bits.features[i] & REQUIRED_BITMASK) ==
3018     //       REQUIRED_BITMASK
3019     //
3020     // When condition is met, return this version of the function.
3021     // Otherwise, try the next version.
3022     //
3023     // if (FeaturesConditionVersion1)
3024     //     return Version1;
3025     // else if (FeaturesConditionVersion2)
3026     //     return Version2;
3027     // else if (FeaturesConditionVersion3)
3028     //     return Version3;
3029     // ...
3030     // else
3031     //     return DefaultVersion;
3032 
3033     // TODO: Add a condition to check the length before accessing elements.
3034     // Without checking the length first, we may access an incorrect memory
3035     // address when using different versions.
3036     llvm::SmallVector<StringRef, 8> CurrTargetAttrFeats;
3037     llvm::SmallVector<std::string, 8> TargetAttrFeats;
3038 
3039     for (StringRef Feat : Options[Index].Features) {
3040       std::vector<std::string> FeatStr =
3041           getContext().getTargetInfo().parseTargetAttr(Feat).Features;
3042 
3043       assert(FeatStr.size() == 1 && "Feature string not delimited");
3044 
3045       std::string &CurrFeat = FeatStr.front();
3046       if (CurrFeat[0] == '+')
3047         TargetAttrFeats.push_back(CurrFeat.substr(1));
3048     }
3049 
3050     if (TargetAttrFeats.empty())
3051       continue;
3052 
3053     for (std::string &Feat : TargetAttrFeats)
3054       CurrTargetAttrFeats.push_back(Feat);
3055 
3056     Builder.SetInsertPoint(CurBlock);
3057     llvm::Value *FeatsCondition = EmitRISCVCpuSupports(CurrTargetAttrFeats);
3058 
3059     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3060     CGBuilderTy RetBuilder(*this, RetBlock);
3061     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder,
3062                                      Options[Index].Function, SupportsIFunc);
3063     llvm::BasicBlock *ElseBlock = createBasicBlock("resolver_else", Resolver);
3064 
3065     Builder.SetInsertPoint(CurBlock);
3066     Builder.CreateCondBr(FeatsCondition, RetBlock, ElseBlock);
3067 
3068     CurBlock = ElseBlock;
3069   }
3070 
3071   // Finally, emit the default one.
3072   if (HasDefault) {
3073     Builder.SetInsertPoint(CurBlock);
3074     CreateMultiVersionResolverReturn(
3075         CGM, Resolver, Builder, Options[DefaultIndex].Function, SupportsIFunc);
3076     return;
3077   }
3078 
3079   // If no generic/default, emit an unreachable.
3080   Builder.SetInsertPoint(CurBlock);
3081   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3082   TrapCall->setDoesNotReturn();
3083   TrapCall->setDoesNotThrow();
3084   Builder.CreateUnreachable();
3085   Builder.ClearInsertionPoint();
3086 }
3087 
3088 void CodeGenFunction::EmitAArch64MultiVersionResolver(
3089     llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3090   assert(!Options.empty() && "No multiversion resolver options found");
3091   assert(Options.back().Features.empty() && "Default case must be last");
3092   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3093   assert(SupportsIFunc &&
3094          "Multiversion resolver requires target IFUNC support");
3095   bool AArch64CpuInitialized = false;
3096   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3097 
3098   for (const FMVResolverOption &RO : Options) {
3099     Builder.SetInsertPoint(CurBlock);
3100     llvm::Value *Condition = FormAArch64ResolverCondition(RO);
3101 
3102     // The 'default' or 'all features enabled' case.
3103     if (!Condition) {
3104       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3105                                        SupportsIFunc);
3106       return;
3107     }
3108 
3109     if (!AArch64CpuInitialized) {
3110       Builder.SetInsertPoint(CurBlock, CurBlock->begin());
3111       EmitAArch64CpuInit();
3112       AArch64CpuInitialized = true;
3113       Builder.SetInsertPoint(CurBlock);
3114     }
3115 
3116     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3117     CGBuilderTy RetBuilder(*this, RetBlock);
3118     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3119                                      SupportsIFunc);
3120     CurBlock = createBasicBlock("resolver_else", Resolver);
3121     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3122   }
3123 
3124   // If no default, emit an unreachable.
3125   Builder.SetInsertPoint(CurBlock);
3126   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3127   TrapCall->setDoesNotReturn();
3128   TrapCall->setDoesNotThrow();
3129   Builder.CreateUnreachable();
3130   Builder.ClearInsertionPoint();
3131 }
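// Illustrative only (hedged, not part of the original source): AArch64 FMV
// source that feeds the resolver above.
//
//   __attribute__((target_version("sve2"))) int f(void) { return 2; }
//   __attribute__((target_version("default"))) int f(void) { return 0; }
//
// Each non-default version is guarded by an EmitAArch64CpuSupports condition;
// the default version terminates the chain and must come last.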
3132 
3133 void CodeGenFunction::EmitX86MultiVersionResolver(
3134     llvm::Function *Resolver, ArrayRef<FMVResolverOption> Options) {
3135 
3136   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
3137 
3138   // Main function's basic block.
3139   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
3140   Builder.SetInsertPoint(CurBlock);
3141   EmitX86CpuInit();
3142 
3143   for (const FMVResolverOption &RO : Options) {
3144     Builder.SetInsertPoint(CurBlock);
3145     llvm::Value *Condition = FormX86ResolverCondition(RO);
3146 
3147     // The 'default' or 'generic' case.
3148     if (!Condition) {
3149       assert(&RO == Options.end() - 1 &&
3150              "Default or Generic case must be last");
3151       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
3152                                        SupportsIFunc);
3153       return;
3154     }
3155 
3156     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
3157     CGBuilderTy RetBuilder(*this, RetBlock);
3158     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
3159                                      SupportsIFunc);
3160     CurBlock = createBasicBlock("resolver_else", Resolver);
3161     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
3162   }
3163 
3164   // If no generic/default, emit an unreachable.
3165   Builder.SetInsertPoint(CurBlock);
3166   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3167   TrapCall->setDoesNotReturn();
3168   TrapCall->setDoesNotThrow();
3169   Builder.CreateUnreachable();
3170   Builder.ClearInsertionPoint();
3171 }
3172 
3173 // Loc - where the diagnostic will point, where in the source code this
3174 //  alignment has failed.
3175 // SecondaryLoc - if present (will be present if sufficiently different from
3176 //  Loc), the diagnostic will additionally point a "Note:" to this location.
3177   //  It should be the location where, for example, the
3178   //  __attribute__((assume_aligned)) was written.
3179 void CodeGenFunction::emitAlignmentAssumptionCheck(
3180     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
3181     SourceLocation SecondaryLoc, llvm::Value *Alignment,
3182     llvm::Value *OffsetValue, llvm::Value *TheCheck,
3183     llvm::Instruction *Assumption) {
3184   assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
3185          cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
3186              llvm::Intrinsic::getOrInsertDeclaration(
3187                  Builder.GetInsertBlock()->getParent()->getParent(),
3188                  llvm::Intrinsic::assume) &&
3189          "Assumption should be a call to llvm.assume().");
3190   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
3191          "Assumption should be the last instruction of the basic block, "
3192          "since the basic block is still being generated.");
3193 
3194   if (!SanOpts.has(SanitizerKind::Alignment))
3195     return;
3196 
3197   // Don't check pointers to volatile data. The behavior here is implementation-
3198   // defined.
3199   if (Ty->getPointeeType().isVolatileQualified())
3200     return;
3201 
3202   // We need to temporarily remove the assumption so we can insert the
3203   // sanitizer check before it; otherwise the check will be dropped by optimizations.
3204   Assumption->removeFromParent();
3205 
3206   {
3207     auto CheckOrdinal = SanitizerKind::SO_Alignment;
3208     auto CheckHandler = SanitizerHandler::AlignmentAssumption;
3209     SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
3210 
3211     if (!OffsetValue)
3212       OffsetValue = Builder.getInt1(false); // no offset.
3213 
3214     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
3215                                     EmitCheckSourceLocation(SecondaryLoc),
3216                                     EmitCheckTypeDescriptor(Ty)};
3217     llvm::Value *DynamicData[] = {Ptr, Alignment, OffsetValue};
3218     EmitCheck({std::make_pair(TheCheck, CheckOrdinal)}, CheckHandler,
3219               StaticData, DynamicData);
3220   }
3221 
3222   // We are now in the (new, empty) "cont" basic block.
3223   // Reintroduce the assumption.
3224   Builder.Insert(Assumption);
3225   // FIXME: Assumption still has its original basic block as its parent.
3226 }
3227 
3228 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
3229   if (CGDebugInfo *DI = getDebugInfo())
3230     return DI->SourceLocToDebugLoc(Location);
3231 
3232   return llvm::DebugLoc();
3233 }
3234 
3235 llvm::Value *
3236 CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
3237                                                       Stmt::Likelihood LH) {
3238   switch (LH) {
3239   case Stmt::LH_None:
3240     return Cond;
3241   case Stmt::LH_Likely:
3242   case Stmt::LH_Unlikely:
3243     // Don't generate llvm.expect on -O0 as the backend won't use it for
3244     // anything.
3245     if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3246       return Cond;
3247     llvm::Type *CondTy = Cond->getType();
3248     assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
3249     llvm::Function *FnExpect =
3250         CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
3251     llvm::Value *ExpectedValueOfCond =
3252         llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
3253     return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
3254                               Cond->getName() + ".expval");
3255   }
3256   llvm_unreachable("Unknown Likelihood");
3257 }
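// Illustrative IR sketch (hedged, not part of the original source): at -O1 and
// above, "if (c) [[likely]] ..." routes the branch condition through
//
//   %c.expval = call i1 @llvm.expect.i1(i1 %c, i1 true)
//
// and the LowerExpectIntrinsic pass later converts this into branch weights.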
3258 
3259 llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
3260                                                     unsigned NumElementsDst,
3261                                                     const llvm::Twine &Name) {
3262   auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
3263   unsigned NumElementsSrc = SrcTy->getNumElements();
3264   if (NumElementsSrc == NumElementsDst)
3265     return SrcVec;
3266 
3267   std::vector<int> ShuffleMask(NumElementsDst, -1);
3268   for (unsigned MaskIdx = 0;
3269        MaskIdx < std::min(NumElementsDst, NumElementsSrc); ++MaskIdx)
3270     ShuffleMask[MaskIdx] = MaskIdx;
3271 
3272   return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
3273 }
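// Illustrative only (hedged, not part of the original source): widening a
// <4 x i1> vector to <8 x i1> uses shuffle mask {0, 1, 2, 3, -1, -1, -1, -1}
// (a poison tail), while narrowing to <2 x i1> uses {0, 1}.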
3274 
3275 void CodeGenFunction::EmitPointerAuthOperandBundle(
3276     const CGPointerAuthInfo &PointerAuth,
3277     SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
3278   if (!PointerAuth.isSigned())
3279     return;
3280 
3281   auto *Key = Builder.getInt32(PointerAuth.getKey());
3282 
3283   llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3284   if (!Discriminator)
3285     Discriminator = Builder.getSize(0);
3286 
3287   llvm::Value *Args[] = {Key, Discriminator};
3288   Bundles.emplace_back("ptrauth", Args);
3289 }
3290 
3291 static llvm::Value *EmitPointerAuthCommon(CodeGenFunction &CGF,
3292                                           const CGPointerAuthInfo &PointerAuth,
3293                                           llvm::Value *Pointer,
3294                                           unsigned IntrinsicID) {
3295   if (!PointerAuth)
3296     return Pointer;
3297 
3298   auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3299 
3300   llvm::Value *Discriminator = PointerAuth.getDiscriminator();
3301   if (!Discriminator) {
3302     Discriminator = CGF.Builder.getSize(0);
3303   }
3304 
3305   // Convert the pointer to intptr_t before signing or authenticating it.
3306   auto OrigType = Pointer->getType();
3307   Pointer = CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy);
3308 
3309   // call i64 @llvm.ptrauth.sign(i64 %pointer, i32 %key, i64 %discriminator)
3310   auto Intrinsic = CGF.CGM.getIntrinsic(IntrinsicID);
3311   Pointer = CGF.EmitRuntimeCall(Intrinsic, {Pointer, Key, Discriminator});
3312 
3313   // Convert back to the original type.
3314   Pointer = CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3315   return Pointer;
3316 }
3317 
3318 llvm::Value *
3319 CodeGenFunction::EmitPointerAuthSign(const CGPointerAuthInfo &PointerAuth,
3320                                      llvm::Value *Pointer) {
3321   if (!PointerAuth.shouldSign())
3322     return Pointer;
3323   return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3324                                llvm::Intrinsic::ptrauth_sign);
3325 }
3326 
3327 static llvm::Value *EmitStrip(CodeGenFunction &CGF,
3328                               const CGPointerAuthInfo &PointerAuth,
3329                               llvm::Value *Pointer) {
3330   auto StripIntrinsic = CGF.CGM.getIntrinsic(llvm::Intrinsic::ptrauth_strip);
3331 
3332   auto Key = CGF.Builder.getInt32(PointerAuth.getKey());
3333   // Convert the pointer to intptr_t before stripping it.
3334   auto OrigType = Pointer->getType();
3335   Pointer = CGF.EmitRuntimeCall(
3336       StripIntrinsic, {CGF.Builder.CreatePtrToInt(Pointer, CGF.IntPtrTy), Key});
3337   return CGF.Builder.CreateIntToPtr(Pointer, OrigType);
3338 }
3339 
3340 llvm::Value *
3341 CodeGenFunction::EmitPointerAuthAuth(const CGPointerAuthInfo &PointerAuth,
3342                                      llvm::Value *Pointer) {
3343   if (PointerAuth.shouldStrip()) {
3344     return EmitStrip(*this, PointerAuth, Pointer);
3345   }
3346   if (!PointerAuth.shouldAuth()) {
3347     return Pointer;
3348   }
3349 
3350   return EmitPointerAuthCommon(*this, PointerAuth, Pointer,
3351                                llvm::Intrinsic::ptrauth_auth);
3352 }
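// Illustrative IR sketch (hedged, not part of the original source): a sign
// followed by an authentication of the same pointer, with key 0 and a
// hypothetical discriminator %disc:
//
//   %i = ptrtoint ptr %p to i64
//   %s = call i64 @llvm.ptrauth.sign(i64 %i, i32 0, i64 %disc)
//   %a = call i64 @llvm.ptrauth.auth(i64 %s, i32 0, i64 %disc)
//   %q = inttoptr i64 %a to ptr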
3353 
3354 void CodeGenFunction::addInstToCurrentSourceAtom(
3355     llvm::Instruction *KeyInstruction, llvm::Value *Backup) {
3356   if (CGDebugInfo *DI = getDebugInfo())
3357     DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3358 }
3359 
3360 void CodeGenFunction::addInstToSpecificSourceAtom(
3361     llvm::Instruction *KeyInstruction, llvm::Value *Backup, uint64_t Atom) {
3362   if (CGDebugInfo *DI = getDebugInfo())
3363     DI->addInstToSpecificSourceAtom(KeyInstruction, Backup, Atom);
3364 }
3365 
3366 void CodeGenFunction::addInstToNewSourceAtom(llvm::Instruction *KeyInstruction,
3367                                              llvm::Value *Backup) {
3368   if (CGDebugInfo *DI = getDebugInfo()) {
3369     ApplyAtomGroup Grp(getDebugInfo());
3370     DI->addInstToCurrentSourceAtom(KeyInstruction, Backup);
3371   }
3372 }
3373