//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum:      // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid:    // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using:          // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
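    // For a structured binding such as "auto [a, b] = t;" on a tuple-like
    // type, each BindingDecl may own a holding variable that stores the
    // result of the get<> call; those holding variables are emitted here too.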
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:      // typedef int X;
  case Decl::TypeAlias: {  // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still
  // need to be emitted like a static variable, e.g. a function-scope
  // variable in the constant address space in OpenCL.
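  // A sketch of that OpenCL case:
  //   void f() { __constant int x = 42; }  // emitted like a static global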
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
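  // For a static local 'x' in a C function 'foo', the result is "foo.x".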
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);
  getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it.  If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one.  Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer.  (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
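  // For example, a union such as "union { int i; float f; }" is lowered to
  // an LLVM type based on one member; a constant initializer for the other
  // member produces a constant of a different LLVM type.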
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    OldGV->replaceAllUsesWith(GV);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration.  This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
    cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
    llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

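    // When non-null, NRVOFlag is a flag that the function sets on a return
    // statement that performs NRVO for this variable; ownership of the object
    // then belongs to the return slot, so the destructor call below is
    // skipped on the normal cleanup path.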
    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    bool isRedundantBeforeReturn() override { return true; }
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      CGF.Builder.CreateStackRestore(V);
    }
  };

  struct KmpcAllocFree final : EHScopeStack::Cleanup {
    std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
    KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
        : AddrSizePair(AddrSizePair) {}
    void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
      auto &RT = CGF.CGM.getOpenMPRuntime();
      RT.getKmpcFreeShared(CGF, AddrSizePair);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      // void f(void* arg);
      // __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
      (var.hasAttr<ObjCPreciseLifetimeAttr>()
       ? CodeGenFunction::destroyARCStrongPrecise
       : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. a missing decl or the condition of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

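/// Try to emit the initialization of a __weak variable as a call to
/// objc_copyWeak (for l-values) or objc_moveWeak (for x-values) instead of
/// reading the source value and re-registering it, e.g. for
///   __weak id y = x;   // where 'x' is itself __weak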
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // If the left hand side must be nonnull, check that the right hand side of
  // the assignment is nonnull.
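  // For example, under -fsanitize=nullability-assign:
  //   int *_Nonnull p = q;   // emits a runtime check that 'q' is nonnull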
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized.  If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be released immediately.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this.  It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier.  We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
        dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global.  It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
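/// For example, "int a[1024] = {1};" is better emitted as a bzero of the
/// whole array plus a single store than as a 4KiB memcpy from a global.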
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is
  // larger, use bzero plus stores when the non-zero parts need 6 or fewer
  // scalar stores.
  // TODO: Should the budget depend on the size?  Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear whether that's worth the
///       effort.
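/// For example, a large array whose every byte is 0x41 can be initialized
/// with a single memset of that value rather than a memcpy from a global.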
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
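/// For example, "struct { int a, b; } s = {1, 2};" becomes two scalar stores
/// that later store-to-load forwarding can see through, instead of a memcpy.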
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
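/// For example, "struct { char c; int i; }" on a typical target has three
/// bytes of padding after 'c'; this rebuilds the constant with those bytes
/// filled with the pattern (or zero) instead of being left undefined.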
1056 static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1057                                               IsPattern isPattern,
1058                                               llvm::StructType *STy,
1059                                               llvm::Constant *constant) {
1060   const llvm::DataLayout &DL = CGM.getDataLayout();
1061   const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1062   llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1063   unsigned SizeSoFar = 0;
1064   SmallVector<llvm::Constant *, 8> Values;
1065   bool NestedIntact = true;
1066   for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1067     unsigned CurOff = Layout->getElementOffset(i);
1068     if (SizeSoFar < CurOff) {
1069       assert(!STy->isPacked());
1070       auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1071       Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1072     }
1073     llvm::Constant *CurOp;
1074     if (constant->isZeroValue())
1075       CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1076     else
1077       CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1078     auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1079     if (CurOp != NewOp)
1080       NestedIntact = false;
1081     Values.push_back(NewOp);
1082     SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1083   }
1084   unsigned TotalSize = Layout->getSizeInBytes();
1085   if (SizeSoFar < TotalSize) {
1086     auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1087     Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1088   }
1089   if (NestedIntact && Values.size() == STy->getNumElements())
1090     return constant;
1091   return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
1092 }
1093 
1094 /// Replace all padding bytes in a given constant with either a pattern byte or
1095 /// 0x00.
1096 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1097                                         llvm::Constant *constant) {
1098   llvm::Type *OrigTy = constant->getType();
1099   if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1100     return constStructWithPadding(CGM, isPattern, STy, constant);
1101   if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
1102     llvm::SmallVector<llvm::Constant *, 8> Values;
1103     uint64_t Size = ArrayTy->getNumElements();
1104     if (!Size)
1105       return constant;
1106     llvm::Type *ElemTy = ArrayTy->getElementType();
1107     bool ZeroInitializer = constant->isNullValue();
1108     llvm::Constant *OpValue, *PaddedOp;
1109     if (ZeroInitializer) {
1110       OpValue = llvm::Constant::getNullValue(ElemTy);
1111       PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1112     }
1113     for (unsigned Op = 0; Op != Size; ++Op) {
1114       if (!ZeroInitializer) {
1115         OpValue = constant->getAggregateElement(Op);
1116         PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1117       }
1118       Values.push_back(PaddedOp);
1119     }
1120     auto *NewElemTy = Values[0]->getType();
1121     if (NewElemTy == ElemTy)
1122       return constant;
1123     auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1124     return llvm::ConstantArray::get(NewArrayTy, Values);
1125   }
1126   // FIXME: Add handling for tail padding in vectors. Vectors don't
1127   // have padding between or inside elements, but the total amount of
1128   // data can be less than the allocated size.
1129   return constant;
1130 }
1131 
1132 Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
1133                                                llvm::Constant *Constant,
1134                                                CharUnits Align) {
1135   auto FunctionName = [&](const DeclContext *DC) -> std::string {
1136     if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1137       if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1138         return CC->getNameAsString();
1139       if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1140         return CD->getNameAsString();
1141       return std::string(getMangledName(FD));
1142     } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1143       return OM->getNameAsString();
1144     } else if (isa<BlockDecl>(DC)) {
1145       return "<block>";
1146     } else if (isa<CapturedDecl>(DC)) {
1147       return "<captured>";
1148     } else {
1149       llvm_unreachable("expected a function or method");
1150     }
1151   };
1152 
1153   // Form a simple per-variable cache of these values in case we find we
1154   // want to reuse them.
1155   llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1156   if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1157     auto *Ty = Constant->getType();
1158     bool isConstant = true;
1159     llvm::GlobalVariable *InsertBefore = nullptr;
1160     unsigned AS =
1161         getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
1162     std::string Name;
1163     if (D.hasGlobalStorage())
1164       Name = getMangledName(&D).str() + ".const";
1165     else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1166       Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1167     else
1168       llvm_unreachable("local variable has no parent function or method");
1169     llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1170         getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1171         Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1172     GV->setAlignment(Align.getAsAlign());
1173     GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1174     CacheEntry = GV;
1175   } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
1176     CacheEntry->setAlignment(Align.getAsAlign());
1177   }
1178 
1179   return Address(CacheEntry, CacheEntry->getValueType(), Align);
1180 }
1181 
1182 static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1183                                                 const VarDecl &D,
1184                                                 CGBuilderTy &Builder,
1185                                                 llvm::Constant *Constant,
1186                                                 CharUnits Align) {
1187   Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1188   return SrcPtr.withElementType(CGM.Int8Ty);
1189 }
1190 
1191 static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
1192                                   Address Loc, bool isVolatile,
1193                                   CGBuilderTy &Builder,
1194                                   llvm::Constant *constant, bool IsAutoInit) {
1195   auto *Ty = constant->getType();
1196   uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1197   if (!ConstantSize)
1198     return;
1199 
1200   bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1201                           Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1202   if (canDoSingleStore) {
1203     auto *I = Builder.CreateStore(constant, Loc, isVolatile);
1204     if (IsAutoInit)
1205       I->addAnnotationMetadata("auto-init");
1206     return;
1207   }
1208 
1209   auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1210 
1211   // If the initializer is all or mostly the same, codegen with bzero / memset
1212   // then do a few stores afterward.
1213   if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1214     auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
1215                                    SizeVal, isVolatile);
1216     if (IsAutoInit)
1217       I->addAnnotationMetadata("auto-init");
1218 
1219     bool valueAlreadyCorrect =
1220         constant->isNullValue() || isa<llvm::UndefValue>(constant);
1221     if (!valueAlreadyCorrect) {
1222       Loc = Loc.withElementType(Ty);
1223       emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
1224                                   IsAutoInit);
1225     }
1226     return;
1227   }
1228 
1229   // If the initializer is a repeated byte pattern, use memset.
1230   llvm::Value *Pattern =
1231       shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1232   if (Pattern) {
1233     uint64_t Value = 0x00;
1234     if (!isa<llvm::UndefValue>(Pattern)) {
1235       const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1236       assert(AP.getBitWidth() <= 8);
1237       Value = AP.getLimitedValue();
1238     }
1239     auto *I = Builder.CreateMemSet(
1240         Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
1241     if (IsAutoInit)
1242       I->addAnnotationMetadata("auto-init");
1243     return;
1244   }
1245 
1246   // If the initializer is small or trivialAutoVarInit is set, use a handful of
1247   // stores.
1248   bool IsTrivialAutoVarInitPattern =
1249       CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
1250       LangOptions::TrivialAutoVarInitKind::Pattern;
1251   if (shouldSplitConstantStore(CGM, ConstantSize)) {
1252     if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1253       if (STy == Loc.getElementType() ||
1254           (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1255         const llvm::StructLayout *Layout =
1256             CGM.getDataLayout().getStructLayout(STy);
1257         for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1258           CharUnits CurOff =
1259               CharUnits::fromQuantity(Layout->getElementOffset(i));
1260           Address EltPtr = Builder.CreateConstInBoundsByteGEP(
1261               Loc.withElementType(CGM.Int8Ty), CurOff);
1262           emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
1263                                 constant->getAggregateElement(i), IsAutoInit);
1264         }
1265         return;
1266       }
1267     } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1268       if (ATy == Loc.getElementType() ||
1269           (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
1270         for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1271           Address EltPtr = Builder.CreateConstGEP(
1272               Loc.withElementType(ATy->getElementType()), i);
1273           emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
1274                                 constant->getAggregateElement(i), IsAutoInit);
1275         }
1276         return;
1277       }
1278     }
1279   }
1280 
1281   // Copy from a global.
1282   auto *I =
1283       Builder.CreateMemCpy(Loc,
1284                            createUnnamedGlobalForMemcpyFrom(
1285                                CGM, D, Builder, constant, Loc.getAlignment()),
1286                            SizeVal, isVolatile);
1287   if (IsAutoInit)
1288     I->addAnnotationMetadata("auto-init");
1289 }
1290 
1291 static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
1292                                   Address Loc, bool isVolatile,
1293                                   CGBuilderTy &Builder) {
1294   llvm::Type *ElTy = Loc.getElementType();
1295   llvm::Constant *constant =
1296       constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1297   emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1298                         /*IsAutoInit=*/true);
1299 }
1300 
1301 static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
1302                                      Address Loc, bool isVolatile,
1303                                      CGBuilderTy &Builder) {
1304   llvm::Type *ElTy = Loc.getElementType();
1305   llvm::Constant *constant = constWithPadding(
1306       CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1307   assert(!isa<llvm::UndefValue>(constant));
1308   emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1309                         /*IsAutoInit=*/true);
1310 }
1311 
1312 static bool containsUndef(llvm::Constant *constant) {
1313   auto *Ty = constant->getType();
1314   if (isa<llvm::UndefValue>(constant))
1315     return true;
1316   if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1317     for (llvm::Use &Op : constant->operands())
1318       if (containsUndef(cast<llvm::Constant>(Op)))
1319         return true;
1320   return false;
1321 }
1322 
1323 static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1324                                     llvm::Constant *constant) {
1325   auto *Ty = constant->getType();
1326   if (isa<llvm::UndefValue>(constant))
1327     return patternOrZeroFor(CGM, isPattern, Ty);
1328   if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1329     return constant;
1330   if (!containsUndef(constant))
1331     return constant;
1332   llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1333   for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1334     auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1335     Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1336   }
1337   if (Ty->isStructTy())
1338     return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1339   if (Ty->isArrayTy())
1340     return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1341   assert(Ty->isVectorTy());
1342   return llvm::ConstantVector::get(Values);
1343 }
1344 
1345 /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1346 /// variable declaration with auto, register, or no storage class specifier.
1347 /// These turn into simple stack objects, or GlobalValues depending on target.
1348 void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1349   AutoVarEmission emission = EmitAutoVarAlloca(D);
1350   EmitAutoVarInit(emission);
1351   EmitAutoVarCleanups(emission);
1352 }
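
// Rough mapping for a declaration such as "std::string s = make();": the
// three phases above correspond to the alloca for s (plus lifetime.start),
// the emission of the initializer into that slot, and pushing the
// ~basic_string() cleanup onto EHStack. Illustrative only; details vary by
// type and target.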
1353 
1354 /// Emit a lifetime.start marker if some criteria are satisfied.
1355 /// \return the size Value passed to the marker if one was emitted, null
1356 /// otherwise.
1357 llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
1358                                                 llvm::Value *Addr) {
1359   if (!ShouldEmitLifetimeMarkers)
1360     return nullptr;
1361 
1362   assert(Addr->getType()->getPointerAddressSpace() ==
1363              CGM.getDataLayout().getAllocaAddrSpace() &&
1364          "Pointer should be in alloca address space");
1365   llvm::Value *SizeV = llvm::ConstantInt::get(
1366       Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
1367   llvm::CallInst *C =
1368       Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1369   C->setDoesNotThrow();
1370   return SizeV;
1371 }
1372 
1373 void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1374   assert(Addr->getType()->getPointerAddressSpace() ==
1375              CGM.getDataLayout().getAllocaAddrSpace() &&
1376          "Pointer should be in alloca address space");
1377   llvm::CallInst *C =
1378       Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1379   C->setDoesNotThrow();
1380 }
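
// A minimal sketch of the IR these helpers produce for a 4-byte local (the
// pointer's address space follows the target's alloca address space):
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   ...
//   call void @llvm.lifetime.end.p0(i64 4, ptr %x)
// Scalable sizes are encoded as -1, as computed in EmitLifetimeStart above.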
1381 
1382 void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1383     CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1384   // For each dimension stores its QualType and corresponding
1385   // size-expression Value.
1386   SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1387   SmallVector<const IdentifierInfo *, 4> VLAExprNames;
1388 
1389   // Break down the array into individual dimensions.
1390   QualType Type1D = D.getType();
1391   while (getContext().getAsVariableArrayType(Type1D)) {
1392     auto VlaSize = getVLAElements1D(Type1D);
1393     if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1394       Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1395     else {
1396       // Generate a locally unique name for the size expression.
1397       Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1398       SmallString<12> Buffer;
1399       StringRef NameRef = Name.toStringRef(Buffer);
1400       auto &Ident = getContext().Idents.getOwn(NameRef);
1401       VLAExprNames.push_back(&Ident);
1402       auto SizeExprAddr =
1403           CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1404       Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1405       Dimensions.emplace_back(SizeExprAddr.getPointer(),
1406                               Type1D.getUnqualifiedType());
1407     }
1408     Type1D = VlaSize.Type;
1409   }
1410 
1411   if (!EmitDebugInfo)
1412     return;
1413 
1414   // Register each dimension's size-expression with a DILocalVariable,
1415   // so that it can be used by CGDebugInfo when instantiating a DISubrange
1416   // to describe this array.
1417   unsigned NameIdx = 0;
1418   for (auto &VlaSize : Dimensions) {
1419     llvm::Metadata *MD;
1420     if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1421       MD = llvm::ConstantAsMetadata::get(C);
1422     else {
1423       // Create an artificial VarDecl to generate debug info for.
1424       const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1425       auto QT = getContext().getIntTypeForBitwidth(
1426           SizeTy->getScalarSizeInBits(), false);
1427       auto *ArtificialDecl = VarDecl::Create(
1428           getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1429           D.getLocation(), D.getLocation(), NameIdent, QT,
1430           getContext().CreateTypeSourceInfo(QT), SC_Auto);
1431       ArtificialDecl->setImplicit();
1432 
1433       MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1434                                          Builder);
1435     }
1436     assert(MD && "No Size expression debug node created");
1437     DI->registerVLASizeExpression(VlaSize.Type, MD);
1438   }
1439 }
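
// For example (illustrative): for "void f(int n, int m) { int a[n][m]; }"
// this emits two size allocas named __vla_expr0 and __vla_expr1 holding n and
// m, and registers an artificial DILocalVariable for each so the DISubrange
// bounds describing 'a' can reference them.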
1440 
1441 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1442 /// local variable.  Does not emit initialization or destruction.
1443 CodeGenFunction::AutoVarEmission
1444 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1445   QualType Ty = D.getType();
1446   assert(
1447       Ty.getAddressSpace() == LangAS::Default ||
1448       (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1449 
1450   AutoVarEmission emission(D);
1451 
1452   bool isEscapingByRef = D.isEscapingByref();
1453   emission.IsEscapingByRef = isEscapingByRef;
1454 
1455   CharUnits alignment = getContext().getDeclAlign(&D);
1456 
1457   // If the type is variably-modified, emit all the VLA sizes for it.
1458   if (Ty->isVariablyModifiedType())
1459     EmitVariablyModifiedType(Ty);
1460 
1461   auto *DI = getDebugInfo();
1462   bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1463 
1464   Address address = Address::invalid();
1465   RawAddress AllocaAddr = RawAddress::invalid();
1466   Address OpenMPLocalAddr = Address::invalid();
1467   if (CGM.getLangOpts().OpenMPIRBuilder)
1468     OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1469   else
1470     OpenMPLocalAddr =
1471         getLangOpts().OpenMP
1472             ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1473             : Address::invalid();
1474 
1475   bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1476 
1477   if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1478     address = OpenMPLocalAddr;
1479     AllocaAddr = OpenMPLocalAddr;
1480   } else if (Ty->isConstantSizeType()) {
1481     // If this value is an array or struct with a statically determinable
1482     // constant initializer, there are optimizations we can do.
1483     //
1484     // TODO: We should constant-evaluate the initializer of any variable,
1485     // as long as it is initialized by a constant expression. Currently,
1486     // isConstantInitializer produces wrong answers for structs with
1487     // reference or bitfield members, and a few other cases, and checking
1488     // for POD-ness protects us from some of these.
1489     if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1490         (D.isConstexpr() ||
1491          ((Ty.isPODType(getContext()) ||
1492            getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1493           D.getInit()->isConstantInitializer(getContext(), false)))) {
1494 
1495       // If the variable is of const-qualified type, is neither an NRVO
1496       // candidate nor a __block variable, and has no mutable members,
1497       // emit it as a global instead.
1498       // The exception is a variable located in a non-constant address
1499       // space in OpenCL.
1500       bool NeedsDtor =
1501           D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
1502       if ((!getLangOpts().OpenCL ||
1503            Ty.getAddressSpace() == LangAS::opencl_constant) &&
1504           (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1505            !isEscapingByRef &&
1506            Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
1507         EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1508 
1509         // Signal this condition to later callbacks.
1510         emission.Addr = Address::invalid();
1511         assert(emission.wasEmittedAsGlobal());
1512         return emission;
1513       }
1514 
1515       // Otherwise, tell the initialization code that we're in this case.
1516       emission.IsConstantAggregate = true;
1517     }
1518 
1519     // A normal fixed-size variable becomes an alloca in the entry block,
1520     // unless:
1521     // - it's an NRVO variable.
1522     // - we are compiling OpenMP and it's an OpenMP local variable.
1523     if (NRVO) {
1524       // The named return value optimization: allocate this variable in the
1525       // return slot, so that we can elide the copy when returning this
1526       // variable (C++0x [class.copy]p34).
1527       address = ReturnValue;
1528       AllocaAddr =
1529           RawAddress(ReturnValue.emitRawPointer(*this),
1530                      ReturnValue.getElementType(), ReturnValue.getAlignment());
1532 
1533       if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1534         const auto *RD = RecordTy->getDecl();
1535         const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1536         if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1537             RD->isNonTrivialToPrimitiveDestroy()) {
1538           // Create a flag that is used to indicate when the NRVO was applied
1539           // to this variable. Set it to zero to indicate that NRVO was not
1540           // applied.
1541           llvm::Value *Zero = Builder.getFalse();
1542           RawAddress NRVOFlag =
1543               CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1544           EnsureInsertPoint();
1545           Builder.CreateStore(Zero, NRVOFlag);
1546 
1547           // Record the NRVO flag for this variable.
1548           NRVOFlags[&D] = NRVOFlag.getPointer();
1549           emission.NRVOFlag = NRVOFlag.getPointer();
1550         }
1551       }
1552     } else {
1553       CharUnits allocaAlignment;
1554       llvm::Type *allocaTy;
1555       if (isEscapingByRef) {
1556         auto &byrefInfo = getBlockByrefInfo(&D);
1557         allocaTy = byrefInfo.Type;
1558         allocaAlignment = byrefInfo.ByrefAlignment;
1559       } else {
1560         allocaTy = ConvertTypeForMem(Ty);
1561         allocaAlignment = alignment;
1562       }
1563 
1564       // Create the alloca.  Note that we set the name separately from
1565       // building the instruction so that it's there even in no-asserts
1566       // builds.
1567       address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1568                                  /*ArraySize=*/nullptr, &AllocaAddr);
1569 
1570       // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1571       // the catch parameter starts in the catchpad instruction, and we can't
1572       // insert code in those basic blocks.
1573       bool IsMSCatchParam =
1574           D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1575 
1576       // Emit a lifetime intrinsic if meaningful. There's no point in doing
1577       // this if we don't have a valid insertion point.
1578       if (HaveInsertPoint() && !IsMSCatchParam) {
1579         // If there's a jump into the lifetime of this variable, its lifetime
1580         // gets broken up into several regions in IR, which requires more work
1581         // to handle correctly. For now, just omit the intrinsics; this is a
1582         // rare case, and it's better to just be conservatively correct.
1583         // PR28267.
1584         //
1585         // We have to do this in all language modes if there's a jump past the
1586         // declaration. We also have to do it in C if there's a jump to an
1587         // earlier point in the current block because non-VLA lifetimes begin as
1588         // soon as the containing block is entered, not when its variables
1589         // actually come into scope; suppressing the lifetime annotations
1590         // completely in this case is unnecessarily pessimistic, but again, this
1591         // is rare.
1592         if (!Bypasses.IsBypassed(&D) &&
1593             !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1594           llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1595           emission.SizeForLifetimeMarkers =
1596               EmitLifetimeStart(Size, AllocaAddr.getPointer());
1597         }
1598       } else {
1599         assert(!emission.useLifetimeMarkers());
1600       }
1601     }
1602   } else {
1603     EnsureInsertPoint();
1604 
1605     // Delayed globalization for variable length declarations. This ensures that
1606     // the expression representing the length has been emitted and can be used
1607     // by the definition of the VLA. Since this is an escaped declaration, in
1608     // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
1609     // deallocation call to __kmpc_free_shared() is emitted later.
1610     bool VarAllocated = false;
1611     if (getLangOpts().OpenMPIsTargetDevice) {
1612       auto &RT = CGM.getOpenMPRuntime();
1613       if (RT.isDelayedVariableLengthDecl(*this, &D)) {
1614         // Emit call to __kmpc_alloc_shared() instead of the alloca.
1615         std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
1616             RT.getKmpcAllocShared(*this, &D);
1617 
1618         // Save the address of the allocation:
1619         LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
1620                                      CGM.getContext().getDeclAlign(&D),
1621                                      AlignmentSource::Decl);
1622         address = Base.getAddress();
1623 
1624         // Push a cleanup block to emit the call to __kmpc_free_shared in the
1625         // appropriate location at the end of the scope of the
1626         // __kmpc_alloc_shared functions:
1627         pushKmpcAllocFree(NormalCleanup, AddrSizePair);
1628 
1629         // Mark variable as allocated:
1630         VarAllocated = true;
1631       }
1632     }
1633 
1634     if (!VarAllocated) {
1635       if (!DidCallStackSave) {
1636         // Save the stack.
1637         Address Stack =
1638             CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");
1639 
1640         llvm::Value *V = Builder.CreateStackSave();
1641         assert(V->getType() == AllocaInt8PtrTy);
1642         Builder.CreateStore(V, Stack);
1643 
1644         DidCallStackSave = true;
1645 
1646         // Push a cleanup block and restore the stack there.
1647         // FIXME: in general circumstances, this should be an EH cleanup.
1648         pushStackRestore(NormalCleanup, Stack);
1649       }
1650 
1651       auto VlaSize = getVLASize(Ty);
1652       llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1653 
1654       // Allocate memory for the array.
1655       address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1656                                  &AllocaAddr);
1657     }
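
    // Illustrative IR shape for a plain VLA "int vla[n];" (no OpenMP target
    // offloading), assuming a 64-bit target:
    //   %saved_stack = alloca ptr
    //   %sp = call ptr @llvm.stacksave()
    //   store ptr %sp, ptr %saved_stack
    //   %vla = alloca i32, i64 %n
    // with a matching @llvm.stackrestore emitted by the pushed cleanup.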
1658 
1659     // If we have debug info enabled, properly describe the VLA dimensions for
1660     // this type by registering the vla size expression for each of the
1661     // dimensions.
1662     EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1663   }
1664 
1665   setAddrOfLocalVar(&D, address);
1666   emission.Addr = address;
1667   emission.AllocaAddr = AllocaAddr;
1668 
1669   // Emit debug info for local var declaration.
1670   if (EmitDebugInfo && HaveInsertPoint()) {
1671     Address DebugAddr = address;
1672     bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1673     DI->setLocation(D.getLocation());
1674 
1675     // If NRVO, use a pointer to the return address.
1676     if (UsePointerValue) {
1677       DebugAddr = ReturnValuePointer;
1678       AllocaAddr = ReturnValuePointer;
1679     }
1680     (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
1681                                         UsePointerValue);
1682   }
1683 
1684   if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1685     EmitVarAnnotations(&D, address.emitRawPointer(*this));
1686 
1687   // Make sure we call @llvm.lifetime.end.
1688   if (emission.useLifetimeMarkers())
1689     EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1690                                          emission.getOriginalAllocatedAddress(),
1691                                          emission.getSizeForLifetimeMarkers());
1692 
1693   return emission;
1694 }
1695 
1696 static bool isCapturedBy(const VarDecl &, const Expr *);
1697 
1698 /// Determines whether the given __block variable is potentially
1699 /// captured by the given statement.
1700 static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1701   if (const Expr *E = dyn_cast<Expr>(S))
1702     return isCapturedBy(Var, E);
1703   for (const Stmt *SubStmt : S->children())
1704     if (isCapturedBy(Var, SubStmt))
1705       return true;
1706   return false;
1707 }
1708 
1709 /// Determines whether the given __block variable is potentially
1710 /// captured by the given expression.
1711 static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1712   // Skip the most common kinds of expressions that make
1713   // hierarchy-walking expensive.
1714   E = E->IgnoreParenCasts();
1715 
1716   if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1717     const BlockDecl *Block = BE->getBlockDecl();
1718     for (const auto &I : Block->captures()) {
1719       if (I.getVariable() == &Var)
1720         return true;
1721     }
1722 
1723     // No need to walk into the subexpressions.
1724     return false;
1725   }
1726 
1727   if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1728     const CompoundStmt *CS = SE->getSubStmt();
1729     for (const auto *BI : CS->body())
1730       if (const auto *BIE = dyn_cast<Expr>(BI)) {
1731         if (isCapturedBy(Var, BIE))
1732           return true;
1733       } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1734         // Special-case declarations: check their initializers.
1735         for (const auto *I : DS->decls()) {
1736           if (const auto *VD = dyn_cast<VarDecl>(I)) {
1737             const Expr *Init = VD->getInit();
1738             if (Init && isCapturedBy(Var, Init))
1739               return true;
1740           }
1741         }
1742       } else {
1743         // FIXME: Conservatively assume that arbitrary statements cause
1744         // capturing. Later, provide code to poke into statements for capture
1745         // analysis.
1746         return true;
1747       }
1748     return false;
1749   }
1750 
1751   for (const Stmt *SubStmt : E->children())
1752     if (isCapturedBy(Var, SubStmt))
1753       return true;
1754 
1755   return false;
1756 }
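
// For example (illustrative): in "__block void (^b)(void) = ^{ b(); };" the
// initializer's BlockExpr captures b itself, so these helpers report a
// capture and EmitAutoVarInit emits the initializer before copying it into
// the byref slot.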
1757 
1758 /// Determine whether the given initializer is trivial in the sense
1759 /// that it requires no code to be generated.
1760 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1761   if (!Init)
1762     return true;
1763 
1764   if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1765     if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1766       if (Constructor->isTrivial() &&
1767           Constructor->isDefaultConstructor() &&
1768           !Construct->requiresZeroInitialization())
1769         return true;
1770 
1771   return false;
1772 }
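
// For example (illustrative): for "struct S { int x; }; S s;" the implicit
// default constructor is trivial and requires no zero-initialization, so this
// returns true and no initialization code is emitted; "S s{};" has a real
// (zeroing) initializer and returns false.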
1773 
1774 void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1775                                                       const VarDecl &D,
1776                                                       Address Loc) {
1777   auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1778   auto trivialAutoVarInitMaxSize =
1779       getContext().getLangOpts().TrivialAutoVarInitMaxSize;
1780   CharUnits Size = getContext().getTypeSizeInChars(type);
1781   bool isVolatile = type.isVolatileQualified();
1782   if (!Size.isZero()) {
1783     // We skip auto-init for variables whose alloc size exceeds the limit.
1784     // For example, given "struct Foo { int x; char buff[1024]; };" and a
1785     // max-size flag of 1023, all variables of type Foo are skipped. Ideally,
1786     // we would skip only the buff array and still auto-init x.
1787     // TODO: Improve the size filtering to filter by member size.
1788     auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
1789     switch (trivialAutoVarInit) {
1790     case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1791       llvm_unreachable("Uninitialized handled by caller");
1792     case LangOptions::TrivialAutoVarInitKind::Zero:
1793       if (CGM.stopAutoInit())
1794         return;
1795       if (trivialAutoVarInitMaxSize > 0 &&
1796           allocSize > trivialAutoVarInitMaxSize)
1797         return;
1798       emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1799       break;
1800     case LangOptions::TrivialAutoVarInitKind::Pattern:
1801       if (CGM.stopAutoInit())
1802         return;
1803       if (trivialAutoVarInitMaxSize > 0 &&
1804           allocSize > trivialAutoVarInitMaxSize)
1805         return;
1806       emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1807       break;
1808     }
1809     return;
1810   }
1811 
1812   // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1813   // them, so emit a memcpy with the VLA size to initialize each element.
1814   // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1815   // will catch such code, but code that creates zero-sized VLAs does exist.
1816   // Be nice and initialize whatever was requested.
1817   const auto *VlaType = getContext().getAsVariableArrayType(type);
1818   if (!VlaType)
1819     return;
1820   auto VlaSize = getVLASize(VlaType);
1821   auto SizeVal = VlaSize.NumElts;
1822   CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1823   switch (trivialAutoVarInit) {
1824   case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1825     llvm_unreachable("Uninitialized handled by caller");
1826 
1827   case LangOptions::TrivialAutoVarInitKind::Zero: {
1828     if (CGM.stopAutoInit())
1829       return;
1830     if (!EltSize.isOne())
1831       SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1832     auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
1833                                    SizeVal, isVolatile);
1834     I->addAnnotationMetadata("auto-init");
1835     break;
1836   }
1837 
1838   case LangOptions::TrivialAutoVarInitKind::Pattern: {
1839     if (CGM.stopAutoInit())
1840       return;
1841     llvm::Type *ElTy = Loc.getElementType();
1842     llvm::Constant *Constant = constWithPadding(
1843         CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1844     CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1845     llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1846     llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1847     llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1848     llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1849         SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1850         "vla.iszerosized");
1851     Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1852     EmitBlock(SetupBB);
1853     if (!EltSize.isOne())
1854       SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1855     llvm::Value *BaseSizeInChars =
1856         llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1857     Address Begin = Loc.withElementType(Int8Ty);
1858     llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
1859                                                  Begin.emitRawPointer(*this),
1860                                                  SizeVal, "vla.end");
1861     llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1862     EmitBlock(LoopBB);
1863     llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1864     Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
1865     CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1866     auto *I =
1867         Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
1868                              createUnnamedGlobalForMemcpyFrom(
1869                                  CGM, D, Builder, Constant, ConstantAlign),
1870                              BaseSizeInChars, isVolatile);
1871     I->addAnnotationMetadata("auto-init");
1872     llvm::Value *Next =
1873         Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1874     llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1875     Builder.CreateCondBr(Done, ContBB, LoopBB);
1876     Cur->addIncoming(Next, LoopBB);
1877     EmitBlock(ContBB);
1878   } break;
1879   }
1880 }
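
// Illustrative shape of the pattern-init loop built above for "int a[n];"
// under -ftrivial-auto-var-init=pattern:
//   vla-setup.loop:  compute the end pointer a + n*4 (skipped if n == 0)
//   vla-init.loop:   memcpy one element's worth of pattern bytes, advance,
//                    loop until the end pointer is reached
//   vla-init.cont:   rejoin; zero-sized VLAs branch here directly
// Every memcpy is annotated with "auto-init" metadata.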
1881 
1882 void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1883   assert(emission.Variable && "emission was not valid!");
1884 
1885   // If this was emitted as a global constant, we're done.
1886   if (emission.wasEmittedAsGlobal()) return;
1887 
1888   const VarDecl &D = *emission.Variable;
1889   auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
1890   QualType type = D.getType();
1891 
1892   // If this local has an initializer, emit it now.
1893   const Expr *Init = D.getInit();
1894 
1895   // If we are at an unreachable point, we don't need to emit the initializer
1896   // unless it contains a label.
1897   if (!HaveInsertPoint()) {
1898     if (!Init || !ContainsLabel(Init)) return;
1899     EnsureInsertPoint();
1900   }
1901 
1902   // Initialize the structure of a __block variable.
1903   if (emission.IsEscapingByRef)
1904     emitByrefStructureInit(emission);
1905 
1906   // Initialize the variable here if it doesn't have an initializer and it
1907   // is a C struct that is non-trivial to default-initialize or an array
1908   // containing such a struct.
1909   if (!Init &&
1910       type.isNonTrivialToPrimitiveDefaultInitialize() ==
1911           QualType::PDIK_Struct) {
1912     LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1913     if (emission.IsEscapingByRef)
1914       drillIntoBlockVariable(*this, Dst, &D);
1915     defaultInitNonTrivialCStructVar(Dst);
1916     return;
1917   }
1918 
1919   // Check whether this is a byref variable that's potentially
1920   // captured and moved by its own initializer.  If so, we'll need to
1921   // emit the initializer first, then copy into the variable.
1922   bool capturedByInit =
1923       Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1924 
1925   bool locIsByrefHeader = !capturedByInit;
1926   const Address Loc =
1927       locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1928 
1929   // Note: constexpr already initializes everything correctly.
1930   LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1931       (D.isConstexpr()
1932            ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1933            : (D.getAttr<UninitializedAttr>()
1934                   ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1935                   : getContext().getLangOpts().getTrivialAutoVarInit()));
1936 
1937   auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1938     if (trivialAutoVarInit ==
1939         LangOptions::TrivialAutoVarInitKind::Uninitialized)
1940       return;
1941 
1942     // Only initialize a __block's storage: we always initialize the header.
1943     if (emission.IsEscapingByRef && !locIsByrefHeader)
1944       Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1945 
1946     return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1947   };
1948 
1949   if (isTrivialInitializer(Init))
1950     return initializeWhatIsTechnicallyUninitialized(Loc);
1951 
1952   llvm::Constant *constant = nullptr;
1953   if (emission.IsConstantAggregate ||
1954       D.mightBeUsableInConstantExpressions(getContext())) {
1955     assert(!capturedByInit && "constant init contains a capturing block?");
1956     constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1957     if (constant && !constant->isZeroValue() &&
1958         (trivialAutoVarInit !=
1959          LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1960       IsPattern isPattern =
1961           (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1962               ? IsPattern::Yes
1963               : IsPattern::No;
1964       // C guarantees that brace-init with fewer initializers than members in
1965       // the aggregate will initialize the rest of the aggregate as-if it were
1966       // static initialization. In turn static initialization guarantees that
1967       // padding is initialized to zero bits. We could instead pattern-init if D
1968       // has any ImplicitValueInitExpr, but that seems to be unintuitive
1969       // behavior.
1970       constant = constWithPadding(CGM, IsPattern::No,
1971                                   replaceUndef(CGM, isPattern, constant));
1972     }
1973 
1974     if (D.getType()->isBitIntType() &&
1975         CGM.getTypes().typeRequiresSplitIntoByteArray(D.getType())) {
1976       // Constants for long _BitInt types are split into individual bytes.
1977       // Try to fold these back into an integer constant so it can be stored
1978       // properly.
1979       llvm::Type *LoadType = CGM.getTypes().convertTypeForLoadStore(
1980           D.getType(), constant->getType());
1981       constant = llvm::ConstantFoldLoadFromConst(
1982           constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
1983     }
1984   }
1985 
1986   if (!constant) {
1987     if (trivialAutoVarInit !=
1988         LangOptions::TrivialAutoVarInitKind::Uninitialized) {
1989       // At this point, we know D has an Init expression, but isn't a constant.
1990       // - If D is not a scalar, apply auto-var-init conservatively (members
1991       // may be left uninitialized by constructor Init expressions, for example).
1992       // - If D is a scalar, we only need to auto-var-init if there is a
1993       // self-reference. Otherwise, the Init expression should be sufficient.
1994       // It may be that the Init expression uses other uninitialized memory,
1995       // but auto-var-init here would not help, as auto-init would get
1996       // overwritten by Init.
1997       if (!D.getType()->isScalarType() || capturedByInit ||
1998           isAccessedBy(D, Init)) {
1999         initializeWhatIsTechnicallyUninitialized(Loc);
2000       }
2001     }
2002     LValue lv = MakeAddrLValue(Loc, type);
2003     lv.setNonGC(true);
2004     return EmitExprAsInit(Init, &D, lv, capturedByInit);
2005   }
2006 
2007   if (!emission.IsConstantAggregate) {
2008     // For simple scalar/complex initialization, store the value directly.
2009     LValue lv = MakeAddrLValue(Loc, type);
2010     lv.setNonGC(true);
2011     return EmitStoreThroughLValue(RValue::get(constant), lv, true);
2012   }
2013 
2014   emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
2015                         type.isVolatileQualified(), Builder, constant,
2016                         /*IsAutoInit=*/false);
2017 }
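
// For example (illustrative): "int a[3] = {1, 2, 3};" is a constant
// aggregate, so it takes the emitStoresForConstant path above; depending on
// the size heuristics earlier in this file, that becomes either direct
// stores of the elements or a memcpy from an unnamed constant global.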
2018 
2019 /// Emit an expression as an initializer for an object (variable, field, etc.)
2020 /// at the given location.  The expression is not necessarily the normal
2021 /// initializer for the object, and the address is not necessarily
2022 /// its normal location.
2023 ///
2024 /// \param init the initializing expression
2025 /// \param D the object to act as if we're initializing
2026 /// \param lvalue the lvalue to initialize
2027 /// \param capturedByInit true if \p D is a __block variable
2028 ///   whose address is potentially changed by the initializer
2029 void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
2030                                      LValue lvalue, bool capturedByInit) {
2031   QualType type = D->getType();
2032 
2033   if (type->isReferenceType()) {
2034     RValue rvalue = EmitReferenceBindingToExpr(init);
2035     if (capturedByInit)
2036       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2037     EmitStoreThroughLValue(rvalue, lvalue, true);
2038     return;
2039   }
2040   switch (getEvaluationKind(type)) {
2041   case TEK_Scalar:
2042     EmitScalarInit(init, D, lvalue, capturedByInit);
2043     return;
2044   case TEK_Complex: {
2045     ComplexPairTy complex = EmitComplexExpr(init);
2046     if (capturedByInit)
2047       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
2048     EmitStoreOfComplex(complex, lvalue, /*init*/ true);
2049     return;
2050   }
2051   case TEK_Aggregate:
2052     if (type->isAtomicType()) {
2053       EmitAtomicInit(const_cast<Expr*>(init), lvalue);
2054     } else {
2055       AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
2056       if (isa<VarDecl>(D))
2057         Overlap = AggValueSlot::DoesNotOverlap;
2058       else if (auto *FD = dyn_cast<FieldDecl>(D))
2059         Overlap = getOverlapForFieldInit(FD);
2060       // TODO: how can we delay here if D is captured by its initializer?
2061       EmitAggExpr(init,
2062                   AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
2063                                           AggValueSlot::DoesNotNeedGCBarriers,
2064                                           AggValueSlot::IsNotAliased, Overlap));
2065     }
2066     return;
2067   }
2068   llvm_unreachable("bad evaluation kind");
2069 }
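
// Rough mapping of the evaluation kinds handled above (illustrative):
//   TEK_Scalar:    "int x = f();"           -> EmitScalarInit + store
//   TEK_Complex:   "_Complex double z = w;" -> EmitComplexExpr + store
//   TEK_Aggregate: "struct S s = g();"      -> EmitAggExpr into the slot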
2070 
2071 /// Enter a destroy cleanup for the given local variable.
2072 void CodeGenFunction::emitAutoVarTypeCleanup(
2073                             const CodeGenFunction::AutoVarEmission &emission,
2074                             QualType::DestructionKind dtorKind) {
2075   assert(dtorKind != QualType::DK_none);
2076 
2077   // Note that for __block variables, we want to destroy the
2078   // original stack object, not the possibly forwarded object.
2079   Address addr = emission.getObjectAddress(*this);
2080 
2081   const VarDecl *var = emission.Variable;
2082   QualType type = var->getType();
2083 
2084   CleanupKind cleanupKind = NormalAndEHCleanup;
2085   CodeGenFunction::Destroyer *destroyer = nullptr;
2086 
2087   switch (dtorKind) {
2088   case QualType::DK_none:
2089     llvm_unreachable("no cleanup for trivially-destructible variable");
2090 
2091   case QualType::DK_cxx_destructor:
2092     // If there's an NRVO flag on the emission, we need a different
2093     // cleanup.
2094     if (emission.NRVOFlag) {
2095       assert(!type->isArrayType());
2096       CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
2097       EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
2098                                                   emission.NRVOFlag);
2099       return;
2100     }
2101     break;
2102 
2103   case QualType::DK_objc_strong_lifetime:
2104     // Suppress cleanups for pseudo-strong variables.
2105     if (var->isARCPseudoStrong()) return;
2106 
2107     // Otherwise, consider whether to use an EH cleanup or not.
2108     cleanupKind = getARCCleanupKind();
2109 
2110     // Use the imprecise destroyer by default.
2111     if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2112       destroyer = CodeGenFunction::destroyARCStrongImprecise;
2113     break;
2114 
2115   case QualType::DK_objc_weak_lifetime:
2116     break;
2117 
2118   case QualType::DK_nontrivial_c_struct:
2119     destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2120     if (emission.NRVOFlag) {
2121       assert(!type->isArrayType());
2122       EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
2123                                                 emission.NRVOFlag, type);
2124       return;
2125     }
2126     break;
2127   }
2128 
2129   // If we haven't chosen a more specific destroyer, use the default.
2130   if (!destroyer) destroyer = getDestroyer(dtorKind);
2131 
2132   // Use an EH cleanup in array destructors iff the destructor itself
2133   // is being pushed as an EH cleanup.
2134   bool useEHCleanup = (cleanupKind & EHCleanup);
2135   EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2136                                      useEHCleanup);
2137 }
2138 
2139 void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2140   assert(emission.Variable && "emission was not valid!");
2141 
2142   // If this was emitted as a global constant, we're done.
2143   if (emission.wasEmittedAsGlobal()) return;
2144 
2145   // If we don't have an insertion point, we're done.  Sema prevents
2146   // us from jumping into any of these scopes anyway.
2147   if (!HaveInsertPoint()) return;
2148 
2149   const VarDecl &D = *emission.Variable;
2150 
2151   // Check the type for a cleanup.
2152   if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2153     emitAutoVarTypeCleanup(emission, dtorKind);
2154 
2155   // In GC mode, honor objc_precise_lifetime.
2156   if (getLangOpts().getGC() != LangOptions::NonGC &&
2157       D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2158     EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2159   }
2160 
2161   // Handle the cleanup attribute.
2162   if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2163     const FunctionDecl *FD = CA->getFunctionDecl();
2164 
2165     llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2166     assert(F && "Could not find function!");
2167 
2168     const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2169     EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2170   }
2171 
2172   // If this is a block variable, call _Block_object_destroy
2173   // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2174   // mode.
2175   if (emission.IsEscapingByRef &&
2176       CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2177     BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2178     if (emission.Variable->getType().isObjCGCWeak())
2179       Flags |= BLOCK_FIELD_IS_WEAK;
2180     enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2181                       /*LoadBlockVarAddr*/ false,
2182                       cxxDestructorCanThrow(emission.Variable->getType()));
2183   }
2184 }
2185 
2186 CodeGenFunction::Destroyer *
2187 CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2188   switch (kind) {
2189   case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2190   case QualType::DK_cxx_destructor:
2191     return destroyCXXObject;
2192   case QualType::DK_objc_strong_lifetime:
2193     return destroyARCStrongPrecise;
2194   case QualType::DK_objc_weak_lifetime:
2195     return destroyARCWeak;
2196   case QualType::DK_nontrivial_c_struct:
2197     return destroyNonTrivialCStruct;
2198   }
2199   llvm_unreachable("Unknown DestructionKind");
2200 }
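
// For example (illustrative): a local "std::string s;" maps to
// DK_cxx_destructor and destroyCXXObject, while "__strong id obj;" under ARC
// maps to DK_objc_strong_lifetime and one of the ARC release destroyers
// chosen in emitAutoVarTypeCleanup above.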
2201 
2202 /// pushEHDestroy - Push the standard destructor for the given type as
2203 /// an EH-only cleanup.
2204 void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2205                                     Address addr, QualType type) {
2206   assert(dtorKind && "cannot push destructor for trivial type");
2207   assert(needsEHCleanup(dtorKind));
2208 
2209   pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2210 }
2211 
2212 /// pushDestroy - Push the standard destructor for the given type as
2213 /// at least a normal cleanup.
2214 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2215                                   Address addr, QualType type) {
2216   assert(dtorKind && "cannot push destructor for trivial type");
2217 
2218   CleanupKind cleanupKind = getCleanupKind(dtorKind);
2219   pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2220               cleanupKind & EHCleanup);
2221 }
2222 
2223 void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2224                                   QualType type, Destroyer *destroyer,
2225                                   bool useEHCleanupForArray) {
2226   pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2227                                      destroyer, useEHCleanupForArray);
2228 }
2229 
2230 // Pushes a destroy and defers its deactivation until its
2231 // CleanupDeactivationScope is exited.
2232 void CodeGenFunction::pushDestroyAndDeferDeactivation(
2233     QualType::DestructionKind dtorKind, Address addr, QualType type) {
2234   assert(dtorKind && "cannot push destructor for trivial type");
2235 
2236   CleanupKind cleanupKind = getCleanupKind(dtorKind);
2237   pushDestroyAndDeferDeactivation(
2238       cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
2239 }
2240 
2241 void CodeGenFunction::pushDestroyAndDeferDeactivation(
2242     CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
2243     bool useEHCleanupForArray) {
2244   llvm::Instruction *DominatingIP =
2245       Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
2246   pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2247   DeferredDeactivationCleanupStack.push_back(
2248       {EHStack.stable_begin(), DominatingIP});
2249 }
2250 
2251 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2252   EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2253 }
2254 
2255 void CodeGenFunction::pushKmpcAllocFree(
2256     CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
2257   EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
2258 }
2259 
2260 void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2261                                                   Address addr, QualType type,
2262                                                   Destroyer *destroyer,
2263                                                   bool useEHCleanupForArray) {
2264   // If we're not in a conditional branch, we don't need to bother generating a
2265   // conditional cleanup.
2266   if (!isInConditionalBranch()) {
2267     // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2268     // around in case a temporary's destructor throws an exception.
2269 
2270     // Add the cleanup to the EHStack. After the full-expr, this would be
2271     // deactivated before being popped from the stack.
2272     pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
2273                                     useEHCleanupForArray);
2274 
2275     // Since this is lifetime-extended, push it once again to the EHStack after
2276     // the full expression.
2277     return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2278         cleanupKind, Address::invalid(), addr, type, destroyer,
2279         useEHCleanupForArray);
2280   }
2281 
2282   // Otherwise, we should only destroy the object if it's been initialized.
2283 
2284   using ConditionalCleanupType =
2285       EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2286                                        Destroyer *, bool>;
2287   DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(addr);
2288 
2289   // Remember to emit the cleanup if we branch out before the end of the
2290   // full-expression (e.g., through a stmt-expr or a coroutine suspension).
2291   AllocaTrackerRAII DeactivationAllocas(*this);
2292   Address ActiveFlagForDeactivation = createCleanupActiveFlag();
2293 
2294   pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
2295       cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
2296   initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
2297   EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
2298   // Erase the active flag if the cleanup was not emitted.
2299   cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());
2300 
2301   // Since this is lifetime-extended, push it once again to the EHStack after
2302   // the full expression.
2303   // The previous active flag would always be 'false' due to forced deferred
2304   // deactivation. Use a separate flag for lifetime-extension to correctly
2305   // remember if this branch was taken and the object was initialized.
2306   Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
2307   pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2308       cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
2309       useEHCleanupForArray);
2310 }
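
// For example (illustrative): in "const S &r = cond ? S(1) : S(2);" each
// temporary is emitted in a conditional branch, so its lifetime-extended
// destruction takes the ConditionalCleanup path above, gated on an active
// flag that records whether that branch actually constructed the object.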
2311 
2312 /// emitDestroy - Immediately perform the destruction of the given
2313 /// object.
2314 ///
2315 /// \param addr - the address of the object; a type*
2316 /// \param type - the type of the object; if an array type, all
2317 ///   objects are destroyed in reverse order
2318 /// \param destroyer - the function to call to destroy individual
2319 ///   elements
2320 /// \param useEHCleanupForArray - whether an EH cleanup should be
2321 ///   used when destroying array elements, in case one of the
2322 ///   destructions throws an exception
2323 void CodeGenFunction::emitDestroy(Address addr, QualType type,
2324                                   Destroyer *destroyer,
2325                                   bool useEHCleanupForArray) {
2326   const ArrayType *arrayType = getContext().getAsArrayType(type);
2327   if (!arrayType)
2328     return destroyer(*this, addr, type);
2329 
2330   llvm::Value *length = emitArrayLength(arrayType, type, addr);
2331 
2332   CharUnits elementAlign =
2333     addr.getAlignment()
2334         .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2335 
2336   // Normally we have to check whether the array is zero-length.
2337   bool checkZeroLength = true;
2338 
2339   // But if the array length is constant, we can suppress that.
2340   if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2341     // ...and if it's constant zero, we can just skip the entire thing.
2342     if (constLength->isZero()) return;
2343     checkZeroLength = false;
2344   }
2345 
2346   llvm::Value *begin = addr.emitRawPointer(*this);
2347   llvm::Value *end =
2348       Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
2349   emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2350                    checkZeroLength, useEHCleanupForArray);
2351 }
2352 
2353 /// emitArrayDestroy - Destroys all the elements of the given array,
2354 /// beginning from last to first.  The array cannot be zero-length.
2355 ///
2356 /// \param begin - a type* denoting the first element of the array
2357 /// \param end - a type* denoting one past the end of the array
2358 /// \param elementType - the element type of the array
2359 /// \param destroyer - the function to call to destroy elements
2360 /// \param useEHCleanup - whether to push an EH cleanup to destroy
2361 ///   the remaining elements in case the destruction of a single
2362 ///   element throws
2363 void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2364                                        llvm::Value *end,
2365                                        QualType elementType,
2366                                        CharUnits elementAlign,
2367                                        Destroyer *destroyer,
2368                                        bool checkZeroLength,
2369                                        bool useEHCleanup) {
2370   assert(!elementType->isArrayType());
2371 
2372   // The basic structure here is a do-while loop, because we don't
2373   // need to check for the zero-element case.
2374   llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2375   llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2376 
2377   if (checkZeroLength) {
2378     llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2379                                                 "arraydestroy.isempty");
2380     Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2381   }
2382 
2383   // Enter the loop body, making that address the current address.
2384   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2385   EmitBlock(bodyBB);
2386   llvm::PHINode *elementPast =
2387     Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2388   elementPast->addIncoming(end, entryBB);
2389 
2390   // Shift the address back by one element.
2391   llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2392   llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
2393   llvm::Value *element = Builder.CreateInBoundsGEP(
2394       llvmElementType, elementPast, negativeOne, "arraydestroy.element");
2395 
2396   if (useEHCleanup)
2397     pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2398                                    destroyer);
2399 
2400   // Perform the actual destruction there.
2401   destroyer(*this, Address(element, llvmElementType, elementAlign),
2402             elementType);
2403 
2404   if (useEHCleanup)
2405     PopCleanupBlock();
2406 
2407   // Check whether we've reached the end.
2408   llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2409   Builder.CreateCondBr(done, doneBB, bodyBB);
2410   elementPast->addIncoming(element, Builder.GetInsertBlock());
2411 
2412   // Done.
2413   EmitBlock(doneBB);
2414 }
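
// Sketch of the loop emitted above for "S a[4];" (illustrative pseudo-IR):
//   arraydestroy.body:
//     %elementPast = phi ptr [ %end, %entry ], [ %element, %body ]
//     %element = getelementptr %S, ptr %elementPast, i64 -1
//     call ~S(%element)
//     br (%element == %begin) ? %arraydestroy.done : %arraydestroy.body
// i.e. elements are destroyed from last to first, as documented.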
2415 
2416 /// Perform partial array destruction as if in an EH cleanup.  Unlike
2417 /// emitArrayDestroy, the element type here may still be an array type.
2418 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2419                                     llvm::Value *begin, llvm::Value *end,
2420                                     QualType type, CharUnits elementAlign,
2421                                     CodeGenFunction::Destroyer *destroyer) {
2422   llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2423 
2424   // If the element type is itself an array, drill down.
2425   unsigned arrayDepth = 0;
2426   while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2427     // VLAs don't require a GEP index to walk into.
2428     if (!isa<VariableArrayType>(arrayType))
2429       arrayDepth++;
2430     type = arrayType->getElementType();
2431   }
2432 
2433   if (arrayDepth) {
2434     llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2435 
2436     SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2437     begin = CGF.Builder.CreateInBoundsGEP(
2438         elemTy, begin, gepIndices, "pad.arraybegin");
2439     end = CGF.Builder.CreateInBoundsGEP(
2440         elemTy, end, gepIndices, "pad.arrayend");
2441   }
2442 
2443   // Destroy the array.  We don't ever need an EH cleanup because we
2444   // assume that we're in an EH cleanup ourselves, so a throwing
2445   // destructor causes an immediate terminate.
2446   CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2447                        /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2448 }
2449 
2450 namespace {
2451   /// RegularPartialArrayDestroy - a cleanup which performs a partial
2452   /// array destroy where the end pointer is regularly determined and
2453   /// does not need to be loaded from a local.
2454   class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2455     llvm::Value *ArrayBegin;
2456     llvm::Value *ArrayEnd;
2457     QualType ElementType;
2458     CodeGenFunction::Destroyer *Destroyer;
2459     CharUnits ElementAlign;
2460   public:
2461     RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2462                                QualType elementType, CharUnits elementAlign,
2463                                CodeGenFunction::Destroyer *destroyer)
2464       : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2465         ElementType(elementType), Destroyer(destroyer),
2466         ElementAlign(elementAlign) {}
2467 
2468     void Emit(CodeGenFunction &CGF, Flags flags) override {
2469       emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2470                               ElementType, ElementAlign, Destroyer);
2471     }
2472   };
2473 
2474   /// IrregularPartialArrayDestroy - a cleanup which performs a
2475   /// partial array destroy where the end pointer is irregularly
2476   /// determined and must be loaded from a local.
2477   class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2478     llvm::Value *ArrayBegin;
2479     Address ArrayEndPointer;
2480     QualType ElementType;
2481     CodeGenFunction::Destroyer *Destroyer;
2482     CharUnits ElementAlign;
2483   public:
2484     IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2485                                  Address arrayEndPointer,
2486                                  QualType elementType,
2487                                  CharUnits elementAlign,
2488                                  CodeGenFunction::Destroyer *destroyer)
2489       : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2490         ElementType(elementType), Destroyer(destroyer),
2491         ElementAlign(elementAlign) {}
2492 
2493     void Emit(CodeGenFunction &CGF, Flags flags) override {
2494       llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2495       emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2496                               ElementType, ElementAlign, Destroyer);
2497     }
2498   };
2499 } // end anonymous namespace
2500 
2501 /// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
2502 /// destroy already-constructed elements of the given array.  The cleanup may be
2503 /// popped with DeactivateCleanupBlock or PopCleanupBlock.
2504 ///
2505 /// \param elementType - the immediate element type of the array;
2506 ///   possibly still an array type
2507 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2508                                                        Address arrayEndPointer,
2509                                                        QualType elementType,
2510                                                        CharUnits elementAlign,
2511                                                        Destroyer *destroyer) {
2512   pushFullExprCleanup<IrregularPartialArrayDestroy>(
2513       NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
2514       elementAlign, destroyer);
2515 }
2516 
2517 /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2518 /// already-constructed elements of the given array.  The cleanup
2519 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2520 ///
2521 /// \param elementType - the immediate element type of the array;
2522 ///   possibly still an array type
2523 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2524                                                      llvm::Value *arrayEnd,
2525                                                      QualType elementType,
2526                                                      CharUnits elementAlign,
2527                                                      Destroyer *destroyer) {
2528   pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2529                                                   arrayBegin, arrayEnd,
2530                                                   elementType, elementAlign,
2531                                                   destroyer);
2532 }
2533 
2534 /// Lazily declare the @llvm.lifetime.start intrinsic.
2535 llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2536   if (LifetimeStartFn)
2537     return LifetimeStartFn;
2538   LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2539     llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2540   return LifetimeStartFn;
2541 }
2542 
2543 /// Lazily declare the @llvm.lifetime.end intrinsic.
2544 llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2545   if (LifetimeEndFn)
2546     return LifetimeEndFn;
2547   LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2548     llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2549   return LifetimeEndFn;
2550 }
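
// The lazily-created declarations look roughly like this (the pointer type
// follows the target's alloca address space):
//   declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
//   declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)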
2551 
2552 namespace {
2553   /// A cleanup to perform a release of an object at the end of a
2554   /// function.  This is used to balance out the incoming +1 of a
2555   /// ns_consumed argument when we can't reasonably do that just by
2556   /// not doing the initial retain for a __block argument.
2557   struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2558     ConsumeARCParameter(llvm::Value *param,
2559                         ARCPreciseLifetime_t precise)
2560       : Param(param), Precise(precise) {}
2561 
2562     llvm::Value *Param;
2563     ARCPreciseLifetime_t Precise;
2564 
2565     void Emit(CodeGenFunction &CGF, Flags flags) override {
2566       CGF.EmitARCRelease(Param, Precise);
2567     }
2568   };
2569 } // end anonymous namespace
2570 
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  bool NoDebugInfo = false;
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  // Set the name of the parameter's initial value to make IR easier to
  // read. Don't modify the names of globals.
  if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
    Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
    // Suppress debug info for ThreadPrivateVar parameters; otherwise it
    // hides the debug info of the underlying TLS variables.
    NoDebugInfo =
        (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
  }

  Address DeclPtr = Address::invalid();
  RawAddress AllocaPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  bool UseIndirectDebugAddress = false;

  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
    // An indirect argument is in the alloca address space, which may differ
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.emitRawPointer(*this);
    AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());

    // For truly ABI indirect arguments -- those that are not `byval` -- store
    // the address of the argument on the stack to preserve debug information.
    ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
    if (ArgInfo.isIndirect())
      UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
    if (UseIndirectDebugAddress) {
      auto PtrTy = getContext().getPointerType(Ty);
      AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
                                D.getName() + ".indirect_addr");
      EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
    }
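    // Net effect for the non-byval indirect case (illustrative sketch):
    //   %x.indirect_addr = alloca ptr
    //   store ptr %x, ptr %x.indirect_addr
    // Debug info then describes the argument through one extra load.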

    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
      DeclPtr =
          DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
                                  *this, V, SrcLangAS, DestLangAS, T, true),
                              DeclPtr.isKnownNonNull());
    }
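    // E.g. on amdgcn (illustrative), where allocas live in addrspace(5) but
    // LangAS::Default maps to the generic address space, this cast emits:
    //   %arg.ascast = addrspacecast ptr addrspace(5) %arg to ptr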

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (Ty->isRecordType() && !CurFuncIsThunk &&
        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind =
              D.needsDestruction(getContext())) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
  } else {
    // Check whether the parameter's address is controlled by the OpenMP
    // runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
      AllocaPtr = DeclPtr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr", &AllocaPtr);
    }
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong, then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) to retain the object. But
            // first, store a null into 'dest' because objc_storeStrong
            // attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          } else {
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
          }
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
                                ? ARCPreciseLifetime : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store; no need to do two.
        }
      }
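
      // At -O0, the strong, non-consumed path above therefore lowers to a
      // null store followed by a runtime call (illustrative sketch):
      //   store ptr null, ptr %param.addr
      //   call void @objc_storeStrong(ptr %param.addr, ptr %param)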

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
        !NoDebugInfo) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}
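
// For instance (illustrative): given
//   int *_Nonnull f(int *_Nonnull p);
// the sanitizer check on f's return value is only meaningful when `p` was
// itself non-null on entry, so each non-null argument is AND-ed into
// RetValNullabilityPrecondition above.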

void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}
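// Reached for user-defined reductions such as (illustrative):
//   #pragma omp declare reduction(myadd : int : omp_out += omp_in) \
//       initializer(omp_priv = 0)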

void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}
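// Reached for user-defined mappers such as (illustrative):
//   #pragma omp declare mapper(id : struct S s) map(s.len, s.data[0:s.len])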

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}

void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
  for (const Expr *E : D->varlists()) {
    const auto *DE = cast<DeclRefExpr>(E);
    const auto *VD = cast<VarDecl>(DE->getDecl());

    // Skip all but globals.
    if (!VD->hasGlobalStorage())
      continue;

    // Check whether the global has been materialized yet. If not, we are
    // done, as any later emission will honor the OMPAllocateDeclAttr.
    // However, if we already emitted the global, we might have done so before
    // the OMPAllocateDeclAttr was attached, potentially leading to the wrong
    // address space. While not pretty, common practice is to remove the old
    // IR global and generate a new one, so we do that here too. Uses are
    // replaced properly.
    StringRef MangledName = getMangledName(VD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;

    // We can keep the existing global if its address space is already the one
    // we expect; otherwise it is replaced.
    QualType ASTTy = VD->getType();
    clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
    auto TargetAS = getContext().getTargetAddressSpace(GVAS);
    if (Entry->getType()->getAddressSpace() == TargetAS)
      continue;

    // Make a new global with the correct type / address space.
    llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
    llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);

    // Replace all uses of the old global with a cast. Since we mutate the
    // type in place, we need an intermediate that takes the spot of the old
    // entry until we can create the cast.
    llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
        getModule(), Entry->getValueType(), false,
        llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
        llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
    Entry->replaceAllUsesWith(DummyGV);

    Entry->mutateType(PTy);
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            Entry, DummyGV->getType());

    // Now that we have a cast version of the changed global, the dummy can be
    // replaced and deleted.
    DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
    DummyGV->eraseFromParent();
  }
}
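// Net effect of the loop above (illustrative sketch): a global first emitted
// as
//   @g = global i32 0
// whose allocator demands, say, addrspace(3) ends up as
//   @g = addrspace(3) global i32 0
// with all pre-existing uses rewritten through
//   addrspacecast ptr addrspace(3) @g to ptr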

std::optional<CharUnits>
CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
  if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
    if (Expr *Alignment = AA->getAlignment()) {
      unsigned UserAlign =
          Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
      CharUnits NaturalAlign =
          getNaturalTypeAlignment(VD->getType().getNonReferenceType());

      // OpenMP 5.1, page 185, lines 7-10:
      //   Each item in the align modifier list must be aligned to the maximum
      //   of the specified alignment and the type's natural alignment.
      return CharUnits::fromQuantity(
          std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
    }
  }
  return std::nullopt;
}
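// Worked example (illustrative): for `double d` (natural alignment 8) under
//   #pragma omp allocate(d) align(4)
// the returned alignment is max(4, 8) == 8 bytes, per the rule cited above.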