1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code dealing with code generation of C++ expressions
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCUDARuntime.h"
14 #include "CGCXXABI.h"
15 #include "CGDebugInfo.h"
16 #include "CGObjCRuntime.h"
17 #include "CodeGenFunction.h"
18 #include "ConstantEmitter.h"
19 #include "TargetInfo.h"
20 #include "clang/Basic/CodeGenOptions.h"
21 #include "clang/CodeGen/CGFunctionInfo.h"
22 #include "llvm/IR/Intrinsics.h"
23 
24 using namespace clang;
25 using namespace CodeGen;
26 
27 namespace {
28 struct MemberCallInfo {
29   RequiredArgs ReqArgs;
30   // Number of prefix arguments for the call. Ignores the `this` pointer.
31   unsigned PrefixSize;
32 };
33 }
34 
35 static MemberCallInfo
36 commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
37                                   llvm::Value *This, llvm::Value *ImplicitParam,
38                                   QualType ImplicitParamTy, const CallExpr *CE,
39                                   CallArgList &Args, CallArgList *RtlArgs) {
40   assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
41          isa<CXXOperatorCallExpr>(CE));
42   assert(MD->isInstance() &&
43          "Trying to emit a member or operator call expr on a static method!");
44 
45   // Push the this ptr.
46   const CXXRecordDecl *RD =
47       CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
48   Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
49 
50   // If there is an implicit parameter (e.g. VTT), emit it.
51   if (ImplicitParam) {
52     Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
53   }
54 
55   const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
56   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
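  // Everything pushed so far other than the `this` pointer (e.g. a VTT)
  // counts as a prefix argument of the call.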
57   unsigned PrefixSize = Args.size() - 1;
58 
59   // And the rest of the call args.
60   if (RtlArgs) {
61     // Special case: if the caller emitted the arguments right-to-left already
62     // (prior to emitting the *this argument), we're done. This happens for
63     // assignment operators.
64     Args.addFrom(*RtlArgs);
65   } else if (CE) {
66     // Special case: skip first argument of CXXOperatorCall (it is "this").
67     unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
68     CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
69                      CE->getDirectCallee());
70   } else {
71     assert(
72         FPT->getNumParams() == 0 &&
73         "No CallExpr specified for function with non-zero number of arguments");
74   }
75   return {required, PrefixSize};
76 }
77 
78 RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
79     const CXXMethodDecl *MD, const CGCallee &Callee,
80     ReturnValueSlot ReturnValue,
81     llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
82     const CallExpr *CE, CallArgList *RtlArgs) {
83   const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
84   CallArgList Args;
85   MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
86       *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
87   auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
88       Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
89   return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
90                   CE ? CE->getExprLoc() : SourceLocation());
91 }
92 
93 RValue CodeGenFunction::EmitCXXDestructorCall(
94     GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
95     llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
96   const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
97 
98   assert(!ThisTy.isNull());
99   assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
100          "Pointer/Object mixup");
101 
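  // If the address space of `this` differs from the one the destructor
  // expects (per its method qualifiers), cast the pointer first.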
102   LangAS SrcAS = ThisTy.getAddressSpace();
103   LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
104   if (SrcAS != DstAS) {
105     QualType DstTy = DtorDecl->getThisType();
106     llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
107     This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
108                                                  NewType);
109   }
110 
111   CallArgList Args;
112   commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
113                                     ImplicitParamTy, CE, Args, nullptr);
114   return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
115                   ReturnValueSlot(), Args);
116 }
117 
118 RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
119                                             const CXXPseudoDestructorExpr *E) {
120   QualType DestroyedType = E->getDestroyedType();
121   if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
122     // Automatic Reference Counting:
123     //   If the pseudo-expression names a retainable object with weak or
124     //   strong lifetime, the object shall be released.
125     Expr *BaseExpr = E->getBase();
126     Address BaseValue = Address::invalid();
127     Qualifiers BaseQuals;
128 
129     // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
130     if (E->isArrow()) {
131       BaseValue = EmitPointerWithAlignment(BaseExpr);
132       const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
133       BaseQuals = PTy->getPointeeType().getQualifiers();
134     } else {
135       LValue BaseLV = EmitLValue(BaseExpr);
136       BaseValue = BaseLV.getAddress();
137       QualType BaseTy = BaseExpr->getType();
138       BaseQuals = BaseTy.getQualifiers();
139     }
140 
141     switch (DestroyedType.getObjCLifetime()) {
142     case Qualifiers::OCL_None:
143     case Qualifiers::OCL_ExplicitNone:
144     case Qualifiers::OCL_Autoreleasing:
145       break;
146 
147     case Qualifiers::OCL_Strong:
148       EmitARCRelease(Builder.CreateLoad(BaseValue,
149                         DestroyedType.isVolatileQualified()),
150                      ARCPreciseLifetime);
151       break;
152 
153     case Qualifiers::OCL_Weak:
154       EmitARCDestroyWeak(BaseValue);
155       break;
156     }
157   } else {
158     // C++ [expr.pseudo]p1:
159     //   The result shall only be used as the operand for the function call
160     //   operator (), and the result of such a call has type void. The only
161     //   effect is the evaluation of the postfix-expression before the dot or
162     //   arrow.
163     EmitIgnoredExpr(E->getBase());
164   }
165 
166   return RValue::get(nullptr);
167 }
168 
169 static CXXRecordDecl *getCXXRecord(const Expr *E) {
170   QualType T = E->getType();
171   if (const PointerType *PTy = T->getAs<PointerType>())
172     T = PTy->getPointeeType();
173   const RecordType *Ty = T->castAs<RecordType>();
174   return cast<CXXRecordDecl>(Ty->getDecl());
175 }
176 
177 // Note: This function also emits constructor calls to support an MSVC
178 // extension allowing explicit constructor function calls.
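// For example, a plain member call such as:
//   struct S { void f(); };
//   void g(S *p) { p->f(); }
// is emitted through this function.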
179 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
180                                               ReturnValueSlot ReturnValue) {
181   const Expr *callee = CE->getCallee()->IgnoreParens();
182 
183   if (isa<BinaryOperator>(callee))
184     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
185 
186   const MemberExpr *ME = cast<MemberExpr>(callee);
187   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
188 
189   if (MD->isStatic()) {
190     // The method is static; emit it as we would a regular call.
191     CGCallee callee =
192         CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
193     return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
194                     ReturnValue);
195   }
196 
197   bool HasQualifier = ME->hasQualifier();
198   NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
199   bool IsArrow = ME->isArrow();
200   const Expr *Base = ME->getBase();
201 
202   return EmitCXXMemberOrOperatorMemberCallExpr(
203       CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
204 }
205 
206 RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
207     const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
208     bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
209     const Expr *Base) {
210   assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
211 
212   // Compute the object pointer.
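  // Explicit qualification (e.g. x->Base::f()) suppresses virtual dispatch;
  // see the [class.virtual]p12 note below.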
213   bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
214 
215   const CXXMethodDecl *DevirtualizedMethod = nullptr;
216   if (CanUseVirtualCall &&
217       MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
218     const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
219     DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
220     assert(DevirtualizedMethod);
221     const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
222     const Expr *Inner = Base->ignoreParenBaseCasts();
223     if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
224         MD->getReturnType().getCanonicalType())
225       // If the return types are not the same, this might be a case where more
226       // code needs to run to compensate for it. For example, the derived
227       // method might return a type that inherits from the return
228       // type of MD and has a prefix.
229       // For now we just avoid devirtualizing these covariant cases.
230       DevirtualizedMethod = nullptr;
231     else if (getCXXRecord(Inner) == DevirtualizedClass)
232       // If the class of the Inner expression is where the dynamic method
233       // is defined, build the this pointer from it.
234       Base = Inner;
235     else if (getCXXRecord(Base) != DevirtualizedClass) {
236       // If the method is defined in a class that is not the best dynamic
237       // one or the one of the full expression, we would have to build
238       // a derived-to-base cast to compute the correct this pointer, but
239       // we don't have support for that yet, so do a virtual call.
240       DevirtualizedMethod = nullptr;
241     }
242   }
243 
244   // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
245   // operator before the LHS.
246   CallArgList RtlArgStorage;
247   CallArgList *RtlArgs = nullptr;
248   if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
249     if (OCE->isAssignmentOp()) {
250       RtlArgs = &RtlArgStorage;
251       EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
252                    drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
253                    /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
254     }
255   }
256 
257   LValue This;
258   if (IsArrow) {
259     LValueBaseInfo BaseInfo;
260     TBAAAccessInfo TBAAInfo;
261     Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
262     This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
263   } else {
264     This = EmitLValue(Base);
265   }
266 
267   if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
268     // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
269     // constructing a new complete object of type Ctor.
270     assert(!RtlArgs);
271     assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
272     CallArgList Args;
273     commonEmitCXXMemberOrOperatorCall(
274         *this, Ctor, This.getPointer(), /*ImplicitParam=*/nullptr,
275         /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
276 
277     EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
278                            /*Delegating=*/false, This.getAddress(), Args,
279                            AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
280                            /*NewPointerIsChecked=*/false);
281     return RValue::get(nullptr);
282   }
283 
284   if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
285     if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
286     if (!MD->getParent()->mayInsertExtraPadding()) {
287       if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
288         // We don't like to generate the trivial copy/move assignment operator
289         // when it isn't necessary; just produce the proper effect here.
290         LValue RHS = isa<CXXOperatorCallExpr>(CE)
291                          ? MakeNaturalAlignAddrLValue(
292                                (*RtlArgs)[0].getRValue(*this).getScalarVal(),
293                                (*(CE->arg_begin() + 1))->getType())
294                          : EmitLValue(*CE->arg_begin());
295         EmitAggregateAssign(This, RHS, CE->getType());
296         return RValue::get(This.getPointer());
297       }
298       llvm_unreachable("unknown trivial member function");
299     }
300   }
301 
302   // Compute the function type we're calling.
303   const CXXMethodDecl *CalleeDecl =
304       DevirtualizedMethod ? DevirtualizedMethod : MD;
305   const CGFunctionInfo *FInfo = nullptr;
306   if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
307     FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
308         GlobalDecl(Dtor, Dtor_Complete));
309   else
310     FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
311 
312   llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
313 
314   // C++11 [class.mfct.non-static]p2:
315   //   If a non-static member function of a class X is called for an object that
316   //   is not of type X, or of a type derived from X, the behavior is undefined.
317   SourceLocation CallLoc;
318   ASTContext &C = getContext();
319   if (CE)
320     CallLoc = CE->getExprLoc();
321 
322   SanitizerSet SkippedChecks;
323   if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
324     auto *IOA = CMCE->getImplicitObjectArgument();
325     bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
326     if (IsImplicitObjectCXXThis)
327       SkippedChecks.set(SanitizerKind::Alignment, true);
328     if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
329       SkippedChecks.set(SanitizerKind::Null, true);
330   }
331   EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, This.getPointer(),
332                 C.getRecordType(CalleeDecl->getParent()),
333                 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
334 
335   // C++ [class.virtual]p12:
336   //   Explicit qualification with the scope operator (5.1) suppresses the
337   //   virtual call mechanism.
338   //
339   // We also don't emit a virtual call if the base expression has a record type
340   // because then we know what the type is.
341   bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
342 
343   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
344     assert(CE->arg_begin() == CE->arg_end() &&
345            "Destructor shouldn't have explicit parameters");
346     assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
347     if (UseVirtualCall) {
348       CGM.getCXXABI().EmitVirtualDestructorCall(
349           *this, Dtor, Dtor_Complete, This.getAddress(),
350           cast<CXXMemberCallExpr>(CE));
351     } else {
352       GlobalDecl GD(Dtor, Dtor_Complete);
353       CGCallee Callee;
354       if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
355         Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
356       else if (!DevirtualizedMethod)
357         Callee =
358             CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
359       else {
360         Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
361       }
362 
363       QualType ThisTy =
364           IsArrow ? Base->getType()->getPointeeType() : Base->getType();
365       EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
366                             /*ImplicitParam=*/nullptr,
367                             /*ImplicitParamTy=*/QualType(), nullptr);
368     }
369     return RValue::get(nullptr);
370   }
371 
372   // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
373   // 'CalleeDecl' instead.
374 
375   CGCallee Callee;
376   if (UseVirtualCall) {
377     Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
378   } else {
379     if (SanOpts.has(SanitizerKind::CFINVCall) &&
380         MD->getParent()->isDynamicClass()) {
381       llvm::Value *VTable;
382       const CXXRecordDecl *RD;
383       std::tie(VTable, RD) =
384           CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(),
385                                         CalleeDecl->getParent());
386       EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
387     }
388 
389     if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
390       Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
391     else if (!DevirtualizedMethod)
392       Callee =
393           CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
394     else {
395       Callee =
396           CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
397                               GlobalDecl(DevirtualizedMethod));
398     }
399   }
400 
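  // Even when the call has been devirtualized, the ABI may still need to
  // adjust `this` to the subobject expected by the callee.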
401   if (MD->isVirtual()) {
402     Address NewThisAddr =
403         CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
404             *this, CalleeDecl, This.getAddress(), UseVirtualCall);
405     This.setAddress(NewThisAddr);
406   }
407 
408   return EmitCXXMemberOrOperatorCall(
409       CalleeDecl, Callee, ReturnValue, This.getPointer(),
410       /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
411 }
412 
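// Handles calls through pointers to member functions, e.g. (obj.*pmf)(args)
// or (ptr->*pmf)(args).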
413 RValue
414 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
415                                               ReturnValueSlot ReturnValue) {
416   const BinaryOperator *BO =
417       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
418   const Expr *BaseExpr = BO->getLHS();
419   const Expr *MemFnExpr = BO->getRHS();
420 
421   const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
422   const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
423   const auto *RD =
424       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
425 
426   // Emit the 'this' pointer.
427   Address This = Address::invalid();
428   if (BO->getOpcode() == BO_PtrMemI)
429     This = EmitPointerWithAlignment(BaseExpr);
430   else
431     This = EmitLValue(BaseExpr).getAddress();
432 
433   EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
434                 QualType(MPT->getClass(), 0));
435 
436   // Get the member function pointer.
437   llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
438 
439   // Ask the ABI to load the callee.  Note that This is modified.
440   llvm::Value *ThisPtrForCall = nullptr;
441   CGCallee Callee =
442     CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
443                                              ThisPtrForCall, MemFnPtr, MPT);
444 
445   CallArgList Args;
446 
447   QualType ThisType =
448     getContext().getPointerType(getContext().getTagDeclType(RD));
449 
450   // Push the this ptr.
451   Args.add(RValue::get(ThisPtrForCall), ThisType);
452 
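  // Only the `this` pointer precedes the prototype's own parameters here.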
453   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
454 
455   // And the rest of the call args
456   EmitCallArgs(Args, FPT, E->arguments());
457   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
458                                                       /*PrefixSize=*/0),
459                   Callee, ReturnValue, Args, nullptr, E->getExprLoc());
460 }
461 
462 RValue
463 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
464                                                const CXXMethodDecl *MD,
465                                                ReturnValueSlot ReturnValue) {
466   assert(MD->isInstance() &&
467          "Trying to emit a member call expr on a static method!");
468   return EmitCXXMemberOrOperatorMemberCallExpr(
469       E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
470       /*IsArrow=*/false, E->getArg(0));
471 }
472 
473 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
474                                                ReturnValueSlot ReturnValue) {
475   return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
476 }
477 
478 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
479                                             Address DestPtr,
480                                             const CXXRecordDecl *Base) {
481   if (Base->isEmpty())
482     return;
483 
484   DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
485 
486   const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
487   CharUnits NVSize = Layout.getNonVirtualSize();
488 
489   // We cannot simply zero-initialize the entire base sub-object if vbptrs are
490   // present; they are initialized by the most derived class before calling the
491   // constructor.
492   SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
493   Stores.emplace_back(CharUnits::Zero(), NVSize);
494 
495   // Each store is split by the existence of a vbptr.
496   CharUnits VBPtrWidth = CGF.getPointerSize();
497   std::vector<CharUnits> VBPtrOffsets =
498       CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
499   for (CharUnits VBPtrOffset : VBPtrOffsets) {
500     // Stop before we hit any virtual base pointers located in virtual bases.
501     if (VBPtrOffset >= NVSize)
502       break;
503     std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
504     CharUnits LastStoreOffset = LastStore.first;
505     CharUnits LastStoreSize = LastStore.second;
506 
507     CharUnits SplitBeforeOffset = LastStoreOffset;
508     CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
509     assert(!SplitBeforeSize.isNegative() && "negative store size!");
510     if (!SplitBeforeSize.isZero())
511       Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);
512 
513     CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
514     CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
515     assert(!SplitAfterSize.isNegative() && "negative store size!");
516     if (!SplitAfterSize.isZero())
517       Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
518   }
519 
520   // If the type contains a pointer to data member we can't memset it to zero.
521   // Instead, create a null constant and copy it to the destination.
522   // TODO: there are other patterns besides zero that we can usefully memset,
523   // like -1, which happens to be the pattern used by member-pointers.
524   // TODO: isZeroInitializable can be over-conservative in the case where a
525   // virtual base contains a member pointer.
526   llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
527   if (!NullConstantForBase->isNullValue()) {
528     llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
529         CGF.CGM.getModule(), NullConstantForBase->getType(),
530         /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
531         NullConstantForBase, Twine());
532 
533     CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
534                                DestPtr.getAlignment());
535     NullVariable->setAlignment(Align.getAsAlign());
536 
537     Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
538 
539     // Get and call the appropriate llvm.memcpy overload.
540     for (std::pair<CharUnits, CharUnits> Store : Stores) {
541       CharUnits StoreOffset = Store.first;
542       CharUnits StoreSize = Store.second;
543       llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
544       CGF.Builder.CreateMemCpy(
545           CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
546           CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
547           StoreSizeVal);
548     }
549 
550   // Otherwise, just memset the whole thing to zero.  This is legal
551   // because in LLVM, all default initializers (other than the ones we just
552   // handled above) are guaranteed to have a bit pattern of all zeros.
553   } else {
554     for (std::pair<CharUnits, CharUnits> Store : Stores) {
555       CharUnits StoreOffset = Store.first;
556       CharUnits StoreSize = Store.second;
557       llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
558       CGF.Builder.CreateMemSet(
559           CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
560           CGF.Builder.getInt8(0), StoreSizeVal);
561     }
562   }
563 }
564 
565 void
566 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
567                                       AggValueSlot Dest) {
568   assert(!Dest.isIgnored() && "Must have a destination!");
569   const CXXConstructorDecl *CD = E->getConstructor();
570 
571   // If we require zero initialization before (or instead of) calling the
572   // constructor, as can be the case with a non-user-provided default
573   // constructor, emit the zero initialization now, unless the destination is
574   // already zeroed.
575   if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
576     switch (E->getConstructionKind()) {
577     case CXXConstructExpr::CK_Delegating:
578     case CXXConstructExpr::CK_Complete:
579       EmitNullInitialization(Dest.getAddress(), E->getType());
580       break;
581     case CXXConstructExpr::CK_VirtualBase:
582     case CXXConstructExpr::CK_NonVirtualBase:
583       EmitNullBaseClassInitialization(*this, Dest.getAddress(),
584                                       CD->getParent());
585       break;
586     }
587   }
588 
589   // If this is a call to a trivial default constructor, do nothing.
590   if (CD->isTrivial() && CD->isDefaultConstructor())
591     return;
592 
593   // Elide the constructor if we're constructing from a temporary.
594   // The temporary check is required because Sema sets this on NRVO
595   // returns.
596   if (getLangOpts().ElideConstructors && E->isElidable()) {
597     assert(getContext().hasSameUnqualifiedType(E->getType(),
598                                                E->getArg(0)->getType()));
599     if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
600       EmitAggExpr(E->getArg(0), Dest);
601       return;
602     }
603   }
604 
605   if (const ArrayType *arrayType
606         = getContext().getAsArrayType(E->getType())) {
607     EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
608                                Dest.isSanitizerChecked());
609   } else {
610     CXXCtorType Type = Ctor_Complete;
611     bool ForVirtualBase = false;
612     bool Delegating = false;
613 
614     switch (E->getConstructionKind()) {
615      case CXXConstructExpr::CK_Delegating:
616       // We should be emitting a constructor; GlobalDecl will assert this
617       Type = CurGD.getCtorType();
618       Delegating = true;
619       break;
620 
621      case CXXConstructExpr::CK_Complete:
622       Type = Ctor_Complete;
623       break;
624 
625      case CXXConstructExpr::CK_VirtualBase:
626       ForVirtualBase = true;
627       LLVM_FALLTHROUGH;
628 
629      case CXXConstructExpr::CK_NonVirtualBase:
630       Type = Ctor_Base;
631      }
632 
633      // Call the constructor.
634      EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
635   }
636 }
637 
638 void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
639                                                  const Expr *Exp) {
640   if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
641     Exp = E->getSubExpr();
642   assert(isa<CXXConstructExpr>(Exp) &&
643          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
644   const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
645   const CXXConstructorDecl *CD = E->getConstructor();
646   RunCleanupsScope Scope(*this);
647 
648   // If we require zero initialization before (or instead of) calling the
649   // constructor, as can be the case with a non-user-provided default
650   // constructor, emit the zero initialization now.
651   // FIXME. Do I still need this for a copy ctor synthesis?
652   if (E->requiresZeroInitialization())
653     EmitNullInitialization(Dest, E->getType());
654 
655   assert(!getContext().getAsConstantArrayType(E->getType())
656          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
657   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
658 }
659 
660 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
661                                         const CXXNewExpr *E) {
662   if (!E->isArray())
663     return CharUnits::Zero();
664 
665   // No cookie is required if the operator new[] being used is the
666   // reserved placement operator new[].
667   if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
668     return CharUnits::Zero();
669 
670   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
671 }
672 
673 static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
674                                         const CXXNewExpr *e,
675                                         unsigned minElements,
676                                         llvm::Value *&numElements,
677                                         llvm::Value *&sizeWithoutCookie) {
678   QualType type = e->getAllocatedType();
679 
680   if (!e->isArray()) {
681     CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
682     sizeWithoutCookie
683       = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
684     return sizeWithoutCookie;
685   }
686 
687   // The width of size_t.
688   unsigned sizeWidth = CGF.SizeTy->getBitWidth();
689 
690   // Figure out the cookie size.
691   llvm::APInt cookieSize(sizeWidth,
692                          CalculateCookiePadding(CGF, e).getQuantity());
693 
694   // Emit the array size expression.
695   // We multiply the size of all dimensions for NumElements.
696   // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
697   numElements =
698     ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
699   if (!numElements)
700     numElements = CGF.EmitScalarExpr(*e->getArraySize());
701   assert(isa<llvm::IntegerType>(numElements->getType()));
702 
703   // The number of elements can have an arbitrary integer type;
704   // essentially, we need to multiply it by a constant factor, add a
705   // cookie size, and verify that the result is representable as a
706   // size_t.  That's just a gloss, though, and it's wrong in one
707   // important way: if the count is negative, it's an error even if
708   // the cookie size would bring the total size >= 0.
709   bool isSigned
710     = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
711   llvm::IntegerType *numElementsType
712     = cast<llvm::IntegerType>(numElements->getType());
713   unsigned numElementsWidth = numElementsType->getBitWidth();
714 
715   // Compute the constant factor.
716   llvm::APInt arraySizeMultiplier(sizeWidth, 1);
717   while (const ConstantArrayType *CAT
718              = CGF.getContext().getAsConstantArrayType(type)) {
719     type = CAT->getElementType();
720     arraySizeMultiplier *= CAT->getSize();
721   }
722 
723   CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
724   llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
725   typeSizeMultiplier *= arraySizeMultiplier;
726 
727   // This will be a size_t.
728   llvm::Value *size;
729 
730   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
731   // Don't bloat the -O0 code.
732   if (llvm::ConstantInt *numElementsC =
733         dyn_cast<llvm::ConstantInt>(numElements)) {
734     const llvm::APInt &count = numElementsC->getValue();
735 
736     bool hasAnyOverflow = false;
737 
738     // If 'count' was a negative number, it's an overflow.
739     if (isSigned && count.isNegative())
740       hasAnyOverflow = true;
741 
742     // We want to do all this arithmetic in size_t.  If numElements is
743     // wider than that, check whether it's already too big, and if so,
744     // overflow.
745     else if (numElementsWidth > sizeWidth &&
746              numElementsWidth - sizeWidth > count.countLeadingZeros())
747       hasAnyOverflow = true;
748 
749     // Okay, compute a count at the right width.
750     llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
751 
752     // If there is a brace-initializer, we cannot allocate fewer elements than
753     // there are initializers. If we do, that's treated like an overflow.
754     if (adjustedCount.ult(minElements))
755       hasAnyOverflow = true;
756 
757     // Scale numElements by that.  This might overflow, but we don't
758     // care because it only overflows if allocationSize does, too, and
759     // if that overflows then we shouldn't use this.
760     numElements = llvm::ConstantInt::get(CGF.SizeTy,
761                                          adjustedCount * arraySizeMultiplier);
762 
763     // Compute the size before cookie, and track whether it overflowed.
764     bool overflow;
765     llvm::APInt allocationSize
766       = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
767     hasAnyOverflow |= overflow;
768 
769     // Add in the cookie, and check whether it's overflowed.
770     if (cookieSize != 0) {
771       // Save the current size without a cookie.  This shouldn't be
772       // used if there was overflow.
773       sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
774 
775       allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
776       hasAnyOverflow |= overflow;
777     }
778 
779     // On overflow, produce a -1 so operator new will fail.
780     if (hasAnyOverflow) {
781       size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
782     } else {
783       size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
784     }
785 
786   // Otherwise, we might need to use the overflow intrinsics.
787   } else {
788     // There are up to five conditions we need to test for:
789     // 1) if isSigned, we need to check whether numElements is negative;
790     // 2) if numElementsWidth > sizeWidth, we need to check whether
791     //   numElements is larger than something representable in size_t;
792     // 3) if minElements > 0, we need to check whether numElements is smaller
793     //    than that;
794     // 4) we need to compute
795     //      sizeWithoutCookie := numElements * typeSizeMultiplier
796     //    and check whether it overflows; and
797     // 5) if we need a cookie, we need to compute
798     //      size := sizeWithoutCookie + cookieSize
799     //    and check whether it overflows.
800 
801     llvm::Value *hasOverflow = nullptr;
802 
803     // If numElementsWidth > sizeWidth, then one way or another, we're
804     // going to have to do a comparison for (2), and this happens to
805     // take care of (1), too.
806     if (numElementsWidth > sizeWidth) {
807       llvm::APInt threshold(numElementsWidth, 1);
808       threshold <<= sizeWidth;
809 
810       llvm::Value *thresholdV
811         = llvm::ConstantInt::get(numElementsType, threshold);
812 
813       hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
814       numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
815 
816     // Otherwise, if we're signed, we want to sext up to size_t.
817     } else if (isSigned) {
818       if (numElementsWidth < sizeWidth)
819         numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
820 
821       // If there's a non-1 type size multiplier, then we can do the
822       // signedness check at the same time as we do the multiply
823       // because a negative number times anything will cause an
824       // unsigned overflow.  Otherwise, we have to do it here. But at least
825       // in this case, we can subsume the >= minElements check.
826       if (typeSizeMultiplier == 1)
827         hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
828                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
829 
830     // Otherwise, zext up to size_t if necessary.
831     } else if (numElementsWidth < sizeWidth) {
832       numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
833     }
834 
835     assert(numElements->getType() == CGF.SizeTy);
836 
837     if (minElements) {
838       // Don't allow allocation of fewer elements than we have initializers.
839       if (!hasOverflow) {
840         hasOverflow = CGF.Builder.CreateICmpULT(numElements,
841                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
842       } else if (numElementsWidth > sizeWidth) {
843         // The other existing overflow subsumes this check.
844         // We do an unsigned comparison, since any signed value < -1 is
845         // taken care of either above or below.
846         hasOverflow = CGF.Builder.CreateOr(hasOverflow,
847                           CGF.Builder.CreateICmpULT(numElements,
848                               llvm::ConstantInt::get(CGF.SizeTy, minElements)));
849       }
850     }
851 
852     size = numElements;
853 
854     // Multiply by the type size if necessary.  This multiplier
855     // includes all the factors for nested arrays.
856     //
857     // This step also causes numElements to be scaled up by the
858     // nested-array factor if necessary.  Overflow on this computation
859     // can be ignored because the result shouldn't be used if
860     // allocation fails.
861     if (typeSizeMultiplier != 1) {
862       llvm::Function *umul_with_overflow
863         = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
864 
865       llvm::Value *tsmV =
866         llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
867       llvm::Value *result =
868           CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});
869 
870       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
871       if (hasOverflow)
872         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
873       else
874         hasOverflow = overflowed;
875 
876       size = CGF.Builder.CreateExtractValue(result, 0);
877 
878       // Also scale up numElements by the array size multiplier.
879       if (arraySizeMultiplier != 1) {
880         // If the base element type size is 1, then we can re-use the
881         // multiply we just did.
882         if (typeSize.isOne()) {
883           assert(arraySizeMultiplier == typeSizeMultiplier);
884           numElements = size;
885 
886         // Otherwise we need a separate multiply.
887         } else {
888           llvm::Value *asmV =
889             llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
890           numElements = CGF.Builder.CreateMul(numElements, asmV);
891         }
892       }
893     } else {
894       // numElements doesn't need to be scaled.
895       assert(arraySizeMultiplier == 1);
896     }
897 
898     // Add in the cookie size if necessary.
899     if (cookieSize != 0) {
900       sizeWithoutCookie = size;
901 
902       llvm::Function *uadd_with_overflow
903         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
904 
905       llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
906       llvm::Value *result =
907           CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});
908 
909       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
910       if (hasOverflow)
911         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
912       else
913         hasOverflow = overflowed;
914 
915       size = CGF.Builder.CreateExtractValue(result, 0);
916     }
917 
918     // If we had any possibility of dynamic overflow, make a select to
919     // overwrite 'size' with an all-ones value, which should cause
920     // operator new to throw.
921     if (hasOverflow)
922       size = CGF.Builder.CreateSelect(hasOverflow,
923                                  llvm::Constant::getAllOnesValue(CGF.SizeTy),
924                                       size);
925   }
926 
927   if (cookieSize == 0)
928     sizeWithoutCookie = size;
929   else
930     assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
931 
932   return size;
933 }
934 
935 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
936                                     QualType AllocType, Address NewPtr,
937                                     AggValueSlot::Overlap_t MayOverlap) {
938   // FIXME: Refactor with EmitExprAsInit.
939   switch (CGF.getEvaluationKind(AllocType)) {
940   case TEK_Scalar:
941     CGF.EmitScalarInit(Init, nullptr,
942                        CGF.MakeAddrLValue(NewPtr, AllocType), false);
943     return;
944   case TEK_Complex:
945     CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
946                                   /*isInit*/ true);
947     return;
948   case TEK_Aggregate: {
949     AggValueSlot Slot
950       = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
951                               AggValueSlot::IsDestructed,
952                               AggValueSlot::DoesNotNeedGCBarriers,
953                               AggValueSlot::IsNotAliased,
954                               MayOverlap, AggValueSlot::IsNotZeroed,
955                               AggValueSlot::IsSanitizerChecked);
956     CGF.EmitAggExpr(Init, Slot);
957     return;
958   }
959   }
960   llvm_unreachable("bad evaluation kind");
961 }
962 
963 void CodeGenFunction::EmitNewArrayInitializer(
964     const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
965     Address BeginPtr, llvm::Value *NumElements,
966     llvm::Value *AllocSizeWithoutCookie) {
967   // If we have a type with trivial initialization and no initializer,
968   // there's nothing to do.
969   if (!E->hasInitializer())
970     return;
971 
972   Address CurPtr = BeginPtr;
973 
974   unsigned InitListElements = 0;
975 
976   const Expr *Init = E->getInitializer();
977   Address EndOfInit = Address::invalid();
978   QualType::DestructionKind DtorKind = ElementType.isDestructedType();
979   EHScopeStack::stable_iterator Cleanup;
980   llvm::Instruction *CleanupDominator = nullptr;
981 
982   CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
983   CharUnits ElementAlign =
984     BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
985 
986   // Attempt to perform zero-initialization using memset.
987   auto TryMemsetInitialization = [&]() -> bool {
988     // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
989     // we can initialize with a memset to -1.
990     if (!CGM.getTypes().isZeroInitializable(ElementType))
991       return false;
992 
993     // Optimization: since zero initialization will just set the memory
994     // to all zeroes, generate a single memset to do it in one shot.
995 
996     // Subtract out the size of any elements we've already initialized.
997     auto *RemainingSize = AllocSizeWithoutCookie;
998     if (InitListElements) {
999       // We know this can't overflow; we check this when doing the allocation.
1000       auto *InitializedSize = llvm::ConstantInt::get(
1001           RemainingSize->getType(),
1002           getContext().getTypeSizeInChars(ElementType).getQuantity() *
1003               InitListElements);
1004       RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
1005     }
1006 
1007     // Create the memset.
1008     Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
1009     return true;
1010   };
1011 
1012   // If the initializer is an initializer list, first do the explicit elements.
1013   if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
1014     // Initializing from a (braced) string literal is a special case; the init
1015     // list element does not initialize a (single) array element.
1016     if (ILE->isStringLiteralInit()) {
1017       // Initialize the initial portion of length equal to that of the string
1018       // literal. The allocation must be for at least this much; we emitted a
1019       // check for that earlier.
1020       AggValueSlot Slot =
1021           AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
1022                                 AggValueSlot::IsDestructed,
1023                                 AggValueSlot::DoesNotNeedGCBarriers,
1024                                 AggValueSlot::IsNotAliased,
1025                                 AggValueSlot::DoesNotOverlap,
1026                                 AggValueSlot::IsNotZeroed,
1027                                 AggValueSlot::IsSanitizerChecked);
1028       EmitAggExpr(ILE->getInit(0), Slot);
1029 
1030       // Move past these elements.
1031       InitListElements =
1032           cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
1033               ->getSize().getZExtValue();
1034       CurPtr =
1035           Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
1036                                             Builder.getSize(InitListElements),
1037                                             "string.init.end"),
1038                   CurPtr.getAlignment().alignmentAtOffset(InitListElements *
1039                                                           ElementSize));
1040 
1041       // Zero out the rest, if any remain.
1042       llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
1043       if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
1044         bool OK = TryMemsetInitialization();
1045         (void)OK;
1046         assert(OK && "couldn't memset character type?");
1047       }
1048       return;
1049     }
1050 
1051     InitListElements = ILE->getNumInits();
1052 
1053     // If this is a multi-dimensional array new, we will initialize multiple
1054     // elements with each init list element.
1055     QualType AllocType = E->getAllocatedType();
1056     if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
1057             AllocType->getAsArrayTypeUnsafe())) {
1058       ElementTy = ConvertTypeForMem(AllocType);
1059       CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
1060       InitListElements *= getContext().getConstantArrayElementCount(CAT);
1061     }
1062 
1063     // Enter a partial-destruction Cleanup if necessary.
1064     if (needsEHCleanup(DtorKind)) {
1065       // In principle we could tell the Cleanup where we are more
1066       // directly, but the control flow can get so varied here that it
1067       // would actually be quite complex.  Therefore we go through an
1068       // alloca.
1069       EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
1070                                    "array.init.end");
1071       CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
1072       pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
1073                                        ElementType, ElementAlign,
1074                                        getDestroyer(DtorKind));
1075       Cleanup = EHStack.stable_begin();
1076     }
1077 
1078     CharUnits StartAlign = CurPtr.getAlignment();
1079     for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
1080       // Tell the cleanup that it needs to destroy up to this
1081       // element.  TODO: some of these stores can be trivially
1082       // observed to be unnecessary.
1083       if (EndOfInit.isValid()) {
1084         auto FinishedPtr =
1085           Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
1086         Builder.CreateStore(FinishedPtr, EndOfInit);
1087       }
1088       // FIXME: If the last initializer is an incomplete initializer list for
1089       // an array, and we have an array filler, we can fold together the two
1090       // initialization loops.
1091       StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
1092                               ILE->getInit(i)->getType(), CurPtr,
1093                               AggValueSlot::DoesNotOverlap);
1094       CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
1095                                                  Builder.getSize(1),
1096                                                  "array.exp.next"),
1097                        StartAlign.alignmentAtOffset((i + 1) * ElementSize));
1098     }
1099 
1100     // The remaining elements are filled with the array filler expression.
1101     Init = ILE->getArrayFiller();
1102 
1103     // Extract the initializer for the individual array elements by pulling
1104     // out the array filler from all the nested initializer lists. This avoids
1105     // generating a nested loop for the initialization.
1106     while (Init && Init->getType()->isConstantArrayType()) {
1107       auto *SubILE = dyn_cast<InitListExpr>(Init);
1108       if (!SubILE)
1109         break;
1110       assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
1111       Init = SubILE->getArrayFiller();
1112     }
1113 
1114     // Switch back to initializing one base element at a time.
1115     CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
1116   }
1117 
1118   // If all elements have already been initialized, skip any further
1119   // initialization.
1120   llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
1121   if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
1122     // If there was a Cleanup, deactivate it.
1123     if (CleanupDominator)
1124       DeactivateCleanupBlock(Cleanup, CleanupDominator);
1125     return;
1126   }
1127 
1128   assert(Init && "have trailing elements to initialize but no initializer");
1129 
1130   // If this is a constructor call, try to optimize it out, and failing that
1131   // emit a single loop to initialize all remaining elements.
1132   if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
1133     CXXConstructorDecl *Ctor = CCE->getConstructor();
1134     if (Ctor->isTrivial()) {
1135       // If the new-expression did not specify value-initialization, then there
1136       // is no initialization.
1137       if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
1138         return;
1139 
1140       if (TryMemsetInitialization())
1141         return;
1142     }
1143 
1144     // Store the new Cleanup position for irregular Cleanups.
1145     //
1146     // FIXME: Share this cleanup with the constructor call emission rather than
1147     // having it create a cleanup of its own.
1148     if (EndOfInit.isValid())
1149       Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
1150 
1151     // Emit a constructor call loop to initialize the remaining elements.
1152     if (InitListElements)
1153       NumElements = Builder.CreateSub(
1154           NumElements,
1155           llvm::ConstantInt::get(NumElements->getType(), InitListElements));
1156     EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
1157                                /*NewPointerIsChecked*/true,
1158                                CCE->requiresZeroInitialization());
1159     return;
1160   }
1161 
1162   // If this is value-initialization, we can usually use memset.
1163   ImplicitValueInitExpr IVIE(ElementType);
1164   if (isa<ImplicitValueInitExpr>(Init)) {
1165     if (TryMemsetInitialization())
1166       return;
1167 
1168     // Switch to an ImplicitValueInitExpr for the element type. This handles
1169     // only one case: multidimensional array new of pointers to members. In
1170     // all other cases, we already have an initializer for the array element.
1171     Init = &IVIE;
1172   }
1173 
1174   // At this point we should have found an initializer for the individual
1175   // elements of the array.
1176   assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
1177          "got wrong type of element to initialize");
1178 
1179   // If we have an empty initializer list, we can usually use memset.
1180   if (auto *ILE = dyn_cast<InitListExpr>(Init))
1181     if (ILE->getNumInits() == 0 && TryMemsetInitialization())
1182       return;
1183 
1184   // If we have a struct whose every field is value-initialized, we can
1185   // usually use memset.
1186   if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
1187     if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
1188       if (RType->getDecl()->isStruct()) {
1189         unsigned NumElements = 0;
1190         if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
1191           NumElements = CXXRD->getNumBases();
1192         for (auto *Field : RType->getDecl()->fields())
1193           if (!Field->isUnnamedBitfield())
1194             ++NumElements;
1195         // FIXME: Recurse into nested InitListExprs.
1196         if (ILE->getNumInits() == NumElements)
1197           for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1198             if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
1199               --NumElements;
1200         if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
1201           return;
1202       }
1203     }
1204   }
1205 
1206   // Create the loop blocks.
1207   llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
1208   llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
1209   llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
1210 
1211   // Find the end of the array, hoisted out of the loop.
1212   llvm::Value *EndPtr =
1213     Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");
1214 
1215   // If the number of elements isn't constant, we have to now check if there is
1216   // anything left to initialize.
1217   if (!ConstNum) {
1218     llvm::Value *IsEmpty =
1219       Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
1220     Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
1221   }
1222 
1223   // Enter the loop.
1224   EmitBlock(LoopBB);
1225 
1226   // Set up the current-element phi.
1227   llvm::PHINode *CurPtrPhi =
1228     Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
1229   CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
1230 
1231   CurPtr = Address(CurPtrPhi, ElementAlign);
1232 
1233   // Store the new Cleanup position for irregular Cleanups.
1234   if (EndOfInit.isValid())
1235     Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
1236 
1237   // Enter a partial-destruction Cleanup if necessary.
1238   if (!CleanupDominator && needsEHCleanup(DtorKind)) {
1239     pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
1240                                    ElementType, ElementAlign,
1241                                    getDestroyer(DtorKind));
1242     Cleanup = EHStack.stable_begin();
1243     CleanupDominator = Builder.CreateUnreachable();
1244   }
1245 
1246   // Emit the initializer into this element.
1247   StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
1248                           AggValueSlot::DoesNotOverlap);
1249 
1250   // Leave the Cleanup if we entered one.
1251   if (CleanupDominator) {
1252     DeactivateCleanupBlock(Cleanup, CleanupDominator);
1253     CleanupDominator->eraseFromParent();
1254   }
1255 
1256   // Advance to the next element by adjusting the pointer type as necessary.
1257   llvm::Value *NextPtr =
1258     Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
1259                                        "array.next");
1260 
1261   // Check whether we've gotten to the end of the array and, if so,
1262   // exit the loop.
1263   llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
1264   Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
1265   CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
1266 
1267   EmitBlock(ContBB);
1268 }
1269 
1270 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1271                                QualType ElementType, llvm::Type *ElementTy,
1272                                Address NewPtr, llvm::Value *NumElements,
1273                                llvm::Value *AllocSizeWithoutCookie) {
1274   ApplyDebugLocation DL(CGF, E);
1275   if (E->isArray())
1276     CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
1277                                 AllocSizeWithoutCookie);
1278   else if (const Expr *Init = E->getInitializer())
1279     StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
1280                             AggValueSlot::DoesNotOverlap);
1281 }
1282 
1283 /// Emit a call to an operator new or operator delete function, as implicitly
1284 /// created by new-expressions and delete-expressions.
1285 static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1286                                 const FunctionDecl *CalleeDecl,
1287                                 const FunctionProtoType *CalleeType,
1288                                 const CallArgList &Args) {
1289   llvm::CallBase *CallOrInvoke;
1290   llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
1291   CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
1292   RValue RV =
1293       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
1294                        Args, CalleeType, /*ChainCall=*/false),
1295                    Callee, ReturnValueSlot(), Args, &CallOrInvoke);
1296 
1297   /// C++1y [expr.new]p10:
1298   ///   [In a new-expression,] an implementation is allowed to omit a call
1299   ///   to a replaceable global allocation function.
1300   ///
1301   /// We model such elidable calls with the 'builtin' attribute.
1302   llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
1303   if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
1304       Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
1305     CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
1306                                llvm::Attribute::Builtin);
1307   }
1308 
1309   return RV;
1310 }
1311 
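// Illustrative note: this path is used, for example, to lower
// __builtin_operator_new and __builtin_operator_delete, which call the usual
// global allocation functions directly rather than through a new-expression.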
1312 RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1313                                                  const CallExpr *TheCall,
1314                                                  bool IsDelete) {
1315   CallArgList Args;
1316   EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
1317   // Find the allocation or deallocation function that we're calling.
1318   ASTContext &Ctx = getContext();
1319   DeclarationName Name = Ctx.DeclarationNames
1320       .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
1321 
1322   for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1323     if (auto *FD = dyn_cast<FunctionDecl>(Decl))
1324       if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
1325         return EmitNewDeleteCall(*this, FD, Type, Args);
1326   llvm_unreachable("predeclared global operator new/delete is missing");
1327 }
1328 
1329 namespace {
1330 /// The parameters to pass to a usual operator delete.
1331 struct UsualDeleteParams {
1332   bool DestroyingDelete = false;
1333   bool Size = false;
1334   bool Alignment = false;
1335 };
1336 }
1337 
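// For illustration: a usual (non-placement) deallocation function such as
//   void operator delete(void *, std::size_t, std::align_val_t) noexcept;
// yields both Size and Alignment set, while a C++20 destroying delete declared
// as a class member like
//   void operator delete(T *, std::destroying_delete_t);
// sets DestroyingDelete (and may additionally take size/alignment parameters).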
1338 static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
1339   UsualDeleteParams Params;
1340 
1341   const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
1342   auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
1343 
1344   // The first argument is always a void*.
1345   ++AI;
1346 
1347   // The next parameter may be a std::destroying_delete_t.
1348   if (FD->isDestroyingOperatorDelete()) {
1349     Params.DestroyingDelete = true;
1350     assert(AI != AE);
1351     ++AI;
1352   }
1353 
1354   // Figure out what other parameters we should be implicitly passing.
1355   if (AI != AE && (*AI)->isIntegerType()) {
1356     Params.Size = true;
1357     ++AI;
1358   }
1359 
1360   if (AI != AE && (*AI)->isAlignValT()) {
1361     Params.Alignment = true;
1362     ++AI;
1363   }
1364 
1365   assert(AI == AE && "unexpected usual deallocation function parameter");
1366   return Params;
1367 }
1368 
1369 namespace {
1370   /// A cleanup to call the given 'operator delete' function upon abnormal
1371   /// exit from a new expression. Templated on a traits type that deals with
1372   /// ensuring that the arguments dominate the cleanup if necessary.
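  /// For instance, if the constructor invoked by 'new (buf) T(...)' throws and
  /// a matching placement 'operator delete' is visible, this cleanup calls it
  /// with the same placement arguments that were passed to 'operator new'.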
1373   template<typename Traits>
1374   class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1375     /// Type used to hold llvm::Value*s.
1376     typedef typename Traits::ValueTy ValueTy;
1377     /// Type used to hold RValues.
1378     typedef typename Traits::RValueTy RValueTy;
1379     struct PlacementArg {
1380       RValueTy ArgValue;
1381       QualType ArgType;
1382     };
1383 
1384     unsigned NumPlacementArgs : 31;
1385     unsigned PassAlignmentToPlacementDelete : 1;
1386     const FunctionDecl *OperatorDelete;
1387     ValueTy Ptr;
1388     ValueTy AllocSize;
1389     CharUnits AllocAlign;
1390 
1391     PlacementArg *getPlacementArgs() {
1392       return reinterpret_cast<PlacementArg *>(this + 1);
1393     }
1394 
1395   public:
1396     static size_t getExtraSize(size_t NumPlacementArgs) {
1397       return NumPlacementArgs * sizeof(PlacementArg);
1398     }
1399 
1400     CallDeleteDuringNew(size_t NumPlacementArgs,
1401                         const FunctionDecl *OperatorDelete, ValueTy Ptr,
1402                         ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
1403                         CharUnits AllocAlign)
1404       : NumPlacementArgs(NumPlacementArgs),
1405         PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
1406         OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
1407         AllocAlign(AllocAlign) {}
1408 
1409     void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1410       assert(I < NumPlacementArgs && "index out of range");
1411       getPlacementArgs()[I] = {Arg, Type};
1412     }
1413 
1414     void Emit(CodeGenFunction &CGF, Flags flags) override {
1415       const FunctionProtoType *FPT =
1416           OperatorDelete->getType()->getAs<FunctionProtoType>();
1417       CallArgList DeleteArgs;
1418 
1419       // The first argument is always a void* (or C* for a destroying operator
1420       // delete for class type C).
1421       DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1422 
1423       // Figure out what other parameters we should be implicitly passing.
1424       UsualDeleteParams Params;
1425       if (NumPlacementArgs) {
1426         // A placement deallocation function is implicitly passed an alignment
1427         // if the placement allocation function was, but is never passed a size.
1428         Params.Alignment = PassAlignmentToPlacementDelete;
1429       } else {
1430         // For a non-placement new-expression, 'operator delete' can take a
1431         // size and/or an alignment if it has the right parameters.
1432         Params = getUsualDeleteParams(OperatorDelete);
1433       }
1434 
1435       assert(!Params.DestroyingDelete &&
1436              "should not call destroying delete in a new-expression");
1437 
1438       // The second argument can be a std::size_t (for non-placement delete).
1439       if (Params.Size)
1440         DeleteArgs.add(Traits::get(CGF, AllocSize),
1441                        CGF.getContext().getSizeType());
1442 
1443       // The next (second or third) argument can be a std::align_val_t, which
1444       // is an enum whose underlying type is std::size_t.
1445       // FIXME: Use the right type as the parameter type. Note that in a call
1446       // to operator delete(size_t, ...), we may not have it available.
1447       if (Params.Alignment)
1448         DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1449                            CGF.SizeTy, AllocAlign.getQuantity())),
1450                        CGF.getContext().getSizeType());
1451 
1452       // Pass the rest of the arguments, which must match exactly.
1453       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1454         auto Arg = getPlacementArgs()[I];
1455         DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1456       }
1457 
1458       // Call 'operator delete'.
1459       EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1460     }
1461   };
1462 }
1463 
1464 /// Enter a cleanup to call 'operator delete' if the initializer in a
1465 /// new-expression throws.
1466 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1467                                   const CXXNewExpr *E,
1468                                   Address NewPtr,
1469                                   llvm::Value *AllocSize,
1470                                   CharUnits AllocAlign,
1471                                   const CallArgList &NewArgs) {
1472   unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
1473 
1474   // If we're not inside a conditional branch, then the cleanup will
1475   // dominate and we can do the easier (and more efficient) thing.
1476   if (!CGF.isInConditionalBranch()) {
1477     struct DirectCleanupTraits {
1478       typedef llvm::Value *ValueTy;
1479       typedef RValue RValueTy;
1480       static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
1481       static RValue get(CodeGenFunction &, RValueTy V) { return V; }
1482     };
1483 
1484     typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
1485 
1486     DirectCleanup *Cleanup = CGF.EHStack
1487       .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
1488                                            E->getNumPlacementArgs(),
1489                                            E->getOperatorDelete(),
1490                                            NewPtr.getPointer(),
1491                                            AllocSize,
1492                                            E->passAlignment(),
1493                                            AllocAlign);
1494     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1495       auto &Arg = NewArgs[I + NumNonPlacementArgs];
1496       Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
1497     }
1498 
1499     return;
1500   }
1501 
1502   // Otherwise, we need to save all this stuff.
1503   DominatingValue<RValue>::saved_type SavedNewPtr =
1504     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
1505   DominatingValue<RValue>::saved_type SavedAllocSize =
1506     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1507 
1508   struct ConditionalCleanupTraits {
1509     typedef DominatingValue<RValue>::saved_type ValueTy;
1510     typedef DominatingValue<RValue>::saved_type RValueTy;
1511     static RValue get(CodeGenFunction &CGF, ValueTy V) {
1512       return V.restore(CGF);
1513     }
1514   };
1515   typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
1516 
1517   ConditionalCleanup *Cleanup = CGF.EHStack
1518     .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
1519                                               E->getNumPlacementArgs(),
1520                                               E->getOperatorDelete(),
1521                                               SavedNewPtr,
1522                                               SavedAllocSize,
1523                                               E->passAlignment(),
1524                                               AllocAlign);
1525   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1526     auto &Arg = NewArgs[I + NumNonPlacementArgs];
1527     Cleanup->setPlacementArg(
1528         I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
1529   }
1530 
1531   CGF.initFullExprCleanup();
1532 }
1533 
1534 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1535   // The element type being allocated.
1536   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1537 
1538   // 1. Build a call to the allocation function.
1539   FunctionDecl *allocator = E->getOperatorNew();
1540 
1541   // With a brace-initializer, we cannot allocate fewer elements than inits.
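  // For example, 'new int[n]{1, 2, 3}' must allocate at least three elements,
  // and a braced string-literal initializer like 'new char[n]{"hi"}' needs at
  // least three (two characters plus the terminating NUL).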
1542   unsigned minElements = 0;
1543   if (E->isArray() && E->hasInitializer()) {
1544     const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
1545     if (ILE && ILE->isStringLiteralInit())
1546       minElements =
1547           cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
1548               ->getSize().getZExtValue();
1549     else if (ILE)
1550       minElements = ILE->getNumInits();
1551   }
1552 
1553   llvm::Value *numElements = nullptr;
1554   llvm::Value *allocSizeWithoutCookie = nullptr;
1555   llvm::Value *allocSize =
1556     EmitCXXNewAllocSize(*this, E, minElements, numElements,
1557                         allocSizeWithoutCookie);
1558   CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
1559 
1560   // Emit the allocation call.  If the allocator is a global placement
1561   // operator, just "inline" it directly.
1562   Address allocation = Address::invalid();
1563   CallArgList allocatorArgs;
1564   if (allocator->isReservedGlobalPlacementOperator()) {
1565     assert(E->getNumPlacementArgs() == 1);
1566     const Expr *arg = *E->placement_arguments().begin();
1567 
1568     LValueBaseInfo BaseInfo;
1569     allocation = EmitPointerWithAlignment(arg, &BaseInfo);
1570 
1571     // The pointer expression will, in many cases, be an opaque void*.
1572     // In these cases, discard the computed alignment and use the
1573     // formal alignment of the allocated type.
1574     if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
1575       allocation = Address(allocation.getPointer(), allocAlign);
1576 
1577     // Set up allocatorArgs for the call to operator delete if it's not
1578     // the reserved global operator.
1579     if (E->getOperatorDelete() &&
1580         !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1581       allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
1582       allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
1583     }
1584 
1585   } else {
1586     const FunctionProtoType *allocatorType =
1587       allocator->getType()->castAs<FunctionProtoType>();
1588     unsigned ParamsToSkip = 0;
1589 
1590     // The allocation size is the first argument.
1591     QualType sizeType = getContext().getSizeType();
1592     allocatorArgs.add(RValue::get(allocSize), sizeType);
1593     ++ParamsToSkip;
1594 
1595     if (allocSize != allocSizeWithoutCookie) {
1596       CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1597       allocAlign = std::max(allocAlign, cookieAlign);
1598     }
1599 
1600     // The allocation alignment may be passed as the second argument.
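    // This typically happens for over-aligned types under C++17 aligned
    // allocation, where Sema selects an 'operator new(size_t, align_val_t)'
    // overload and marks the expression with passAlignment().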
1601     if (E->passAlignment()) {
1602       QualType AlignValT = sizeType;
1603       if (allocatorType->getNumParams() > 1) {
1604         AlignValT = allocatorType->getParamType(1);
1605         assert(getContext().hasSameUnqualifiedType(
1606                    AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
1607                    sizeType) &&
1608                "wrong type for alignment parameter");
1609         ++ParamsToSkip;
1610       } else {
1611         // Corner case, passing alignment to 'operator new(size_t, ...)'.
1612         assert(allocator->isVariadic() && "can't pass alignment to allocator");
1613       }
1614       allocatorArgs.add(
1615           RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
1616           AlignValT);
1617     }
1618 
1619     // FIXME: Why do we not pass a CalleeDecl here?
1620     EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1621                  /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1622 
1623     RValue RV =
1624       EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1625 
1626     // If this was a call to a global replaceable allocation function that does
1627     // not take an alignment argument, the allocator is known to produce
1628     // storage that's suitably aligned for any object that fits, up to a known
1629     // threshold. Otherwise assume it's suitably aligned for the allocated type.
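    // Target.getNewAlign() is that threshold (in bits); on common 64-bit
    // targets it is typically 128 bits, i.e. 16 bytes, matching
    // __STDCPP_DEFAULT_NEW_ALIGNMENT__.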
1630     CharUnits allocationAlign = allocAlign;
1631     if (!E->passAlignment() &&
1632         allocator->isReplaceableGlobalAllocationFunction()) {
1633       unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
1634           Target.getNewAlign(), getContext().getTypeSize(allocType)));
1635       allocationAlign = std::max(
1636           allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
1637     }
1638 
1639     allocation = Address(RV.getScalarVal(), allocationAlign);
1640   }
1641 
1642   // Emit a null check on the allocation result if the allocation
1643   // function is allowed to return null (because it has a non-throwing
1644   // exception spec or is the reserved placement new) and we have an
1645   // interesting initializer or will be running sanitizers on the initialization.
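  // For example, 'new (std::nothrow) T' uses a non-throwing allocator that may
  // return null, so any non-trivial initialization below must be guarded.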
1646   bool nullCheck = E->shouldNullCheckAllocation() &&
1647                    (!allocType.isPODType(getContext()) || E->hasInitializer() ||
1648                     sanitizePerformTypeCheck());
1649 
1650   llvm::BasicBlock *nullCheckBB = nullptr;
1651   llvm::BasicBlock *contBB = nullptr;
1652 
1653   // The null-check means that the initializer is conditionally
1654   // evaluated.
1655   ConditionalEvaluation conditional(*this);
1656 
1657   if (nullCheck) {
1658     conditional.begin(*this);
1659 
1660     nullCheckBB = Builder.GetInsertBlock();
1661     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1662     contBB = createBasicBlock("new.cont");
1663 
1664     llvm::Value *isNull =
1665       Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
1666     Builder.CreateCondBr(isNull, contBB, notNullBB);
1667     EmitBlock(notNullBB);
1668   }
1669 
1670   // If there's an operator delete, enter a cleanup to call it if an
1671   // exception is thrown.
1672   EHScopeStack::stable_iterator operatorDeleteCleanup;
1673   llvm::Instruction *cleanupDominator = nullptr;
1674   if (E->getOperatorDelete() &&
1675       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1676     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
1677                           allocatorArgs);
1678     operatorDeleteCleanup = EHStack.stable_begin();
1679     cleanupDominator = Builder.CreateUnreachable();
1680   }
1681 
1682   assert((allocSize == allocSizeWithoutCookie) ==
1683          CalculateCookiePadding(*this, E).isZero());
1684   if (allocSize != allocSizeWithoutCookie) {
1685     assert(E->isArray());
1686     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1687                                                        numElements,
1688                                                        E, allocType);
1689   }
1690 
1691   llvm::Type *elementTy = ConvertTypeForMem(allocType);
1692   Address result = Builder.CreateElementBitCast(allocation, elementTy);
1693 
1694   // Pass the pointer through launder.invariant.group to avoid propagating
1695   // vptr information that may be included in the previous type.
1696   // To avoid breaking LTO between different optimization levels, we do this
1697   // regardless of the optimization level.
1698   if (CGM.getCodeGenOpts().StrictVTablePointers &&
1699       allocator->isReservedGlobalPlacementOperator())
1700     result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
1701                      result.getAlignment());
1702 
1703   // Emit sanitizer checks for the pointer value now, so that in the case of
1704   // an array it is checked only once and not at each constructor call. We may
1705   // have already checked that the pointer is non-null.
1706   // FIXME: If we have an array cookie and a potentially-throwing allocator,
1707   // we'll null check the wrong pointer here.
1708   SanitizerSet SkippedChecks;
1709   SkippedChecks.set(SanitizerKind::Null, nullCheck);
1710   EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
1711                 E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
1712                 result.getPointer(), allocType, result.getAlignment(),
1713                 SkippedChecks, numElements);
1714 
1715   EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1716                      allocSizeWithoutCookie);
1717   if (E->isArray()) {
1718     // NewPtr is a pointer to the base element type.  If we're
1719     // allocating an array of arrays, we'll need to cast back to the
1720     // array pointer type.
1721     llvm::Type *resultType = ConvertTypeForMem(E->getType());
1722     if (result.getType() != resultType)
1723       result = Builder.CreateBitCast(result, resultType);
1724   }
1725 
1726   // Deactivate the 'operator delete' cleanup if we finished
1727   // initialization.
1728   if (operatorDeleteCleanup.isValid()) {
1729     DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1730     cleanupDominator->eraseFromParent();
1731   }
1732 
1733   llvm::Value *resultPtr = result.getPointer();
1734   if (nullCheck) {
1735     conditional.end(*this);
1736 
1737     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1738     EmitBlock(contBB);
1739 
1740     llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1741     PHI->addIncoming(resultPtr, notNullBB);
1742     PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1743                      nullCheckBB);
1744 
1745     resultPtr = PHI;
1746   }
1747 
1748   return resultPtr;
1749 }
1750 
1751 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1752                                      llvm::Value *Ptr, QualType DeleteTy,
1753                                      llvm::Value *NumElements,
1754                                      CharUnits CookieSize) {
1755   assert((!NumElements && CookieSize.isZero()) ||
1756          DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1757 
1758   const FunctionProtoType *DeleteFTy =
1759     DeleteFD->getType()->getAs<FunctionProtoType>();
1760 
1761   CallArgList DeleteArgs;
1762 
1763   auto Params = getUsualDeleteParams(DeleteFD);
1764   auto ParamTypeIt = DeleteFTy->param_type_begin();
1765 
1766   // Pass the pointer itself.
1767   QualType ArgTy = *ParamTypeIt++;
1768   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1769   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1770 
1771   // Pass the std::destroying_delete tag if present.
1772   if (Params.DestroyingDelete) {
1773     QualType DDTag = *ParamTypeIt++;
1774     // Just pass an 'undef'. We expect the tag type to be an empty struct.
1775     auto *V = llvm::UndefValue::get(getTypes().ConvertType(DDTag));
1776     DeleteArgs.add(RValue::get(V), DDTag);
1777   }
1778 
1779   // Pass the size if the delete function has a size_t parameter.
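  // For an array delete with a sized deallocation function, the value passed
  // below works out to elementSize * numElements + cookieSize.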
1780   if (Params.Size) {
1781     QualType SizeType = *ParamTypeIt++;
1782     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1783     llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
1784                                                DeleteTypeSize.getQuantity());
1785 
1786     // For array new, multiply by the number of elements.
1787     if (NumElements)
1788       Size = Builder.CreateMul(Size, NumElements);
1789 
1790     // If there is a cookie, add the cookie size.
1791     if (!CookieSize.isZero())
1792       Size = Builder.CreateAdd(
1793           Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
1794 
1795     DeleteArgs.add(RValue::get(Size), SizeType);
1796   }
1797 
1798   // Pass the alignment if the delete function has an align_val_t parameter.
1799   if (Params.Alignment) {
1800     QualType AlignValType = *ParamTypeIt++;
1801     CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
1802         getContext().getTypeAlignIfKnown(DeleteTy));
1803     llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
1804                                                 DeleteTypeAlign.getQuantity());
1805     DeleteArgs.add(RValue::get(Align), AlignValType);
1806   }
1807 
1808   assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1809          "unknown parameter to usual delete function");
1810 
1811   // Emit the call to delete.
1812   EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1813 }
1814 
1815 namespace {
1816   /// Calls the given 'operator delete' on a single object.
1817   struct CallObjectDelete final : EHScopeStack::Cleanup {
1818     llvm::Value *Ptr;
1819     const FunctionDecl *OperatorDelete;
1820     QualType ElementType;
1821 
1822     CallObjectDelete(llvm::Value *Ptr,
1823                      const FunctionDecl *OperatorDelete,
1824                      QualType ElementType)
1825       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1826 
1827     void Emit(CodeGenFunction &CGF, Flags flags) override {
1828       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1829     }
1830   };
1831 }
1832 
1833 void
1834 CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1835                                              llvm::Value *CompletePtr,
1836                                              QualType ElementType) {
1837   EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1838                                         OperatorDelete, ElementType);
1839 }
1840 
1841 /// Emit the code for deleting a single object with a destroying operator
1842 /// delete. If the element type has a non-virtual destructor, Ptr has already
1843 /// been converted to the type of the parameter of 'operator delete'. Otherwise
1844 /// Ptr points to an object of the static type.
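/// With a destroying 'operator delete' (one taking std::destroying_delete_t),
/// the callee is responsible for destroying the object itself, so no separate
/// destructor call is emitted here outside the virtual-delete path.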
1845 static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1846                                        const CXXDeleteExpr *DE, Address Ptr,
1847                                        QualType ElementType) {
1848   auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1849   if (Dtor && Dtor->isVirtual())
1850     CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1851                                                 Dtor);
1852   else
1853     CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
1854 }
1855 
1856 /// Emit the code for deleting a single object.
1857 static void EmitObjectDelete(CodeGenFunction &CGF,
1858                              const CXXDeleteExpr *DE,
1859                              Address Ptr,
1860                              QualType ElementType) {
1861   // C++11 [expr.delete]p3:
1862   //   If the static type of the object to be deleted is different from its
1863   //   dynamic type, the static type shall be a base class of the dynamic type
1864   //   of the object to be deleted and the static type shall have a virtual
1865   //   destructor or the behavior is undefined.
1866   CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
1867                     DE->getExprLoc(), Ptr.getPointer(),
1868                     ElementType);
1869 
1870   const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1871   assert(!OperatorDelete->isDestroyingOperatorDelete());
1872 
1873   // Find the destructor for the type, if applicable.  If the
1874   // destructor is virtual, we'll just emit the vcall and return.
1875   const CXXDestructorDecl *Dtor = nullptr;
1876   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1877     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1878     if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1879       Dtor = RD->getDestructor();
1880 
1881       if (Dtor->isVirtual()) {
1882         bool UseVirtualCall = true;
1883         const Expr *Base = DE->getArgument();
1884         if (auto *DevirtualizedDtor =
1885                 dyn_cast_or_null<const CXXDestructorDecl>(
1886                     Dtor->getDevirtualizedMethod(
1887                         Base, CGF.CGM.getLangOpts().AppleKext))) {
1888           UseVirtualCall = false;
1889           const CXXRecordDecl *DevirtualizedClass =
1890               DevirtualizedDtor->getParent();
1891           if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
1892             // Devirtualized to the class of the base type (the type of the
1893             // whole expression).
1894             Dtor = DevirtualizedDtor;
1895           } else {
1896             // Devirtualized to some other type. Would need to cast the this
1897             // pointer to that type but we don't have support for that yet, so
1898             // do a virtual call. FIXME: handle the case where it is
1899             // devirtualized to the derived type (the type of the inner
1900             // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1901             UseVirtualCall = true;
1902           }
1903         }
1904         if (UseVirtualCall) {
1905           CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1906                                                       Dtor);
1907           return;
1908         }
1909       }
1910     }
1911   }
1912 
1913   // Make sure that we call delete even if the dtor throws.
1914   // This doesn't have to be a conditional cleanup because we're going
1915   // to pop it off in a second.
1916   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1917                                             Ptr.getPointer(),
1918                                             OperatorDelete, ElementType);
1919 
1920   if (Dtor)
1921     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1922                               /*ForVirtualBase=*/false,
1923                               /*Delegating=*/false,
1924                               Ptr, ElementType);
1925   else if (auto Lifetime = ElementType.getObjCLifetime()) {
1926     switch (Lifetime) {
1927     case Qualifiers::OCL_None:
1928     case Qualifiers::OCL_ExplicitNone:
1929     case Qualifiers::OCL_Autoreleasing:
1930       break;
1931 
1932     case Qualifiers::OCL_Strong:
1933       CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
1934       break;
1935 
1936     case Qualifiers::OCL_Weak:
1937       CGF.EmitARCDestroyWeak(Ptr);
1938       break;
1939     }
1940   }
1941 
1942   CGF.PopCleanupBlock();
1943 }
1944 
1945 namespace {
1946   /// Calls the given 'operator delete' on an array of objects.
1947   struct CallArrayDelete final : EHScopeStack::Cleanup {
1948     llvm::Value *Ptr;
1949     const FunctionDecl *OperatorDelete;
1950     llvm::Value *NumElements;
1951     QualType ElementType;
1952     CharUnits CookieSize;
1953 
1954     CallArrayDelete(llvm::Value *Ptr,
1955                     const FunctionDecl *OperatorDelete,
1956                     llvm::Value *NumElements,
1957                     QualType ElementType,
1958                     CharUnits CookieSize)
1959       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1960         ElementType(ElementType), CookieSize(CookieSize) {}
1961 
1962     void Emit(CodeGenFunction &CGF, Flags flags) override {
1963       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
1964                          CookieSize);
1965     }
1966   };
1967 }
1968 
1969 /// Emit the code for deleting an array of objects.
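/// The element count, the original allocation pointer, and the cookie size are
/// recovered from the array cookie that the ABI wrote when the array was
/// allocated.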
1970 static void EmitArrayDelete(CodeGenFunction &CGF,
1971                             const CXXDeleteExpr *E,
1972                             Address deletedPtr,
1973                             QualType elementType) {
1974   llvm::Value *numElements = nullptr;
1975   llvm::Value *allocatedPtr = nullptr;
1976   CharUnits cookieSize;
1977   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1978                                       numElements, allocatedPtr, cookieSize);
1979 
1980   assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1981 
1982   // Make sure that we call delete even if one of the dtors throws.
1983   const FunctionDecl *operatorDelete = E->getOperatorDelete();
1984   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1985                                            allocatedPtr, operatorDelete,
1986                                            numElements, elementType,
1987                                            cookieSize);
1988 
1989   // Destroy the elements.
1990   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1991     assert(numElements && "no element count for a type with a destructor!");
1992 
1993     CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1994     CharUnits elementAlign =
1995       deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
1996 
1997     llvm::Value *arrayBegin = deletedPtr.getPointer();
1998     llvm::Value *arrayEnd =
1999       CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
2000 
2001     // Note that it is legal to allocate a zero-length array, and we
2002     // can never fold the check away because the length should always
2003     // come from a cookie.
2004     CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
2005                          CGF.getDestroyer(dtorKind),
2006                          /*checkZeroLength*/ true,
2007                          CGF.needsEHCleanup(dtorKind));
2008   }
2009 
2010   // Pop the cleanup block.
2011   CGF.PopCleanupBlock();
2012 }
2013 
2014 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
2015   const Expr *Arg = E->getArgument();
2016   Address Ptr = EmitPointerWithAlignment(Arg);
2017 
2018   // Null check the pointer.
2019   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
2020   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
2021 
2022   llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
2023 
2024   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
2025   EmitBlock(DeleteNotNull);
2026 
2027   QualType DeleteTy = E->getDestroyedType();
2028 
2029   // A destroying operator delete overrides the entire operation of the
2030   // delete expression.
2031   if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
2032     EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
2033     EmitBlock(DeleteEnd);
2034     return;
2035   }
2036 
2037   // We might be deleting a pointer to array.  If so, GEP down to the
2038   // first non-array element.
2039   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
2040   if (DeleteTy->isConstantArrayType()) {
2041     llvm::Value *Zero = Builder.getInt32(0);
2042     SmallVector<llvm::Value*,8> GEP;
2043 
2044     GEP.push_back(Zero); // point at the outermost array
2045 
2046     // For each layer of array type we're pointing at:
2047     while (const ConstantArrayType *Arr
2048              = getContext().getAsConstantArrayType(DeleteTy)) {
2049       // 1. Unpeel the array type.
2050       DeleteTy = Arr->getElementType();
2051 
2052       // 2. GEP to the first element of the array.
2053       GEP.push_back(Zero);
2054     }
2055 
2056     Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
2057                   Ptr.getAlignment());
2058   }
2059 
2060   assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
2061 
2062   if (E->isArrayForm()) {
2063     EmitArrayDelete(*this, E, Ptr, DeleteTy);
2064   } else {
2065     EmitObjectDelete(*this, E, Ptr, DeleteTy);
2066   }
2067 
2068   EmitBlock(DeleteEnd);
2069 }
2070 
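// Returns true for glvalues formed (possibly indirectly) from a pointer
// dereference, e.g. '*p', 'p[i]', '(e, *p)', and 'cond ? *p : *q', which is
// what the typeid null check below cares about.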
2071 static bool isGLValueFromPointerDeref(const Expr *E) {
2072   E = E->IgnoreParens();
2073 
2074   if (const auto *CE = dyn_cast<CastExpr>(E)) {
2075     if (!CE->getSubExpr()->isGLValue())
2076       return false;
2077     return isGLValueFromPointerDeref(CE->getSubExpr());
2078   }
2079 
2080   if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
2081     return isGLValueFromPointerDeref(OVE->getSourceExpr());
2082 
2083   if (const auto *BO = dyn_cast<BinaryOperator>(E))
2084     if (BO->getOpcode() == BO_Comma)
2085       return isGLValueFromPointerDeref(BO->getRHS());
2086 
2087   if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
2088     return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
2089            isGLValueFromPointerDeref(ACO->getFalseExpr());
2090 
2091   // C++11 [expr.sub]p1:
2092   //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
2093   if (isa<ArraySubscriptExpr>(E))
2094     return true;
2095 
2096   if (const auto *UO = dyn_cast<UnaryOperator>(E))
2097     if (UO->getOpcode() == UO_Deref)
2098       return true;
2099 
2100   return false;
2101 }
2102 
2103 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
2104                                          llvm::Type *StdTypeInfoPtrTy) {
2105   // Get the vtable pointer.
2106   Address ThisPtr = CGF.EmitLValue(E).getAddress();
2107 
2108   QualType SrcRecordTy = E->getType();
2109 
2110   // C++ [class.cdtor]p4:
2111   //   If the operand of typeid refers to the object under construction or
2112   //   destruction and the static type of the operand is neither the constructor
2113   //   or destructor’s class nor one of its bases, the behavior is undefined.
2114   CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
2115                     ThisPtr.getPointer(), SrcRecordTy);
2116 
2117   // C++ [expr.typeid]p2:
2118   //   If the glvalue expression is obtained by applying the unary * operator to
2119   //   a pointer and the pointer is a null pointer value, the typeid expression
2120   //   throws the std::bad_typeid exception.
2121   //
2122   // However, this paragraph's intent is not clear.  We choose a very generous
2123   // interpretation which implores us to consider comma operators, conditional
2124   // operators, parentheses and other such constructs.
2125   if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
2126           isGLValueFromPointerDeref(E), SrcRecordTy)) {
2127     llvm::BasicBlock *BadTypeidBlock =
2128         CGF.createBasicBlock("typeid.bad_typeid");
2129     llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
2130 
2131     llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
2132     CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
2133 
2134     CGF.EmitBlock(BadTypeidBlock);
2135     CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
2136     CGF.EmitBlock(EndBlock);
2137   }
2138 
2139   return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
2140                                         StdTypeInfoPtrTy);
2141 }
2142 
2143 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
2144   llvm::Type *StdTypeInfoPtrTy =
2145     ConvertType(E->getType())->getPointerTo();
2146 
2147   if (E->isTypeOperand()) {
2148     llvm::Constant *TypeInfo =
2149         CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
2150     return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
2151   }
2152 
2153   // C++ [expr.typeid]p2:
2154   //   When typeid is applied to a glvalue expression whose type is a
2155   //   polymorphic class type, the result refers to a std::type_info object
2156   //   representing the type of the most derived object (that is, the dynamic
2157   //   type) to which the glvalue refers.
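  // For example, 'typeid(*p)' where '*p' has polymorphic class type must read
  // the vtable; 'typeid(int)' or an unevaluated operand is resolved statically.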
2158   if (E->isPotentiallyEvaluated())
2159     return EmitTypeidFromVTable(*this, E->getExprOperand(),
2160                                 StdTypeInfoPtrTy);
2161 
2162   QualType OperandTy = E->getExprOperand()->getType();
2163   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
2164                                StdTypeInfoPtrTy);
2165 }
2166 
2167 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2168                                           QualType DestTy) {
2169   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
2170   if (DestTy->isPointerType())
2171     return llvm::Constant::getNullValue(DestLTy);
2172 
2173   /// C++ [expr.dynamic.cast]p9:
2174   ///   A failed cast to reference type throws std::bad_cast
2175   if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2176     return nullptr;
2177 
2178   CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
2179   return llvm::UndefValue::get(DestLTy);
2180 }
2181 
2182 llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
2183                                               const CXXDynamicCastExpr *DCE) {
2184   CGM.EmitExplicitCastExprType(DCE, this);
2185   QualType DestTy = DCE->getTypeAsWritten();
2186 
2187   QualType SrcTy = DCE->getSubExpr()->getType();
2188 
2189   // C++ [expr.dynamic.cast]p7:
2190   //   If T is "pointer to cv void," then the result is a pointer to the most
2191   //   derived object pointed to by v.
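  // For example, 'dynamic_cast<void *>(p)' where '*p' has polymorphic type.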
2192   const PointerType *DestPTy = DestTy->getAs<PointerType>();
2193 
2194   bool isDynamicCastToVoid;
2195   QualType SrcRecordTy;
2196   QualType DestRecordTy;
2197   if (DestPTy) {
2198     isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
2199     SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
2200     DestRecordTy = DestPTy->getPointeeType();
2201   } else {
2202     isDynamicCastToVoid = false;
2203     SrcRecordTy = SrcTy;
2204     DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
2205   }
2206 
2207   // C++ [class.cdtor]p5:
2208   //   If the operand of the dynamic_cast refers to the object under
2209   //   construction or destruction and the static type of the operand is not a
2210   //   pointer to or object of the constructor or destructor’s own class or one
2211   //   of its bases, the dynamic_cast results in undefined behavior.
2212   EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
2213                 SrcRecordTy);
2214 
2215   if (DCE->isAlwaysNull())
2216     if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
2217       return T;
2218 
2219   assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
2220 
2221   // C++ [expr.dynamic.cast]p4:
2222   //   If the value of v is a null pointer value in the pointer case, the result
2223   //   is the null pointer value of type T.
2224   bool ShouldNullCheckSrcValue =
2225       CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
2226                                                          SrcRecordTy);
2227 
2228   llvm::BasicBlock *CastNull = nullptr;
2229   llvm::BasicBlock *CastNotNull = nullptr;
2230   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
2231 
2232   if (ShouldNullCheckSrcValue) {
2233     CastNull = createBasicBlock("dynamic_cast.null");
2234     CastNotNull = createBasicBlock("dynamic_cast.notnull");
2235 
2236     llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
2237     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
2238     EmitBlock(CastNotNull);
2239   }
2240 
2241   llvm::Value *Value;
2242   if (isDynamicCastToVoid) {
2243     Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
2244                                                   DestTy);
2245   } else {
2246     assert(DestRecordTy->isRecordType() &&
2247            "destination type must be a record type!");
2248     Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
2249                                                 DestTy, DestRecordTy, CastEnd);
2250     CastNotNull = Builder.GetInsertBlock();
2251   }
2252 
2253   if (ShouldNullCheckSrcValue) {
2254     EmitBranch(CastEnd);
2255 
2256     EmitBlock(CastNull);
2257     EmitBranch(CastEnd);
2258   }
2259 
2260   EmitBlock(CastEnd);
2261 
2262   if (ShouldNullCheckSrcValue) {
2263     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
2264     PHI->addIncoming(Value, CastNotNull);
2265     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
2266 
2267     Value = PHI;
2268   }
2269 
2270   return Value;
2271 }
2272