//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = 0;
    if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(CE)) {
      if (const auto *M = dyn_cast<CXXMethodDecl>(Op->getCalleeDecl()))
        ArgsToSkip =
            static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
    }
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
    QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs,
    llvm::CallBase **CallOrInvoke) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, CallOrInvoke,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
    llvm::CallBase **CallOrInvoke) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, CallOrInvoke,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
                                            const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
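// A minimal sketch of such a call (hypothetical code, not from this file):
//   struct S { S(int); };
//   void f(S *p) { p->S::S(42); }  // MSVC extension: re-constructs *p in place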
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue,
                                              llvm::CallBase **CallOrInvoke) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue, CallOrInvoke);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue, /*Chain=*/nullptr, CallOrInvoke);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(CE, MD, ReturnValue,
                                               HasQualifier, Qualifier, IsArrow,
                                               Base, CallOrInvoke);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base, llvm::CallBase **CallOrInvoke) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
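      // A sketch of the covariant shape meant here (hypothetical types):
      //   struct Base { virtual Base *clone(); };
      //   struct Pad { int x; };
      //   struct Derived : Pad, Base { Derived *clone() override; };
      // Derived::clone returns Derived*, which needs a pointer adjustment to
      // become the Base* the caller expects, so we skip devirtualization.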
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
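  // For instance, in 'a = make()' (with 'a' of class type), make() must be
  // emitted before 'a', even though the call lowers to a.operator=(make()).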
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
                          BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
        /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false, CallOrInvoke);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }

  if (sanitizePerformTypeCheck())
    EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                  This.emitRawPointer(*this),
                  C.getRecordType(CalleeDecl->getParent()),
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This.getAddress(),
          cast<CXXMemberCallExpr>(CE), CallOrInvoke);
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE, CallOrInvoke);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs, CallOrInvoke);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue,
                                              llvm::CallBase **CallOrInvoke) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD = MPT->getMostRecentCXXRecordDecl();

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
  else
    This = EmitLValue(BaseExpr, KnownNonNull).getAddress();

  EmitTypeCheck(
      TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
      QualType(MPT->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                             ThisPtrForCall, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, CallOrInvoke, E == MustTailCall,
                  E->getExprLoc());
}

RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
    const CXXOperatorCallExpr *E, const CXXMethodDecl *MD,
    ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke) {
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0), CallOrInvoke);
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue,
                                               llvm::CallBase **CallOrInvoke) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue,
                                                     CallOrInvoke);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = DestPtr.withElementType(CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
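  // Illustrative shape (MS ABI, hypothetical types):
  //   struct B { int b; };
  //   struct D : virtual B { int d; };
  // D's non-virtual part is roughly [ vbptr ][ int d ]; only the byte ranges
  // around the vbptr may be zeroed here.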
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
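  // Context for the TODO above: under the Itanium ABI a null pointer to a
  // data member is represented as -1, not 0, so an all-zero byte pattern
  // would read back as a valid (offset-0) member pointer.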
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr(NullVariable, CGF.Int8Ty, Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
    case CXXConstructionKind::Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructionKind::VirtualBase:
    case CXXConstructionKind::NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object
    //        is passed directly as the first argument to the constructor.
    //        This should also handle stepping through implicit casts and
    //        conversion sequences which involve two steps, with a
    //        conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(0);
    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    EmitAggExpr(SrcObj, Dest);
    return;
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructionKind::Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructionKind::VirtualBase:
      ForVirtualBase = true;
      [[fallthrough]];

    case CXXConstructionKind::NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

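// Background (Itanium-style array cookies): for 'new T[n]' where the elements
// need non-trivial destruction, the element count n is stored in a "cookie"
// just before the array so that 'delete[]' knows how many elements to destroy.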
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = ConstantEmitter(CGF).tryEmitAbstract(
      *e->getArraySize(), (*e->getArraySize())->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
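  // For instance (hypothetical), 'new char[n]' with n == -8 and an 8-byte
  // cookie: (-8) + 8 wraps around to a "valid" total size of 0, yet the
  // negative count still makes the expression erroneous.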
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countl_zero())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.
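    // Roughly, condition (4) plus the final guard lowers to IR like this
    // (a sketch, not the exact output):
    //   %pair  = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %n, i64 %ts)
    //   %size0 = extractvalue { i64, i1 } %pair, 0
    //   %ovf   = extractvalue { i64, i1 } %pair, 1
    //   %size  = select i1 %ovf, i64 -1, i64 %size0 ; all-ones makes new fail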

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  CleanupDeactivationScope deactivation(*this);
  bool pushedCleanup = false;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
  const CXXParenListInitExpr *CPLIE = nullptr;
  const StringLiteral *SL = nullptr;
  const ObjCEncodeExpr *OCEE = nullptr;
  const Expr *IgnoreParen = nullptr;
  if (!ILE) {
    IgnoreParen = Init->IgnoreParenImpCasts();
    CPLIE = dyn_cast<CXXParenListInitExpr>(IgnoreParen);
    SL = dyn_cast<StringLiteral>(IgnoreParen);
    OCEE = dyn_cast<ObjCEncodeExpr>(IgnoreParen);
  }

  // If the initializer is an initializer list, first do the explicit elements.
  if (ILE || CPLIE || SL || OCEE) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) {
      if (!ILE)
        Init = IgnoreParen;
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
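      // For example (hypothetical): 'new char[n]{"hi"}' copies the string
      // literal's bytes into the front of the allocation and zero-fills
      // whatever remains via the memset path below.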
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
      CurPtr = Builder.CreateConstInBoundsGEP(
          CurPtr, InitListElements, "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    ArrayRef<const Expr *> InitExprs =
        ILE ? ILE->inits() : CPLIE->getInitExprs();
    InitListElements = InitExprs.size();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = CurPtr.withElementType(ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (DtorKind) {
      AllocaTrackerRAII AllocaTracker(*this);
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      llvm::Instruction *DominatingIP =
          Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                       EndOfInit, ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      cast<EHCleanupScope>(*EHStack.find(EHStack.stable_begin()))
          .AddAuxAllocas(AllocaTracker.Take());
      DeferredDeactivationCleanupStack.push_back(
          {EHStack.stable_begin(), DominatingIP});
      pushedCleanup = true;
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    unsigned i = 0;
    for (const Expr *IE : InitExprs) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                                 CurPtr.emitRawPointer(*this),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((++i) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
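    // For example (hypothetical): for 'new int[n][4]{{1, 2}}', the filler for
    // the trailing rows is itself a list for 'int[4]' whose own filler is an
    // int; peeling down to that lets the tail be filled in one flat pass.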
1161     while (Init && Init->getType()->isConstantArrayType()) {
1162       auto *SubILE = dyn_cast<InitListExpr>(Init);
1163       if (!SubILE)
1164         break;
1165       assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
1166       Init = SubILE->getArrayFiller();
1167     }
1168 
1169     // Switch back to initializing one base element at a time.
1170     CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
1171   }
1172 
1173   // If all elements have already been initialized, skip any further
1174   // initialization.
1175   llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
1176   if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
1177     return;
1178   }
1179 
1180   assert(Init && "have trailing elements to initialize but no initializer");
1181 
1182   // If this is a constructor call, try to optimize it out, and failing that
1183   // emit a single loop to initialize all remaining elements.
1184   if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
1185     CXXConstructorDecl *Ctor = CCE->getConstructor();
1186     if (Ctor->isTrivial()) {
1187       // If new expression did not specify value-initialization, then there
1188       // is no initialization.
1189       if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
1190         return;
1191 
1192       if (TryMemsetInitialization())
1193         return;
1194     }
1195 
1196     // Store the new Cleanup position for irregular Cleanups.
1197     //
1198     // FIXME: Share this cleanup with the constructor call emission rather than
1199     // having it create a cleanup of its own.
1200     if (EndOfInit.isValid())
1201       Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
1202 
1203     // Emit a constructor call loop to initialize the remaining elements.
1204     if (InitListElements)
1205       NumElements = Builder.CreateSub(
1206           NumElements,
1207           llvm::ConstantInt::get(NumElements->getType(), InitListElements));
1208     EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
1209                                /*NewPointerIsChecked*/true,
1210                                CCE->requiresZeroInitialization());
1211     return;
1212   }
1213 
1214   // If this is value-initialization, we can usually use memset.
1215   ImplicitValueInitExpr IVIE(ElementType);
1216   if (isa<ImplicitValueInitExpr>(Init)) {
1217     if (TryMemsetInitialization())
1218       return;
1219 
1220     // Switch to an ImplicitValueInitExpr for the element type. This handles
1221     // only one case: multidimensional array new of pointers to members. In
1222     // all other cases, we already have an initializer for the array element.
1223     Init = &IVIE;
1224   }
1225 
1226   // At this point we should have found an initializer for the individual
1227   // elements of the array.
1228   assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
1229          "got wrong type of element to initialize");
1230 
1231   // If we have an empty initializer list, we can usually use memset.
1232   if (auto *ILE = dyn_cast<InitListExpr>(Init))
1233     if (ILE->getNumInits() == 0 && TryMemsetInitialization())
1234       return;
1235 
1236   // If we have a struct whose every field is value-initialized, we can
1237   // usually use memset.
1238   if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
1239     if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
1240       if (RType->getDecl()->isStruct()) {
1241         unsigned NumElements = 0;
1242         if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
1243           NumElements = CXXRD->getNumBases();
1244         for (auto *Field : RType->getDecl()->fields())
1245           if (!Field->isUnnamedBitField())
1246             ++NumElements;
1247         // FIXME: Recurse into nested InitListExprs.
1248         if (ILE->getNumInits() == NumElements)
1249           for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1250             if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
1251               --NumElements;
1252         if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
1253           return;
1254       }
1255     }
1256   }
1257 
1258   // Create the loop blocks.
1259   llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
1260   llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
1261   llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
1262 
1263   // Find the end of the array, hoisted out of the loop.
1264   llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
1265       BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
1266       "array.end");
1267 
1268   // If the number of elements isn't constant, we now have to check whether
1269   // there is anything left to initialize.
1270   if (!ConstNum) {
1271     llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this),
1272                                                 EndPtr, "array.isempty");
1273     Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
1274   }
1275 
1276   // Enter the loop.
1277   EmitBlock(LoopBB);
1278 
1279   // Set up the current-element phi.
1280   llvm::PHINode *CurPtrPhi =
1281       Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
1282   CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB);
1283 
1284   CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);
1285 
1286   // Store the new Cleanup position for irregular Cleanups.
1287   if (EndOfInit.isValid())
1288     Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
1289 
1290   // Enter a partial-destruction Cleanup if necessary.
1291   if (!pushedCleanup && needsEHCleanup(DtorKind)) {
1292     llvm::Instruction *DominatingIP =
1293         Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
1294     pushRegularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
1295                                    CurPtr.emitRawPointer(*this), ElementType,
1296                                    ElementAlign, getDestroyer(DtorKind));
1297     DeferredDeactivationCleanupStack.push_back(
1298         {EHStack.stable_begin(), DominatingIP});
1299   }
1300 
1301   // Emit the initializer into this element.
1302   StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
1303                           AggValueSlot::DoesNotOverlap);
1304 
1305   // Leave the Cleanup if we entered one.
1306   deactivation.ForceDeactivate();
1307 
1308   // Advance to the next element by adjusting the pointer type as necessary.
1309   llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
1310       ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next");
1311 
1312   // Check whether we've gotten to the end of the array and, if so,
1313   // exit the loop.
1314   llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
1315   Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
1316   CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
1317 
1318   EmitBlock(ContBB);
1319 }
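     // Worked example (illustrative note, not from the original source):
     //
     //   int *p = new int[8]{1, 2, 3};
     //
     // stores the three explicit initializers one unit at a time above, then
     // value-initializes the remaining five elements, which
     // TryMemsetInitialization() can usually lower to a single memset of zero.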
1320 
1321 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1322                                QualType ElementType, llvm::Type *ElementTy,
1323                                Address NewPtr, llvm::Value *NumElements,
1324                                llvm::Value *AllocSizeWithoutCookie) {
1325   ApplyDebugLocation DL(CGF, E);
1326   if (E->isArray())
1327     CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
1328                                 AllocSizeWithoutCookie);
1329   else if (const Expr *Init = E->getInitializer())
1330     StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
1331                             AggValueSlot::DoesNotOverlap);
1332 }
1333 
1334 /// Emit a call to an operator new or operator delete function, as implicitly
1335 /// created by new-expressions and delete-expressions.
1336 static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1337                                 const FunctionDecl *CalleeDecl,
1338                                 const FunctionProtoType *CalleeType,
1339                                 const CallArgList &Args) {
1340   llvm::CallBase *CallOrInvoke;
1341   llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
1342   CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
1343   RValue RV =
1344       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
1345                        Args, CalleeType, /*ChainCall=*/false),
1346                    Callee, ReturnValueSlot(), Args, &CallOrInvoke);
1347 
1348   /// C++1y [expr.new]p10:
1349   ///   [In a new-expression,] an implementation is allowed to omit a call
1350   ///   to a replaceable global allocation function.
1351   ///
1352   /// We model such elidable calls with the 'builtin' attribute.
1353   llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
1354   if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
1355       Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
1356     CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
1357   }
1358 
1359   return RV;
1360 }
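     // Illustrative consequence (not from the original source): marking the
     // call 'builtin' lets the optimizer elide paired allocations, so e.g.
     //
     //   int *p = new int(1);
     //   delete p;
     //
     // may be removed entirely at -O1 and above, as C++14 [expr.new]p10
     // permits.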
1361 
1362 RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1363                                                  const CallExpr *TheCall,
1364                                                  bool IsDelete) {
1365   CallArgList Args;
1366   EmitCallArgs(Args, Type, TheCall->arguments());
1367   // Find the allocation or deallocation function that we're calling.
1368   ASTContext &Ctx = getContext();
1369   DeclarationName Name = Ctx.DeclarationNames
1370       .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
1371 
1372   for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1373     if (auto *FD = dyn_cast<FunctionDecl>(Decl))
1374       if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
1375         return EmitNewDeleteCall(*this, FD, Type, Args);
1376   llvm_unreachable("predeclared global operator new/delete is missing");
1377 }
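     // Usage sketch (not from the original source): this is the lowering for
     // Clang's __builtin_operator_new / __builtin_operator_delete builtins,
     // which call the usual global allocation functions while remaining
     // elidable, e.g.
     //
     //   void *p = __builtin_operator_new(32);  // calls ::operator new(32)
     //   __builtin_operator_delete(p);          // calls ::operator delete(p)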
1378 
1379 namespace {
1380 /// The parameters to pass to a usual operator delete.
1381 struct UsualDeleteParams {
1382   TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No;
1383   bool DestroyingDelete = false;
1384   bool Size = false;
1385   AlignedAllocationMode Alignment = AlignedAllocationMode::No;
1386 };
1387 }
1388 
1389 static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
1390   UsualDeleteParams Params;
1391 
1392   const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
1393   auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
1394 
1395   if (FD->isTypeAwareOperatorNewOrDelete()) {
1396     Params.TypeAwareDelete = TypeAwareAllocationMode::Yes;
1397     assert(AI != AE);
1398     ++AI;
1399   }
1400 
1401   // The first argument after the type-identity parameter (if any) is
1402   // always a void* (or C* for a destroying operator delete for class
1403   // type C).
1404   ++AI;
1405 
1406   // The next parameter may be a std::destroying_delete_t.
1407   if (FD->isDestroyingOperatorDelete()) {
1408     assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
1409     Params.DestroyingDelete = true;
1410     assert(AI != AE);
1411     ++AI;
1412   }
1413 
1414   // Figure out what other parameters we should be implicitly passing.
1415   if (AI != AE && (*AI)->isIntegerType()) {
1416     Params.Size = true;
1417     ++AI;
1418   } else
1419     assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
1420 
1421   if (AI != AE && (*AI)->isAlignValT()) {
1422     Params.Alignment = AlignedAllocationMode::Yes;
1423     ++AI;
1424   } else
1425     assert(!isTypeAwareAllocation(Params.TypeAwareDelete));
1426 
1427   assert(AI == AE && "unexpected usual deallocation function parameter");
1428   return Params;
1429 }
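     // For reference (illustrative summary): in parameter order, the usual
     // deallocation signatures this walks include
     //
     //   void operator delete(void *);
     //   void operator delete(void *, std::size_t);          // Size
     //   void operator delete(void *, std::align_val_t);     // Alignment
     //   void operator delete(void *, std::size_t, std::align_val_t);
     //   void operator delete(T *, std::destroying_delete_t);
     //
     // with type-aware variants additionally taking a leading type-identity
     // parameter.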
1430 
1431 namespace {
1432   /// A cleanup to call the given 'operator delete' function upon abnormal
1433   /// exit from a new expression. Templated on a traits type that deals with
1434   /// ensuring that the arguments dominate the cleanup if necessary.
1435   template<typename Traits>
1436   class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1437     /// Type used to hold llvm::Value*s.
1438     typedef typename Traits::ValueTy ValueTy;
1439     /// Type used to hold RValues.
1440     typedef typename Traits::RValueTy RValueTy;
1441     struct PlacementArg {
1442       RValueTy ArgValue;
1443       QualType ArgType;
1444     };
1445 
1446     unsigned NumPlacementArgs : 30;
1447     LLVM_PREFERRED_TYPE(AlignedAllocationMode)
1448     unsigned PassAlignmentToPlacementDelete : 1;
1449     const FunctionDecl *OperatorDelete;
1450     RValueTy TypeIdentity;
1451     ValueTy Ptr;
1452     ValueTy AllocSize;
1453     CharUnits AllocAlign;
1454 
1455     PlacementArg *getPlacementArgs() {
1456       return reinterpret_cast<PlacementArg *>(this + 1);
1457     }
1458 
1459   public:
1460     static size_t getExtraSize(size_t NumPlacementArgs) {
1461       return NumPlacementArgs * sizeof(PlacementArg);
1462     }
1463 
1464     CallDeleteDuringNew(size_t NumPlacementArgs,
1465                         const FunctionDecl *OperatorDelete,
1466                         RValueTy TypeIdentity, ValueTy Ptr, ValueTy AllocSize,
1467                         const ImplicitAllocationParameters &IAP,
1468                         CharUnits AllocAlign)
1469         : NumPlacementArgs(NumPlacementArgs),
1470           PassAlignmentToPlacementDelete(
1471               isAlignedAllocation(IAP.PassAlignment)),
1472           OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
1473           AllocSize(AllocSize), AllocAlign(AllocAlign) {}
1474 
1475     void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1476       assert(I < NumPlacementArgs && "index out of range");
1477       getPlacementArgs()[I] = {Arg, Type};
1478     }
1479 
1480     void Emit(CodeGenFunction &CGF, Flags flags) override {
1481       const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
1482       CallArgList DeleteArgs;
1483       unsigned FirstNonTypeArg = 0;
1484       TypeAwareAllocationMode TypeAwareDeallocation =
1485           TypeAwareAllocationMode::No;
1486       if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
1487         TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
1488         QualType SpecializedTypeIdentity = FPT->getParamType(0);
1489         ++FirstNonTypeArg;
1490         DeleteArgs.add(Traits::get(CGF, TypeIdentity), SpecializedTypeIdentity);
1491       }
1492       // The first argument after the type-identity parameter (if any) is always
1493       // a void* (or C* for a destroying operator delete for class type C).
1494       DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(FirstNonTypeArg));
1495 
1496       // Figure out what other parameters we should be implicitly passing.
1497       UsualDeleteParams Params;
1498       if (NumPlacementArgs) {
1499         // A placement deallocation function is implicitly passed an alignment
1500         // if the placement allocation function was, but is never passed a size.
1501         Params.Alignment =
1502             alignedAllocationModeFromBool(PassAlignmentToPlacementDelete);
1503         Params.TypeAwareDelete = TypeAwareDeallocation;
1504         Params.Size = isTypeAwareAllocation(Params.TypeAwareDelete);
1505       } else {
1506         // For a non-placement new-expression, 'operator delete' can take a
1507         // size and/or an alignment if it has the right parameters.
1508         Params = getUsualDeleteParams(OperatorDelete);
1509       }
1510 
1511       assert(!Params.DestroyingDelete &&
1512              "should not call destroying delete in a new-expression");
1513 
1514       // The second argument can be a std::size_t (for non-placement delete).
1515       if (Params.Size)
1516         DeleteArgs.add(Traits::get(CGF, AllocSize),
1517                        CGF.getContext().getSizeType());
1518 
1519       // The next (second or third) argument can be a std::align_val_t, which
1520       // is an enum whose underlying type is std::size_t.
1521       // FIXME: Use the right type as the parameter type. Note that in a call
1522       // to operator delete(size_t, ...), we may not have it available.
1523       if (isAlignedAllocation(Params.Alignment))
1524         DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1525                            CGF.SizeTy, AllocAlign.getQuantity())),
1526                        CGF.getContext().getSizeType());
1527 
1528       // Pass the rest of the arguments, which must match exactly.
1529       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1530         auto Arg = getPlacementArgs()[I];
1531         DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1532       }
1533 
1534       // Call 'operator delete'.
1535       EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1536     }
1537   };
1538 }
1539 
1540 /// Enter a cleanup to call 'operator delete' if the initializer in a
1541 /// new-expression throws.
1542 static void EnterNewDeleteCleanup(CodeGenFunction &CGF, const CXXNewExpr *E,
1543                                   RValue TypeIdentity, Address NewPtr,
1544                                   llvm::Value *AllocSize, CharUnits AllocAlign,
1545                                   const CallArgList &NewArgs) {
1546   unsigned NumNonPlacementArgs = E->getNumImplicitArgs();
1547 
1548   // If we're not inside a conditional branch, then the cleanup will
1549   // dominate and we can do the easier (and more efficient) thing.
1550   if (!CGF.isInConditionalBranch()) {
1551     struct DirectCleanupTraits {
1552       typedef llvm::Value *ValueTy;
1553       typedef RValue RValueTy;
1554       static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
1555       static RValue get(CodeGenFunction &, RValueTy V) { return V; }
1556     };
1557 
1558     typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
1559 
1560     DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
1561         EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
1562         TypeIdentity, NewPtr.emitRawPointer(CGF), AllocSize,
1563         E->implicitAllocationParameters(), AllocAlign);
1564     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1565       auto &Arg = NewArgs[I + NumNonPlacementArgs];
1566       Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
1567     }
1568 
1569     return;
1570   }
1571 
1572   // Otherwise, we need to save all this stuff.
1573   DominatingValue<RValue>::saved_type SavedNewPtr =
1574       DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
1575   DominatingValue<RValue>::saved_type SavedAllocSize =
1576     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1577   DominatingValue<RValue>::saved_type SavedTypeIdentity =
1578       DominatingValue<RValue>::save(CGF, TypeIdentity);
1579   struct ConditionalCleanupTraits {
1580     typedef DominatingValue<RValue>::saved_type ValueTy;
1581     typedef DominatingValue<RValue>::saved_type RValueTy;
1582     static RValue get(CodeGenFunction &CGF, ValueTy V) {
1583       return V.restore(CGF);
1584     }
1585   };
1586   typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
1587 
1588   ConditionalCleanup *Cleanup =
1589       CGF.EHStack.pushCleanupWithExtra<ConditionalCleanup>(
1590           EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
1591           SavedTypeIdentity, SavedNewPtr, SavedAllocSize,
1592           E->implicitAllocationParameters(), AllocAlign);
1593   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1594     auto &Arg = NewArgs[I + NumNonPlacementArgs];
1595     Cleanup->setPlacementArg(
1596         I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
1597   }
1598 
1599   CGF.initFullExprCleanup();
1600 }
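     // Illustrative example (hypothetical Arena type, not from the original
     // source): given
     //
     //   void *operator new(std::size_t, Arena &);
     //   void operator delete(void *, Arena &);
     //   S *p = new (arena) S();
     //
     // if S::S() throws, the cleanup entered here calls
     // operator delete(p, arena), re-emitting the saved placement argument,
     // before the exception propagates.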
1601 
1602 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1603   // The element type being allocated.
1604   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1605 
1606   // 1. Build a call to the allocation function.
1607   FunctionDecl *allocator = E->getOperatorNew();
1608 
1609   // If there is a brace-initializer or C++20 parenthesized initializer, we
1610   // cannot allocate fewer elements than there are initializers.
1611   unsigned minElements = 0;
1612   unsigned IndexOfAlignArg = 1;
1613   if (E->isArray() && E->hasInitializer()) {
1614     const Expr *Init = E->getInitializer();
1615     const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
1616     const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Init);
1617     const Expr *IgnoreParen = Init->IgnoreParenImpCasts();
1618     if ((ILE && ILE->isStringLiteralInit()) ||
1619         isa<StringLiteral>(IgnoreParen) || isa<ObjCEncodeExpr>(IgnoreParen)) {
1620       minElements =
1621           cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
1622               ->getZExtSize();
1623     } else if (ILE || CPLIE) {
1624       minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
1625     }
1626   }
1627 
1628   llvm::Value *numElements = nullptr;
1629   llvm::Value *allocSizeWithoutCookie = nullptr;
1630   llvm::Value *allocSize =
1631     EmitCXXNewAllocSize(*this, E, minElements, numElements,
1632                         allocSizeWithoutCookie);
1633   CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
1634 
1635   // Emit the allocation call.  If the allocator is a global placement
1636   // operator, just "inline" it directly.
1637   Address allocation = Address::invalid();
1638   CallArgList allocatorArgs;
1639   RValue TypeIdentityArg;
1640   if (allocator->isReservedGlobalPlacementOperator()) {
1641     assert(E->getNumPlacementArgs() == 1);
1642     const Expr *arg = *E->placement_arguments().begin();
1643 
1644     LValueBaseInfo BaseInfo;
1645     allocation = EmitPointerWithAlignment(arg, &BaseInfo);
1646 
1647     // The pointer expression will, in many cases, be an opaque void*.
1648     // In these cases, discard the computed alignment and use the
1649     // formal alignment of the allocated type.
1650     if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
1651       allocation.setAlignment(allocAlign);
1652 
1653     // Set up allocatorArgs for the call to operator delete if it's not
1654     // the reserved global operator.
1655     if (E->getOperatorDelete() &&
1656         !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1657       allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
1658       allocatorArgs.add(RValue::get(allocation, *this), arg->getType());
1659     }
1660 
1661   } else {
1662     const FunctionProtoType *allocatorType =
1663       allocator->getType()->castAs<FunctionProtoType>();
1664     ImplicitAllocationParameters IAP = E->implicitAllocationParameters();
1665     unsigned ParamsToSkip = 0;
1666     if (isTypeAwareAllocation(IAP.PassTypeIdentity)) {
1667       QualType SpecializedTypeIdentity = allocatorType->getParamType(0);
1668       CXXScalarValueInitExpr TypeIdentityParam(SpecializedTypeIdentity, nullptr,
1669                                                SourceLocation());
1670       TypeIdentityArg = EmitAnyExprToTemp(&TypeIdentityParam);
1671       allocatorArgs.add(TypeIdentityArg, SpecializedTypeIdentity);
1672       ++ParamsToSkip;
1673       ++IndexOfAlignArg;
1674     }
1675     // The allocation size is the first argument.
1676     QualType sizeType = getContext().getSizeType();
1677     allocatorArgs.add(RValue::get(allocSize), sizeType);
1678     ++ParamsToSkip;
1679 
1680     if (allocSize != allocSizeWithoutCookie) {
1681       CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1682       allocAlign = std::max(allocAlign, cookieAlign);
1683     }
1684 
1685     // The allocation alignment may be passed as the second argument.
1686     if (isAlignedAllocation(IAP.PassAlignment)) {
1687       QualType AlignValT = sizeType;
1688       if (allocatorType->getNumParams() > IndexOfAlignArg) {
1689         AlignValT = allocatorType->getParamType(IndexOfAlignArg);
1690         assert(getContext().hasSameUnqualifiedType(
1691                    AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
1692                    sizeType) &&
1693                "wrong type for alignment parameter");
1694         ++ParamsToSkip;
1695       } else {
1696         // Corner case, passing alignment to 'operator new(size_t, ...)'.
1697         assert(allocator->isVariadic() && "can't pass alignment to allocator");
1698       }
1699       allocatorArgs.add(
1700           RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
1701           AlignValT);
1702     }
1703 
1704     // FIXME: Why do we not pass a CalleeDecl here?
1705     EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1706                  /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1707 
1708     RValue RV =
1709       EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1710 
1711     // Set !heapallocsite metadata on the call to operator new.
1712     if (getDebugInfo())
1713       if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
1714         getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
1715                                                  E->getExprLoc());
1716 
1717     // If this was a call to a global replaceable allocation function that does
1718     // not take an alignment argument, the allocator is known to produce
1719     // storage that's suitably aligned for any object that fits, up to a known
1720     // threshold. Otherwise assume it's suitably aligned for the allocated type.
1721     CharUnits allocationAlign = allocAlign;
1722     if (!E->passAlignment() &&
1723         allocator->isReplaceableGlobalAllocationFunction()) {
1724       unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
1725           Target.getNewAlign(), getContext().getTypeSize(allocType)));
1726       allocationAlign = std::max(
1727           allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
1728     }
1729 
1730     allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
1731   }
1732 
1733   // Emit a null check on the allocation result if the allocation
1734   // function is allowed to return null (because it has a non-throwing
1735   // exception spec or is the reserved placement new) and we have an
1736   // interesting initializer or will be running sanitizers on the initialization.
1737   bool nullCheck = E->shouldNullCheckAllocation() &&
1738                    (!allocType.isPODType(getContext()) || E->hasInitializer() ||
1739                     sanitizePerformTypeCheck());
1740 
1741   llvm::BasicBlock *nullCheckBB = nullptr;
1742   llvm::BasicBlock *contBB = nullptr;
1743 
1744   // The null-check means that the initializer is conditionally
1745   // evaluated.
1746   ConditionalEvaluation conditional(*this);
1747 
1748   if (nullCheck) {
1749     conditional.begin(*this);
1750 
1751     nullCheckBB = Builder.GetInsertBlock();
1752     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1753     contBB = createBasicBlock("new.cont");
1754 
1755     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1756     Builder.CreateCondBr(isNull, contBB, notNullBB);
1757     EmitBlock(notNullBB);
1758   }
1759 
1760   // If there's an operator delete, enter a cleanup to call it if an
1761   // exception is thrown.
1762   EHScopeStack::stable_iterator operatorDeleteCleanup;
1763   llvm::Instruction *cleanupDominator = nullptr;
1764   if (E->getOperatorDelete() &&
1765       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1766     EnterNewDeleteCleanup(*this, E, TypeIdentityArg, allocation, allocSize,
1767                           allocAlign, allocatorArgs);
1768     operatorDeleteCleanup = EHStack.stable_begin();
1769     cleanupDominator = Builder.CreateUnreachable();
1770   }
1771 
1772   assert((allocSize == allocSizeWithoutCookie) ==
1773          CalculateCookiePadding(*this, E).isZero());
1774   if (allocSize != allocSizeWithoutCookie) {
1775     assert(E->isArray());
1776     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1777                                                        numElements,
1778                                                        E, allocType);
1779   }
1780 
1781   llvm::Type *elementTy = ConvertTypeForMem(allocType);
1782   Address result = allocation.withElementType(elementTy);
1783 
1784   // Pass the pointer through launder.invariant.group to avoid propagating
1785   // vptr information that may be associated with the previous type stored at
1786   // this address. To avoid breaking LTO between different optimization levels,
1787   // we do this regardless of the optimization level.
1788   if (CGM.getCodeGenOpts().StrictVTablePointers &&
1789       allocator->isReservedGlobalPlacementOperator())
1790     result = Builder.CreateLaunderInvariantGroup(result);
1791 
1792   // Emit sanitizer checks for the pointer value now, so that in the case of
1793   // an array it is checked only once and not at each constructor call. We may
1794   // have already checked that the pointer is non-null.
1795   // FIXME: If we have an array cookie and a potentially-throwing allocator,
1796   // we'll null check the wrong pointer here.
1797   SanitizerSet SkippedChecks;
1798   SkippedChecks.set(SanitizerKind::Null, nullCheck);
1799   EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
1800                 E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
1801                 result, allocType, result.getAlignment(), SkippedChecks,
1802                 numElements);
1803 
1804   EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1805                      allocSizeWithoutCookie);
1806   llvm::Value *resultPtr = result.emitRawPointer(*this);
1807 
1808   // Deactivate the 'operator delete' cleanup if we finished
1809   // initialization.
1810   if (operatorDeleteCleanup.isValid()) {
1811     DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1812     cleanupDominator->eraseFromParent();
1813   }
1814 
1815   if (nullCheck) {
1816     conditional.end(*this);
1817 
1818     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1819     EmitBlock(contBB);
1820 
1821     llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1822     PHI->addIncoming(resultPtr, notNullBB);
1823     PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1824                      nullCheckBB);
1825 
1826     resultPtr = PHI;
1827   }
1828 
1829   return resultPtr;
1830 }
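     // Rough shape of the result (illustrative sketch; exact IR varies by
     // target and ABI): for
     //
     //   T *p = new (std::nothrow) T;
     //
     // the emitted control flow is approximately
     //
     //   %mem = call ptr @operator_new(...)     ; nothrow allocator
     //   %isnull = icmp eq ptr %mem, null
     //   br i1 %isnull, label %new.cont, label %new.notnull
     //   new.notnull:  ; run T's initialization
     //   new.cont:     ; phi of the initialized pointer and null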
1831 
1832 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1833                                      llvm::Value *DeletePtr, QualType DeleteTy,
1834                                      llvm::Value *NumElements,
1835                                      CharUnits CookieSize) {
1836   assert((!NumElements && CookieSize.isZero()) ||
1837          DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1838 
1839   const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
1840   CallArgList DeleteArgs;
1841 
1842   auto Params = getUsualDeleteParams(DeleteFD);
1843   auto ParamTypeIt = DeleteFTy->param_type_begin();
1844 
1845   std::optional<llvm::AllocaInst *> TagAlloca;
1846   auto EmitTag = [&](QualType TagType, const char *TagName) {
1847     assert(!TagAlloca);
1848     llvm::Type *Ty = getTypes().ConvertType(TagType);
1849     CharUnits Align = CGM.getNaturalTypeAlignment(TagType);
1850     llvm::AllocaInst *TagAllocation = CreateTempAlloca(Ty, TagName);
1851     TagAllocation->setAlignment(Align.getAsAlign());
1852     DeleteArgs.add(RValue::getAggregate(Address(TagAllocation, Ty, Align)),
1853                    TagType);
1854     TagAlloca = TagAllocation;
1855   };
1856 
1857   // Pass the std::type_identity tag if present.
1858   if (isTypeAwareAllocation(Params.TypeAwareDelete))
1859     EmitTag(*ParamTypeIt++, "typeaware.delete.tag");
1860 
1861   // Pass the pointer itself.
1862   QualType ArgTy = *ParamTypeIt++;
1863   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1864 
1865   // Pass the std::destroying_delete tag if present.
1866   if (Params.DestroyingDelete)
1867     EmitTag(*ParamTypeIt++, "destroying.delete.tag");
1868 
1869   // Pass the size if the delete function has a size_t parameter.
1870   if (Params.Size) {
1871     QualType SizeType = *ParamTypeIt++;
1872     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1873     llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
1874                                                DeleteTypeSize.getQuantity());
1875 
1876     // For array new, multiply by the number of elements.
1877     if (NumElements)
1878       Size = Builder.CreateMul(Size, NumElements);
1879 
1880     // If there is a cookie, add the cookie size.
1881     if (!CookieSize.isZero())
1882       Size = Builder.CreateAdd(
1883           Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
1884 
1885     DeleteArgs.add(RValue::get(Size), SizeType);
1886   }
1887 
1888   // Pass the alignment if the delete function has an align_val_t parameter.
1889   if (isAlignedAllocation(Params.Alignment)) {
1890     QualType AlignValType = *ParamTypeIt++;
1891     CharUnits DeleteTypeAlign =
1892         getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
1893             DeleteTy, true /* NeedsPreferredAlignment */));
1894     llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
1895                                                 DeleteTypeAlign.getQuantity());
1896     DeleteArgs.add(RValue::get(Align), AlignValType);
1897   }
1898 
1899   assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1900          "unknown parameter to usual delete function");
1901 
1902   // Emit the call to delete.
1903   EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1904 
1905   // If call argument lowering didn't use the generated tag argument alloca,
1906   // remove it.
1907   if (TagAlloca && (*TagAlloca)->use_empty())
1908     (*TagAlloca)->eraseFromParent();
1909 }
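     // Worked example (illustrative, not from the original source): for
     //
     //   struct alignas(32) S { /* non-trivial */ };
     //   delete p;   // p is S*
     //
     // with a sized, aligned usual delete selected, this emits roughly
     //   ::operator delete(p, sizeof(S), std::align_val_t{32});
     // and for 'delete [] q' the size becomes n * sizeof(S) plus any cookie.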
1910 namespace {
1911   /// Calls the given 'operator delete' on a single object.
1912   struct CallObjectDelete final : EHScopeStack::Cleanup {
1913     llvm::Value *Ptr;
1914     const FunctionDecl *OperatorDelete;
1915     QualType ElementType;
1916 
1917     CallObjectDelete(llvm::Value *Ptr,
1918                      const FunctionDecl *OperatorDelete,
1919                      QualType ElementType)
1920       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1921 
1922     void Emit(CodeGenFunction &CGF, Flags flags) override {
1923       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1924     }
1925   };
1926 }
1927 
1928 void
1929 CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1930                                              llvm::Value *CompletePtr,
1931                                              QualType ElementType) {
1932   EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1933                                         OperatorDelete, ElementType);
1934 }
1935 
1936 /// Emit the code for deleting a single object with a destroying operator
1937 /// delete. If the element type has a non-virtual destructor, Ptr has already
1938 /// been converted to the type of the parameter of 'operator delete'. Otherwise
1939 /// Ptr points to an object of the static type.
1940 static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1941                                        const CXXDeleteExpr *DE, Address Ptr,
1942                                        QualType ElementType) {
1943   auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1944   if (Dtor && Dtor->isVirtual())
1945     CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1946                                                 Dtor);
1947   else
1948     CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.emitRawPointer(CGF),
1949                        ElementType);
1950 }
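     // Illustrative example (C++20 destroying delete):
     //
     //   struct S {
     //     ~S();
     //     void operator delete(S *, std::destroying_delete_t);
     //   };
     //   delete p;   // p is S*
     //
     // No destructor call is emitted by the caller; running ~S() is the
     // callee's responsibility. If ~S() is virtual, the ABI's virtual-delete
     // path is used instead so the most-derived operator delete runs.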
1951 
1952 /// Emit the code for deleting a single object.
1953 /// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
1954 /// if not.
1955 static bool EmitObjectDelete(CodeGenFunction &CGF,
1956                              const CXXDeleteExpr *DE,
1957                              Address Ptr,
1958                              QualType ElementType,
1959                              llvm::BasicBlock *UnconditionalDeleteBlock) {
1960   // C++11 [expr.delete]p3:
1961   //   If the static type of the object to be deleted is different from its
1962   //   dynamic type, the static type shall be a base class of the dynamic type
1963   //   of the object to be deleted and the static type shall have a virtual
1964   //   destructor or the behavior is undefined.
1965   CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr,
1966                     ElementType);
1967 
1968   const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1969   assert(!OperatorDelete->isDestroyingOperatorDelete());
1970 
1971   // Find the destructor for the type, if applicable.  If the
1972   // destructor is virtual, we'll just emit the vcall and return.
1973   const CXXDestructorDecl *Dtor = nullptr;
1974   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1975     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1976     if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1977       Dtor = RD->getDestructor();
1978 
1979       if (Dtor->isVirtual()) {
1980         bool UseVirtualCall = true;
1981         const Expr *Base = DE->getArgument();
1982         if (auto *DevirtualizedDtor =
1983                 dyn_cast_or_null<const CXXDestructorDecl>(
1984                     Dtor->getDevirtualizedMethod(
1985                         Base, CGF.CGM.getLangOpts().AppleKext))) {
1986           UseVirtualCall = false;
1987           const CXXRecordDecl *DevirtualizedClass =
1988               DevirtualizedDtor->getParent();
1989           if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
1990             // Devirtualized to the class of the base type (the type of the
1991             // whole expression).
1992             Dtor = DevirtualizedDtor;
1993           } else {
1994             // Devirtualized to some other type. Would need to cast the this
1995             // pointer to that type but we don't have support for that yet, so
1996             // do a virtual call. FIXME: handle the case where it is
1997             // devirtualized to the derived type (the type of the inner
1998             // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1999             UseVirtualCall = true;
2000           }
2001         }
2002         if (UseVirtualCall) {
2003           CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
2004                                                       Dtor);
2005           return false;
2006         }
2007       }
2008     }
2009   }
2010 
2011   // Make sure that we call delete even if the dtor throws.
2012   // This doesn't have to be a conditional cleanup because we're going
2013   // to pop it off in a second.
2014   CGF.EHStack.pushCleanup<CallObjectDelete>(
2015       NormalAndEHCleanup, Ptr.emitRawPointer(CGF), OperatorDelete, ElementType);
2016 
2017   if (Dtor)
2018     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
2019                               /*ForVirtualBase=*/false,
2020                               /*Delegating=*/false,
2021                               Ptr, ElementType);
2022   else if (auto Lifetime = ElementType.getObjCLifetime()) {
2023     switch (Lifetime) {
2024     case Qualifiers::OCL_None:
2025     case Qualifiers::OCL_ExplicitNone:
2026     case Qualifiers::OCL_Autoreleasing:
2027       break;
2028 
2029     case Qualifiers::OCL_Strong:
2030       CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
2031       break;
2032 
2033     case Qualifiers::OCL_Weak:
2034       CGF.EmitARCDestroyWeak(Ptr);
2035       break;
2036     }
2037   }
2038 
2039   // When optimizing for size, call 'operator delete' unconditionally.
2040   if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
2041     CGF.EmitBlock(UnconditionalDeleteBlock);
2042     CGF.PopCleanupBlock();
2043     return true;
2044   }
2045 
2046   CGF.PopCleanupBlock();
2047   return false;
2048 }
2049 
2050 namespace {
2051   /// Calls the given 'operator delete' on an array of objects.
2052   struct CallArrayDelete final : EHScopeStack::Cleanup {
2053     llvm::Value *Ptr;
2054     const FunctionDecl *OperatorDelete;
2055     llvm::Value *NumElements;
2056     QualType ElementType;
2057     CharUnits CookieSize;
2058 
2059     CallArrayDelete(llvm::Value *Ptr,
2060                     const FunctionDecl *OperatorDelete,
2061                     llvm::Value *NumElements,
2062                     QualType ElementType,
2063                     CharUnits CookieSize)
2064       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
2065         ElementType(ElementType), CookieSize(CookieSize) {}
2066 
2067     void Emit(CodeGenFunction &CGF, Flags flags) override {
2068       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
2069                          CookieSize);
2070     }
2071   };
2072 }
2073 
2074 /// Emit the code for deleting an array of objects.
2075 static void EmitArrayDelete(CodeGenFunction &CGF,
2076                             const CXXDeleteExpr *E,
2077                             Address deletedPtr,
2078                             QualType elementType) {
2079   llvm::Value *numElements = nullptr;
2080   llvm::Value *allocatedPtr = nullptr;
2081   CharUnits cookieSize;
2082   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
2083                                       numElements, allocatedPtr, cookieSize);
2084 
2085   assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
2086 
2087   // Make sure that we call delete even if one of the dtors throws.
2088   const FunctionDecl *operatorDelete = E->getOperatorDelete();
2089   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
2090                                            allocatedPtr, operatorDelete,
2091                                            numElements, elementType,
2092                                            cookieSize);
2093 
2094   // Destroy the elements.
2095   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
2096     assert(numElements && "no element count for a type with a destructor!");
2097 
2098     CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
2099     CharUnits elementAlign =
2100       deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
2101 
2102     llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
2103     llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
2104       deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
2105 
2106     // Note that it is legal to allocate a zero-length array, and we
2107     // can never fold the check away because the length should always
2108     // come from a cookie.
2109     CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
2110                          CGF.getDestroyer(dtorKind),
2111                          /*checkZeroLength*/ true,
2112                          CGF.needsEHCleanup(dtorKind));
2113   }
2114 
2115   // Pop the cleanup block.
2116   CGF.PopCleanupBlock();
2117 }
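     // Worked example (illustrative, not from the original source): for
     //
     //   S *p = new S[n];   // S has a non-trivial destructor
     //   delete [] p;
     //
     // ReadArrayCookie recovers n and the original allocation pointer from the
     // cookie stored ahead of the array, the loop above destroys exactly n
     // elements, and 'operator delete[]' is still called on the allocation
     // pointer even if one of the destructors throws.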
2118 
2119 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
2120   const Expr *Arg = E->getArgument();
2121   Address Ptr = EmitPointerWithAlignment(Arg);
2122 
2123   // Null check the pointer.
2124   //
2125   // We could avoid this null check if we can determine that the object
2126   // destruction is trivial and doesn't require an array cookie; we can
2127   // unconditionally perform the operator delete call in that case. For now, we
2128   // assume that deleted pointers are null rarely enough that it's better to
2129   // keep the branch. This might be worth revisiting for a -O0 code size win.
2130   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
2131   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
2132 
2133   llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
2134 
2135   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
2136   EmitBlock(DeleteNotNull);
2137   Ptr.setKnownNonNull();
2138 
2139   QualType DeleteTy = E->getDestroyedType();
2140 
2141   // A destroying operator delete overrides the entire operation of the
2142   // delete expression.
2143   if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
2144     EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
2145     EmitBlock(DeleteEnd);
2146     return;
2147   }
2148 
2149   // We might be deleting a pointer to array.  If so, GEP down to the
2150   // first non-array element.
2151   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
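       // Illustrative example (not from the original source):
       //   int (*p)[3][7] = new int[5][3][7];
       //   delete [] p;
       // Here DeleteTy is 'int[3][7]', so the GEP below peels both array
       // layers and leaves Ptr pointing at the first 'int'.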
2152   if (DeleteTy->isConstantArrayType()) {
2153     llvm::Value *Zero = Builder.getInt32(0);
2154     SmallVector<llvm::Value*,8> GEP;
2155 
2156     GEP.push_back(Zero); // point at the outermost array
2157 
2158     // For each layer of array type we're pointing at:
2159     while (const ConstantArrayType *Arr
2160              = getContext().getAsConstantArrayType(DeleteTy)) {
2161       // 1. Unpeel the array type.
2162       DeleteTy = Arr->getElementType();
2163 
2164       // 2. GEP to the first element of the array.
2165       GEP.push_back(Zero);
2166     }
2167 
2168     Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
2169                                     Ptr.getAlignment(), "del.first");
2170   }
2171 
2172   assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
2173 
2174   if (E->isArrayForm()) {
2175     EmitArrayDelete(*this, E, Ptr, DeleteTy);
2176     EmitBlock(DeleteEnd);
2177   } else {
2178     if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
2179       EmitBlock(DeleteEnd);
2180   }
2181 }
2182 
2183 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
2184                                          llvm::Type *StdTypeInfoPtrTy,
2185                                          bool HasNullCheck) {
2186   // Get the vtable pointer.
2187   Address ThisPtr = CGF.EmitLValue(E).getAddress();
2188 
2189   QualType SrcRecordTy = E->getType();
2190 
2191   // C++ [class.cdtor]p4:
2192   //   If the operand of typeid refers to the object under construction or
2193   //   destruction and the static type of the operand is neither the constructor
2194   //   or destructor’s class nor one of its bases, the behavior is undefined.
2195   CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
2196                     ThisPtr, SrcRecordTy);
2197 
2198   // Whether we need an explicit null pointer check. For example, with the
2199   // Microsoft ABI, if this is a call to __RTtypeid, the null pointer check
2200   // and the exception throw happen inside the __RTtypeid(nullptr) call itself.
2201   if (HasNullCheck &&
2202       CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(SrcRecordTy)) {
2203     llvm::BasicBlock *BadTypeidBlock =
2204         CGF.createBasicBlock("typeid.bad_typeid");
2205     llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
2206 
2207     llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
2208     CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
2209 
2210     CGF.EmitBlock(BadTypeidBlock);
2211     CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
2212     CGF.EmitBlock(EndBlock);
2213   }
2214 
2215   return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
2216                                         StdTypeInfoPtrTy);
2217 }
2218 
2219 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
2220   // Ideally, we would like to use GlobalsInt8PtrTy here, however, we cannot,
2221   // primarily because the result of applying typeid is a value of type
2222   // type_info, which is declared & defined by the standard library
2223   // implementation and expects to operate on the generic (default) AS.
2224   // https://reviews.llvm.org/D157452 has more context, and a possible solution.
2225   llvm::Type *PtrTy = Int8PtrTy;
2226   LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);
2227 
2228   auto MaybeASCast = [=](auto &&TypeInfo) {
2229     if (GlobAS == LangAS::Default)
2230       return TypeInfo;
2231     return getTargetHooks().performAddrSpaceCast(CGM, TypeInfo, GlobAS, PtrTy);
2232   };
2233 
2234   if (E->isTypeOperand()) {
2235     llvm::Constant *TypeInfo =
2236         CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
2237     return MaybeASCast(TypeInfo);
2238   }
2239 
2240   // C++ [expr.typeid]p2:
2241   //   When typeid is applied to a glvalue expression whose type is a
2242   //   polymorphic class type, the result refers to a std::type_info object
2243   //   representing the type of the most derived object (that is, the dynamic
2244   //   type) to which the glvalue refers.
2245   // If the operand is already the most derived object, skip the vtable lookup.
2246   if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
2247     return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy,
2248                                 E->hasNullCheck());
2249 
2250   QualType OperandTy = E->getExprOperand()->getType();
2251   return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(OperandTy));
2252 }
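     // Quick reference (illustrative, not from the original source):
     //
     //   typeid(int);   // folds to the address of a static RTTI descriptor
     //
     //   Base *p = ...;
     //   typeid(*p);    // polymorphic glvalue: loads the type_info slot from
     //                  // the vtable via EmitTypeidFromVTable, throwing
     //                  // std::bad_typeid first if p is null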
2253 
2254 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2255                                           QualType DestTy) {
2256   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
2257   if (DestTy->isPointerType())
2258     return llvm::Constant::getNullValue(DestLTy);
2259 
2260   /// C++ [expr.dynamic.cast]p9:
2261   ///   A failed cast to reference type throws std::bad_cast
2262   if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2263     return nullptr;
2264 
2265   CGF.Builder.ClearInsertionPoint();
2266   return llvm::PoisonValue::get(DestLTy);
2267 }
2268 
2269 llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
2270                                               const CXXDynamicCastExpr *DCE) {
2271   CGM.EmitExplicitCastExprType(DCE, this);
2272   QualType DestTy = DCE->getTypeAsWritten();
2273 
2274   QualType SrcTy = DCE->getSubExpr()->getType();
2275 
2276   // C++ [expr.dynamic.cast]p7:
2277   //   If T is "pointer to cv void," then the result is a pointer to the most
2278   //   derived object pointed to by v.
2279   bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
2280   QualType SrcRecordTy;
2281   QualType DestRecordTy;
2282   if (IsDynamicCastToVoid) {
2283     SrcRecordTy = SrcTy->getPointeeType();
2284     // No DestRecordTy.
2285   } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
2286     SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
2287     DestRecordTy = DestPTy->getPointeeType();
2288   } else {
2289     SrcRecordTy = SrcTy;
2290     DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
2291   }
2292 
2293   // C++ [class.cdtor]p5:
2294   //   If the operand of the dynamic_cast refers to the object under
2295   //   construction or destruction and the static type of the operand is not a
2296   //   pointer to or object of the constructor or destructor’s own class or one
2297   //   of its bases, the dynamic_cast results in undefined behavior.
2298   EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy);
2299 
2300   if (DCE->isAlwaysNull()) {
2301     if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
2302       // Expression emission is expected to retain a valid insertion point.
2303       if (!Builder.GetInsertBlock())
2304         EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
2305       return T;
2306     }
2307   }
2308 
2309   assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
2310 
2311   // If the destination is effectively final, the cast succeeds if and only
2312   // if the dynamic type of the pointer is exactly the destination type.
2313   bool IsExact = !IsDynamicCastToVoid &&
2314                  CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2315                  DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
2316                  CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy) &&
2317                  !getLangOpts().PointerAuthCalls;
2318 
2319   // C++ [expr.dynamic.cast]p4:
2320   //   If the value of v is a null pointer value in the pointer case, the result
2321   //   is the null pointer value of type T.
2322   bool ShouldNullCheckSrcValue =
2323       IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
2324                      SrcTy->isPointerType(), SrcRecordTy);
2325 
2326   llvm::BasicBlock *CastNull = nullptr;
2327   llvm::BasicBlock *CastNotNull = nullptr;
2328   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
2329 
2330   if (ShouldNullCheckSrcValue) {
2331     CastNull = createBasicBlock("dynamic_cast.null");
2332     CastNotNull = createBasicBlock("dynamic_cast.notnull");
2333 
2334     llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr);
2335     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
2336     EmitBlock(CastNotNull);
2337   }
2338 
2339   llvm::Value *Value;
2340   if (IsDynamicCastToVoid) {
2341     Value = CGM.getCXXABI().emitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy);
2342   } else if (IsExact) {
2343     // If the destination type is effectively final, this pointer points to the
2344     // right type if and only if its vptr has the right value.
2345     Value = CGM.getCXXABI().emitExactDynamicCast(
2346         *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
2347   } else {
2348     assert(DestRecordTy->isRecordType() &&
2349            "destination type must be a record type!");
2350     Value = CGM.getCXXABI().emitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
2351                                                 DestTy, DestRecordTy, CastEnd);
2352   }
2353   CastNotNull = Builder.GetInsertBlock();
2354 
2355   llvm::Value *NullValue = nullptr;
2356   if (ShouldNullCheckSrcValue) {
2357     EmitBranch(CastEnd);
2358 
2359     EmitBlock(CastNull);
2360     NullValue = EmitDynamicCastToNull(*this, DestTy);
2361     CastNull = Builder.GetInsertBlock();
2362 
2363     EmitBranch(CastEnd);
2364   }
2365 
2366   EmitBlock(CastEnd);
2367 
2368   if (CastNull) {
2369     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
2370     PHI->addIncoming(Value, CastNotNull);
2371     PHI->addIncoming(NullValue, CastNull);
2372 
2373     Value = PHI;
2374   }
2375 
2376   return Value;
2377 }
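     // Illustrative example (not from the original source): with
     //
     //   struct B { virtual ~B(); };
     //   struct D final : B {};
     //   D *d = dynamic_cast<D *>(b);
     //
     // the 'IsExact' path typically applies at -O1 and above (absent pointer
     // auth), lowering the cast to a null check plus a direct vptr comparison
     // instead of a call to the ABI's __dynamic_cast runtime routine.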
2378