//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress(*this);
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
        /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(*this), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                This.getPointer(*this),
                C.getRecordType(CalleeDecl->getParent()),
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(*this),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(*this), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
  else
    This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee. Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E == MustTailCall,
                  E->getExprLoc());
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = DestPtr.withElementType(CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present, they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr(NullVariable, CGF.Int8Ty, Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object
    // is passed directly as the first argument to the constructor.
    // This should also handle stepping through implicit casts and
    // conversion sequences which involve two steps, with a
    // conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(0);
    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    EmitAggExpr(SrcObj, Dest);
    return;
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      [[fallthrough]];

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
      ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t. That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
          dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t. If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countl_zero())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that. This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie. This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow. Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary. This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary. Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
          llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
              llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
      BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr = Builder.CreateConstInBoundsGEP(
          CurPtr, InitListElements, "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = CurPtr.withElementType(ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex. Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element. TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
            Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(
                           CurPtr.getElementType(), CurPtr.getPointer(),
                           Builder.getSize(1), "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
      Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(),
                                NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
        Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
      Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                         "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type, TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
}

static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void* (or C* for a destroying operator
      // delete for class type C).
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      UsualDeleteParams Params;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        Params.Alignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        Params = getUsualDeleteParams(OperatorDelete);
      }

      assert(!Params.DestroyingDelete &&
             "should not call destroying delete in a new-expression");

      // The second argument can be a std::size_t (for non-placement delete).
      if (Params.Size)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
1465 // FIXME: Use the right type as the parameter type. Note that in a call 1466 // to operator delete(size_t, ...), we may not have it available. 1467 if (Params.Alignment) 1468 DeleteArgs.add(RValue::get(llvm::ConstantInt::get( 1469 CGF.SizeTy, AllocAlign.getQuantity())), 1470 CGF.getContext().getSizeType()); 1471 1472 // Pass the rest of the arguments, which must match exactly. 1473 for (unsigned I = 0; I != NumPlacementArgs; ++I) { 1474 auto Arg = getPlacementArgs()[I]; 1475 DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType); 1476 } 1477 1478 // Call 'operator delete'. 1479 EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); 1480 } 1481 }; 1482 } 1483 1484 /// Enter a cleanup to call 'operator delete' if the initializer in a 1485 /// new-expression throws. 1486 static void EnterNewDeleteCleanup(CodeGenFunction &CGF, 1487 const CXXNewExpr *E, 1488 Address NewPtr, 1489 llvm::Value *AllocSize, 1490 CharUnits AllocAlign, 1491 const CallArgList &NewArgs) { 1492 unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1; 1493 1494 // If we're not inside a conditional branch, then the cleanup will 1495 // dominate and we can do the easier (and more efficient) thing. 1496 if (!CGF.isInConditionalBranch()) { 1497 struct DirectCleanupTraits { 1498 typedef llvm::Value *ValueTy; 1499 typedef RValue RValueTy; 1500 static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); } 1501 static RValue get(CodeGenFunction &, RValueTy V) { return V; } 1502 }; 1503 1504 typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup; 1505 1506 DirectCleanup *Cleanup = CGF.EHStack 1507 .pushCleanupWithExtra<DirectCleanup>(EHCleanup, 1508 E->getNumPlacementArgs(), 1509 E->getOperatorDelete(), 1510 NewPtr.getPointer(), 1511 AllocSize, 1512 E->passAlignment(), 1513 AllocAlign); 1514 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { 1515 auto &Arg = NewArgs[I + NumNonPlacementArgs]; 1516 Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty); 1517 } 1518 1519 return; 1520 } 1521 1522 // Otherwise, we need to save all this stuff. 1523 DominatingValue<RValue>::saved_type SavedNewPtr = 1524 DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer())); 1525 DominatingValue<RValue>::saved_type SavedAllocSize = 1526 DominatingValue<RValue>::save(CGF, RValue::get(AllocSize)); 1527 1528 struct ConditionalCleanupTraits { 1529 typedef DominatingValue<RValue>::saved_type ValueTy; 1530 typedef DominatingValue<RValue>::saved_type RValueTy; 1531 static RValue get(CodeGenFunction &CGF, ValueTy V) { 1532 return V.restore(CGF); 1533 } 1534 }; 1535 typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup; 1536 1537 ConditionalCleanup *Cleanup = CGF.EHStack 1538 .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup, 1539 E->getNumPlacementArgs(), 1540 E->getOperatorDelete(), 1541 SavedNewPtr, 1542 SavedAllocSize, 1543 E->passAlignment(), 1544 AllocAlign); 1545 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) { 1546 auto &Arg = NewArgs[I + NumNonPlacementArgs]; 1547 Cleanup->setPlacementArg( 1548 I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty); 1549 } 1550 1551 CGF.initFullExprCleanup(); 1552 } 1553 1554 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { 1555 // The element type being allocated. 1556 QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); 1557 1558 // 1. Build a call to the allocation function. 
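// Informally, and with purely illustrative names, the allocation functions
// selected for a few common forms are:
//
//   new Widget        -> operator new(sizeof(Widget))
//   new Widget[n]     -> operator new[](n * sizeof(Widget), plus a cookie
//                        when one is required)
//   new (buf) Widget  -> the reserved placement operator new(size_t, void *)
//
// with an extra std::align_val_t argument when the allocated type is
// over-aligned and Sema selected an aligned overload.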
1559 FunctionDecl *allocator = E->getOperatorNew(); 1560 1561 // If there is a brace-initializer, cannot allocate fewer elements than inits. 1562 unsigned minElements = 0; 1563 if (E->isArray() && E->hasInitializer()) { 1564 const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()); 1565 if (ILE && ILE->isStringLiteralInit()) 1566 minElements = 1567 cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe()) 1568 ->getSize().getZExtValue(); 1569 else if (ILE) 1570 minElements = ILE->getNumInits(); 1571 } 1572 1573 llvm::Value *numElements = nullptr; 1574 llvm::Value *allocSizeWithoutCookie = nullptr; 1575 llvm::Value *allocSize = 1576 EmitCXXNewAllocSize(*this, E, minElements, numElements, 1577 allocSizeWithoutCookie); 1578 CharUnits allocAlign = getContext().getTypeAlignInChars(allocType); 1579 1580 // Emit the allocation call. If the allocator is a global placement 1581 // operator, just "inline" it directly. 1582 Address allocation = Address::invalid(); 1583 CallArgList allocatorArgs; 1584 if (allocator->isReservedGlobalPlacementOperator()) { 1585 assert(E->getNumPlacementArgs() == 1); 1586 const Expr *arg = *E->placement_arguments().begin(); 1587 1588 LValueBaseInfo BaseInfo; 1589 allocation = EmitPointerWithAlignment(arg, &BaseInfo); 1590 1591 // The pointer expression will, in many cases, be an opaque void*. 1592 // In these cases, discard the computed alignment and use the 1593 // formal alignment of the allocated type. 1594 if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl) 1595 allocation = allocation.withAlignment(allocAlign); 1596 1597 // Set up allocatorArgs for the call to operator delete if it's not 1598 // the reserved global operator. 1599 if (E->getOperatorDelete() && 1600 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { 1601 allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType()); 1602 allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType()); 1603 } 1604 1605 } else { 1606 const FunctionProtoType *allocatorType = 1607 allocator->getType()->castAs<FunctionProtoType>(); 1608 unsigned ParamsToSkip = 0; 1609 1610 // The allocation size is the first argument. 1611 QualType sizeType = getContext().getSizeType(); 1612 allocatorArgs.add(RValue::get(allocSize), sizeType); 1613 ++ParamsToSkip; 1614 1615 if (allocSize != allocSizeWithoutCookie) { 1616 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI. 1617 allocAlign = std::max(allocAlign, cookieAlign); 1618 } 1619 1620 // The allocation alignment may be passed as the second argument. 1621 if (E->passAlignment()) { 1622 QualType AlignValT = sizeType; 1623 if (allocatorType->getNumParams() > 1) { 1624 AlignValT = allocatorType->getParamType(1); 1625 assert(getContext().hasSameUnqualifiedType( 1626 AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(), 1627 sizeType) && 1628 "wrong type for alignment parameter"); 1629 ++ParamsToSkip; 1630 } else { 1631 // Corner case, passing alignment to 'operator new(size_t, ...)'. 1632 assert(allocator->isVariadic() && "can't pass alignment to allocator"); 1633 } 1634 allocatorArgs.add( 1635 RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())), 1636 AlignValT); 1637 } 1638 1639 // FIXME: Why do we not pass a CalleeDecl here? 
1640 EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1641 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1642
1643 RValue RV =
1644 EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1645
1646 // Set !heapallocsite metadata on the call to operator new.
1647 if (getDebugInfo())
1648 if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
1649 getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
1650 E->getExprLoc());
1651
1652 // If this was a call to a global replaceable allocation function that does
1653 // not take an alignment argument, the allocator is known to produce
1654 // storage that's suitably aligned for any object that fits, up to a known
1655 // threshold. Otherwise assume it's suitably aligned for the allocated type.
1656 CharUnits allocationAlign = allocAlign;
1657 if (!E->passAlignment() &&
1658 allocator->isReplaceableGlobalAllocationFunction()) {
1659 unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
1660 Target.getNewAlign(), getContext().getTypeSize(allocType)));
1661 allocationAlign = std::max(
1662 allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
1663 }
1664
1665 allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
1666 }
1667
1668 // Emit a null check on the allocation result if the allocation
1669 // function is allowed to return null (because it has a non-throwing
1670 // exception spec or is the reserved placement new) and we have an
1671 // interesting initializer or will be running sanitizers on the initialization.
1672 bool nullCheck = E->shouldNullCheckAllocation() &&
1673 (!allocType.isPODType(getContext()) || E->hasInitializer() ||
1674 sanitizePerformTypeCheck());
1675
1676 llvm::BasicBlock *nullCheckBB = nullptr;
1677 llvm::BasicBlock *contBB = nullptr;
1678
1679 // The null-check means that the initializer is conditionally
1680 // evaluated.
1681 ConditionalEvaluation conditional(*this);
1682
1683 if (nullCheck) {
1684 conditional.begin(*this);
1685
1686 nullCheckBB = Builder.GetInsertBlock();
1687 llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1688 contBB = createBasicBlock("new.cont");
1689
1690 llvm::Value *isNull =
1691 Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
1692 Builder.CreateCondBr(isNull, contBB, notNullBB);
1693 EmitBlock(notNullBB);
1694 }
1695
1696 // If there's an operator delete, enter a cleanup to call it if an
1697 // exception is thrown.
1698 EHScopeStack::stable_iterator operatorDeleteCleanup;
1699 llvm::Instruction *cleanupDominator = nullptr;
1700 if (E->getOperatorDelete() &&
1701 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1702 EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
1703 allocatorArgs);
1704 operatorDeleteCleanup = EHStack.stable_begin();
1705 cleanupDominator = Builder.CreateUnreachable();
1706 }
1707
1708 assert((allocSize == allocSizeWithoutCookie) ==
1709 CalculateCookiePadding(*this, E).isZero());
1710 if (allocSize != allocSizeWithoutCookie) {
1711 assert(E->isArray());
1712 allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1713 numElements,
1714 E, allocType);
1715 }
1716
1717 llvm::Type *elementTy = ConvertTypeForMem(allocType);
1718 Address result = allocation.withElementType(elementTy);
1719
1720 // Pass the pointer through launder.invariant.group to avoid propagating
1721 // vptr information that may be associated with the previous type.
1722 // To avoid breaking LTO between different optimization levels, we do this
1723 // regardless of the optimization level.
1724 if (CGM.getCodeGenOpts().StrictVTablePointers &&
1725 allocator->isReservedGlobalPlacementOperator())
1726 result = Builder.CreateLaunderInvariantGroup(result);
1727
1728 // Emit sanitizer checks for the pointer value now, so that in the case of
1729 // an array it is checked only once and not at each constructor call. We may
1730 // have already checked that the pointer is non-null.
1731 // FIXME: If we have an array cookie and a potentially-throwing allocator,
1732 // we'll null check the wrong pointer here.
1733 SanitizerSet SkippedChecks;
1734 SkippedChecks.set(SanitizerKind::Null, nullCheck);
1735 EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
1736 E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
1737 result.getPointer(), allocType, result.getAlignment(),
1738 SkippedChecks, numElements);
1739
1740 EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1741 allocSizeWithoutCookie);
1742 llvm::Value *resultPtr = result.getPointer();
1743 if (E->isArray()) {
1744 // NewPtr is a pointer to the base element type. If we're
1745 // allocating an array of arrays, we'll need to cast back to the
1746 // array pointer type.
1747 llvm::Type *resultType = ConvertTypeForMem(E->getType());
1748 if (resultPtr->getType() != resultType)
1749 resultPtr = Builder.CreateBitCast(resultPtr, resultType);
1750 }
1751
1752 // Deactivate the 'operator delete' cleanup if we finished
1753 // initialization.
1754 if (operatorDeleteCleanup.isValid()) {
1755 DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1756 cleanupDominator->eraseFromParent();
1757 }
1758
1759 if (nullCheck) {
1760 conditional.end(*this);
1761
1762 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1763 EmitBlock(contBB);
1764
1765 llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1766 PHI->addIncoming(resultPtr, notNullBB);
1767 PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1768 nullCheckBB);
1769
1770 resultPtr = PHI;
1771 }
1772
1773 return resultPtr;
1774 }
1775
1776 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1777 llvm::Value *Ptr, QualType DeleteTy,
1778 llvm::Value *NumElements,
1779 CharUnits CookieSize) {
1780 assert((!NumElements && CookieSize.isZero()) ||
1781 DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1782
1783 const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
1784 CallArgList DeleteArgs;
1785
1786 auto Params = getUsualDeleteParams(DeleteFD);
1787 auto ParamTypeIt = DeleteFTy->param_type_begin();
1788
1789 // Pass the pointer itself.
1790 QualType ArgTy = *ParamTypeIt++;
1791 llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1792 DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1793
1794 // Pass the std::destroying_delete tag if present.
1795 llvm::AllocaInst *DestroyingDeleteTag = nullptr;
1796 if (Params.DestroyingDelete) {
1797 QualType DDTag = *ParamTypeIt++;
1798 llvm::Type *Ty = getTypes().ConvertType(DDTag);
1799 CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
1800 DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
1801 DestroyingDeleteTag->setAlignment(Align.getAsAlign());
1802 DeleteArgs.add(
1803 RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
1804 }
1805
1806 // Pass the size if the delete function has a size_t parameter.
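// A sized usual deallocation function looks like this in user code (purely
// illustrative; the class name is made up):
//
//   #include <cstddef>
//   struct Widget {
//     void operator delete(void *p, std::size_t bytes) noexcept;
//   };
//
// For a single-object delete the size argument is sizeof(Widget); for an
// array delete it is scaled by the element count and the cookie size is
// added, as below.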
1807 if (Params.Size) { 1808 QualType SizeType = *ParamTypeIt++; 1809 CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); 1810 llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType), 1811 DeleteTypeSize.getQuantity()); 1812 1813 // For array new, multiply by the number of elements. 1814 if (NumElements) 1815 Size = Builder.CreateMul(Size, NumElements); 1816 1817 // If there is a cookie, add the cookie size. 1818 if (!CookieSize.isZero()) 1819 Size = Builder.CreateAdd( 1820 Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity())); 1821 1822 DeleteArgs.add(RValue::get(Size), SizeType); 1823 } 1824 1825 // Pass the alignment if the delete function has an align_val_t parameter. 1826 if (Params.Alignment) { 1827 QualType AlignValType = *ParamTypeIt++; 1828 CharUnits DeleteTypeAlign = 1829 getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown( 1830 DeleteTy, true /* NeedsPreferredAlignment */)); 1831 llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType), 1832 DeleteTypeAlign.getQuantity()); 1833 DeleteArgs.add(RValue::get(Align), AlignValType); 1834 } 1835 1836 assert(ParamTypeIt == DeleteFTy->param_type_end() && 1837 "unknown parameter to usual delete function"); 1838 1839 // Emit the call to delete. 1840 EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs); 1841 1842 // If call argument lowering didn't use the destroying_delete_t alloca, 1843 // remove it again. 1844 if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty()) 1845 DestroyingDeleteTag->eraseFromParent(); 1846 } 1847 1848 namespace { 1849 /// Calls the given 'operator delete' on a single object. 1850 struct CallObjectDelete final : EHScopeStack::Cleanup { 1851 llvm::Value *Ptr; 1852 const FunctionDecl *OperatorDelete; 1853 QualType ElementType; 1854 1855 CallObjectDelete(llvm::Value *Ptr, 1856 const FunctionDecl *OperatorDelete, 1857 QualType ElementType) 1858 : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} 1859 1860 void Emit(CodeGenFunction &CGF, Flags flags) override { 1861 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType); 1862 } 1863 }; 1864 } 1865 1866 void 1867 CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete, 1868 llvm::Value *CompletePtr, 1869 QualType ElementType) { 1870 EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr, 1871 OperatorDelete, ElementType); 1872 } 1873 1874 /// Emit the code for deleting a single object with a destroying operator 1875 /// delete. If the element type has a non-virtual destructor, Ptr has already 1876 /// been converted to the type of the parameter of 'operator delete'. Otherwise 1877 /// Ptr points to an object of the static type. 1878 static void EmitDestroyingObjectDelete(CodeGenFunction &CGF, 1879 const CXXDeleteExpr *DE, Address Ptr, 1880 QualType ElementType) { 1881 auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor(); 1882 if (Dtor && Dtor->isVirtual()) 1883 CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType, 1884 Dtor); 1885 else 1886 CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType); 1887 } 1888 1889 /// Emit the code for deleting a single object. 1890 /// \return \c true if we started emitting UnconditionalDeleteBlock, \c false 1891 /// if not. 
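///
/// For example (hypothetical user code):
///   Base *p = makeDerived();
///   delete p;   // may call ~Derived() through the vtable, or be
///               // devirtualized when the dynamic type is provable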
1892 static bool EmitObjectDelete(CodeGenFunction &CGF,
1893 const CXXDeleteExpr *DE,
1894 Address Ptr,
1895 QualType ElementType,
1896 llvm::BasicBlock *UnconditionalDeleteBlock) {
1897 // C++11 [expr.delete]p3:
1898 // If the static type of the object to be deleted is different from its
1899 // dynamic type, the static type shall be a base class of the dynamic type
1900 // of the object to be deleted and the static type shall have a virtual
1901 // destructor or the behavior is undefined.
1902 CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
1903 DE->getExprLoc(), Ptr.getPointer(),
1904 ElementType);
1905
1906 const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1907 assert(!OperatorDelete->isDestroyingOperatorDelete());
1908
1909 // Find the destructor for the type, if applicable. If the
1910 // destructor is virtual, we'll just emit the vcall and return.
1911 const CXXDestructorDecl *Dtor = nullptr;
1912 if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1913 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1914 if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1915 Dtor = RD->getDestructor();
1916
1917 if (Dtor->isVirtual()) {
1918 bool UseVirtualCall = true;
1919 const Expr *Base = DE->getArgument();
1920 if (auto *DevirtualizedDtor =
1921 dyn_cast_or_null<const CXXDestructorDecl>(
1922 Dtor->getDevirtualizedMethod(
1923 Base, CGF.CGM.getLangOpts().AppleKext))) {
1924 UseVirtualCall = false;
1925 const CXXRecordDecl *DevirtualizedClass =
1926 DevirtualizedDtor->getParent();
1927 if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
1928 // Devirtualized to the class of the base type (the type of the
1929 // whole expression).
1930 Dtor = DevirtualizedDtor;
1931 } else {
1932 // Devirtualized to some other type. We would need to cast the this
1933 // pointer to that type, but we don't support that yet, so emit a
1934 // virtual call instead. FIXME: handle the case where it is
1935 // devirtualized to the derived type (the type of the inner
1936 // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1937 UseVirtualCall = true;
1938 }
1939 }
1940 if (UseVirtualCall) {
1941 CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1942 Dtor);
1943 return false;
1944 }
1945 }
1946 }
1947 }
1948
1949 // Make sure that we call delete even if the dtor throws.
1950 // This doesn't have to be a conditional cleanup because we're going
1951 // to pop it off in a second.
1952 CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1953 Ptr.getPointer(),
1954 OperatorDelete, ElementType);
1955
1956 if (Dtor)
1957 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1958 /*ForVirtualBase=*/false,
1959 /*Delegating=*/false,
1960 Ptr, ElementType);
1961 else if (auto Lifetime = ElementType.getObjCLifetime()) {
1962 switch (Lifetime) {
1963 case Qualifiers::OCL_None:
1964 case Qualifiers::OCL_ExplicitNone:
1965 case Qualifiers::OCL_Autoreleasing:
1966 break;
1967
1968 case Qualifiers::OCL_Strong:
1969 CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
1970 break;
1971
1972 case Qualifiers::OCL_Weak:
1973 CGF.EmitARCDestroyWeak(Ptr);
1974 break;
1975 }
1976 }
1977
1978 // When optimizing for size, call 'operator delete' unconditionally.
1979 if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
1980 CGF.EmitBlock(UnconditionalDeleteBlock);
1981 CGF.PopCleanupBlock();
1982 return true;
1983 }
1984
1985 CGF.PopCleanupBlock();
1986 return false;
1987 }
1988
1989 namespace {
1990 /// Calls the given 'operator delete' on an array of objects.
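/// For a (hypothetical) `delete [] arr;`, the pointer originally returned by
/// operator new[] and the element count are recovered from the array cookie
/// (see EmitArrayDelete below) and forwarded to this cleanup.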
1991 struct CallArrayDelete final : EHScopeStack::Cleanup { 1992 llvm::Value *Ptr; 1993 const FunctionDecl *OperatorDelete; 1994 llvm::Value *NumElements; 1995 QualType ElementType; 1996 CharUnits CookieSize; 1997 1998 CallArrayDelete(llvm::Value *Ptr, 1999 const FunctionDecl *OperatorDelete, 2000 llvm::Value *NumElements, 2001 QualType ElementType, 2002 CharUnits CookieSize) 2003 : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements), 2004 ElementType(ElementType), CookieSize(CookieSize) {} 2005 2006 void Emit(CodeGenFunction &CGF, Flags flags) override { 2007 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements, 2008 CookieSize); 2009 } 2010 }; 2011 } 2012 2013 /// Emit the code for deleting an array of objects. 2014 static void EmitArrayDelete(CodeGenFunction &CGF, 2015 const CXXDeleteExpr *E, 2016 Address deletedPtr, 2017 QualType elementType) { 2018 llvm::Value *numElements = nullptr; 2019 llvm::Value *allocatedPtr = nullptr; 2020 CharUnits cookieSize; 2021 CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType, 2022 numElements, allocatedPtr, cookieSize); 2023 2024 assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer"); 2025 2026 // Make sure that we call delete even if one of the dtors throws. 2027 const FunctionDecl *operatorDelete = E->getOperatorDelete(); 2028 CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup, 2029 allocatedPtr, operatorDelete, 2030 numElements, elementType, 2031 cookieSize); 2032 2033 // Destroy the elements. 2034 if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) { 2035 assert(numElements && "no element count for a type with a destructor!"); 2036 2037 CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); 2038 CharUnits elementAlign = 2039 deletedPtr.getAlignment().alignmentOfArrayElement(elementSize); 2040 2041 llvm::Value *arrayBegin = deletedPtr.getPointer(); 2042 llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP( 2043 deletedPtr.getElementType(), arrayBegin, numElements, "delete.end"); 2044 2045 // Note that it is legal to allocate a zero-length array, and we 2046 // can never fold the check away because the length should always 2047 // come from a cookie. 2048 CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign, 2049 CGF.getDestroyer(dtorKind), 2050 /*checkZeroLength*/ true, 2051 CGF.needsEHCleanup(dtorKind)); 2052 } 2053 2054 // Pop the cleanup block. 2055 CGF.PopCleanupBlock(); 2056 } 2057 2058 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { 2059 const Expr *Arg = E->getArgument(); 2060 Address Ptr = EmitPointerWithAlignment(Arg); 2061 2062 // Null check the pointer. 2063 // 2064 // We could avoid this null check if we can determine that the object 2065 // destruction is trivial and doesn't require an array cookie; we can 2066 // unconditionally perform the operator delete call in that case. For now, we 2067 // assume that deleted pointers are null rarely enough that it's better to 2068 // keep the branch. This might be worth revisiting for a -O0 code size win. 
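// In other words, for a (hypothetical) `delete p;` where p may be null, the
// emitted control flow is roughly:
//
//   if (p != nullptr) {
//     p->~Widget();          // only if the destructor is non-trivial
//     operator delete(p);    // possibly sized and/or aligned
//   }
//
// since deleting a null pointer has no effect.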
2069 llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull"); 2070 llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end"); 2071 2072 llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull"); 2073 2074 Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull); 2075 EmitBlock(DeleteNotNull); 2076 Ptr.setKnownNonNull(); 2077 2078 QualType DeleteTy = E->getDestroyedType(); 2079 2080 // A destroying operator delete overrides the entire operation of the 2081 // delete expression. 2082 if (E->getOperatorDelete()->isDestroyingOperatorDelete()) { 2083 EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy); 2084 EmitBlock(DeleteEnd); 2085 return; 2086 } 2087 2088 // We might be deleting a pointer to array. If so, GEP down to the 2089 // first non-array element. 2090 // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*) 2091 if (DeleteTy->isConstantArrayType()) { 2092 llvm::Value *Zero = Builder.getInt32(0); 2093 SmallVector<llvm::Value*,8> GEP; 2094 2095 GEP.push_back(Zero); // point at the outermost array 2096 2097 // For each layer of array type we're pointing at: 2098 while (const ConstantArrayType *Arr 2099 = getContext().getAsConstantArrayType(DeleteTy)) { 2100 // 1. Unpeel the array type. 2101 DeleteTy = Arr->getElementType(); 2102 2103 // 2. GEP to the first element of the array. 2104 GEP.push_back(Zero); 2105 } 2106 2107 Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(), 2108 Ptr.getPointer(), GEP, "del.first"), 2109 ConvertTypeForMem(DeleteTy), Ptr.getAlignment(), 2110 Ptr.isKnownNonNull()); 2111 } 2112 2113 assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType()); 2114 2115 if (E->isArrayForm()) { 2116 EmitArrayDelete(*this, E, Ptr, DeleteTy); 2117 EmitBlock(DeleteEnd); 2118 } else { 2119 if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd)) 2120 EmitBlock(DeleteEnd); 2121 } 2122 } 2123 2124 static bool isGLValueFromPointerDeref(const Expr *E) { 2125 E = E->IgnoreParens(); 2126 2127 if (const auto *CE = dyn_cast<CastExpr>(E)) { 2128 if (!CE->getSubExpr()->isGLValue()) 2129 return false; 2130 return isGLValueFromPointerDeref(CE->getSubExpr()); 2131 } 2132 2133 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 2134 return isGLValueFromPointerDeref(OVE->getSourceExpr()); 2135 2136 if (const auto *BO = dyn_cast<BinaryOperator>(E)) 2137 if (BO->getOpcode() == BO_Comma) 2138 return isGLValueFromPointerDeref(BO->getRHS()); 2139 2140 if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E)) 2141 return isGLValueFromPointerDeref(ACO->getTrueExpr()) || 2142 isGLValueFromPointerDeref(ACO->getFalseExpr()); 2143 2144 // C++11 [expr.sub]p1: 2145 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)) 2146 if (isa<ArraySubscriptExpr>(E)) 2147 return true; 2148 2149 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2150 if (UO->getOpcode() == UO_Deref) 2151 return true; 2152 2153 return false; 2154 } 2155 2156 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E, 2157 llvm::Type *StdTypeInfoPtrTy) { 2158 // Get the vtable pointer. 2159 Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF); 2160 2161 QualType SrcRecordTy = E->getType(); 2162 2163 // C++ [class.cdtor]p4: 2164 // If the operand of typeid refers to the object under construction or 2165 // destruction and the static type of the operand is neither the constructor 2166 // or destructor’s class nor one of its bases, the behavior is undefined. 
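// For instance (hypothetical), a Base constructor that evaluates
//   typeid(*static_cast<Derived *>(this))
// before the Derived object exists has undefined behavior; the dynamic
// operation check below can diagnose such cases under -fsanitize=vptr.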
2167 CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(), 2168 ThisPtr.getPointer(), SrcRecordTy); 2169 2170 // C++ [expr.typeid]p2: 2171 // If the glvalue expression is obtained by applying the unary * operator to 2172 // a pointer and the pointer is a null pointer value, the typeid expression 2173 // throws the std::bad_typeid exception. 2174 // 2175 // However, this paragraph's intent is not clear. We choose a very generous 2176 // interpretation which implores us to consider comma operators, conditional 2177 // operators, parentheses and other such constructs. 2178 if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked( 2179 isGLValueFromPointerDeref(E), SrcRecordTy)) { 2180 llvm::BasicBlock *BadTypeidBlock = 2181 CGF.createBasicBlock("typeid.bad_typeid"); 2182 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end"); 2183 2184 llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer()); 2185 CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock); 2186 2187 CGF.EmitBlock(BadTypeidBlock); 2188 CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF); 2189 CGF.EmitBlock(EndBlock); 2190 } 2191 2192 return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr, 2193 StdTypeInfoPtrTy); 2194 } 2195 2196 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) { 2197 llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext()); 2198 2199 if (E->isTypeOperand()) { 2200 llvm::Constant *TypeInfo = 2201 CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext())); 2202 return TypeInfo; 2203 } 2204 2205 // C++ [expr.typeid]p2: 2206 // When typeid is applied to a glvalue expression whose type is a 2207 // polymorphic class type, the result refers to a std::type_info object 2208 // representing the type of the most derived object (that is, the dynamic 2209 // type) to which the glvalue refers. 2210 // If the operand is already most derived object, no need to look up vtable. 2211 if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext())) 2212 return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy); 2213 2214 QualType OperandTy = E->getExprOperand()->getType(); 2215 return CGM.GetAddrOfRTTIDescriptor(OperandTy); 2216 } 2217 2218 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF, 2219 QualType DestTy) { 2220 llvm::Type *DestLTy = CGF.ConvertType(DestTy); 2221 if (DestTy->isPointerType()) 2222 return llvm::Constant::getNullValue(DestLTy); 2223 2224 /// C++ [expr.dynamic.cast]p9: 2225 /// A failed cast to reference type throws std::bad_cast 2226 if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF)) 2227 return nullptr; 2228 2229 CGF.Builder.ClearInsertionPoint(); 2230 return llvm::PoisonValue::get(DestLTy); 2231 } 2232 2233 llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr, 2234 const CXXDynamicCastExpr *DCE) { 2235 CGM.EmitExplicitCastExprType(DCE, this); 2236 QualType DestTy = DCE->getTypeAsWritten(); 2237 2238 QualType SrcTy = DCE->getSubExpr()->getType(); 2239 2240 // C++ [expr.dynamic.cast]p7: 2241 // If T is "pointer to cv void," then the result is a pointer to the most 2242 // derived object pointed to by v. 2243 bool IsDynamicCastToVoid = DestTy->isVoidPointerType(); 2244 QualType SrcRecordTy; 2245 QualType DestRecordTy; 2246 if (IsDynamicCastToVoid) { 2247 SrcRecordTy = SrcTy->getPointeeType(); 2248 // No DestRecordTy. 
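// (For a hypothetical `dynamic_cast<void *>(p)`, the result is the address
// of the most derived object that p points to.)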
2249 } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) { 2250 SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType(); 2251 DestRecordTy = DestPTy->getPointeeType(); 2252 } else { 2253 SrcRecordTy = SrcTy; 2254 DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType(); 2255 } 2256 2257 // C++ [class.cdtor]p5: 2258 // If the operand of the dynamic_cast refers to the object under 2259 // construction or destruction and the static type of the operand is not a 2260 // pointer to or object of the constructor or destructor’s own class or one 2261 // of its bases, the dynamic_cast results in undefined behavior. 2262 EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(), 2263 SrcRecordTy); 2264 2265 if (DCE->isAlwaysNull()) { 2266 if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) { 2267 // Expression emission is expected to retain a valid insertion point. 2268 if (!Builder.GetInsertBlock()) 2269 EmitBlock(createBasicBlock("dynamic_cast.unreachable")); 2270 return T; 2271 } 2272 } 2273 2274 assert(SrcRecordTy->isRecordType() && "source type must be a record type!"); 2275 2276 // If the destination is effectively final, the cast succeeds if and only 2277 // if the dynamic type of the pointer is exactly the destination type. 2278 bool IsExact = !IsDynamicCastToVoid && 2279 CGM.getCodeGenOpts().OptimizationLevel > 0 && 2280 DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() && 2281 CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy); 2282 2283 // C++ [expr.dynamic.cast]p4: 2284 // If the value of v is a null pointer value in the pointer case, the result 2285 // is the null pointer value of type T. 2286 bool ShouldNullCheckSrcValue = 2287 IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked( 2288 SrcTy->isPointerType(), SrcRecordTy); 2289 2290 llvm::BasicBlock *CastNull = nullptr; 2291 llvm::BasicBlock *CastNotNull = nullptr; 2292 llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end"); 2293 2294 if (ShouldNullCheckSrcValue) { 2295 CastNull = createBasicBlock("dynamic_cast.null"); 2296 CastNotNull = createBasicBlock("dynamic_cast.notnull"); 2297 2298 llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer()); 2299 Builder.CreateCondBr(IsNull, CastNull, CastNotNull); 2300 EmitBlock(CastNotNull); 2301 } 2302 2303 llvm::Value *Value; 2304 if (IsDynamicCastToVoid) { 2305 Value = CGM.getCXXABI().emitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy); 2306 } else if (IsExact) { 2307 // If the destination type is effectively final, this pointer points to the 2308 // right type if and only if its vptr has the right value. 
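// For example, with hypothetical types
//   struct Base { virtual ~Base(); };
//   struct Leaf final : Base {};
// `dynamic_cast<Leaf *>(b)` can be lowered to a comparison of b's vptr
// against Leaf's vtable rather than a call into the ABI's dynamic_cast
// runtime routine.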
2309 Value = CGM.getCXXABI().emitExactDynamicCast( 2310 *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull); 2311 } else { 2312 assert(DestRecordTy->isRecordType() && 2313 "destination type must be a record type!"); 2314 Value = CGM.getCXXABI().emitDynamicCastCall(*this, ThisAddr, SrcRecordTy, 2315 DestTy, DestRecordTy, CastEnd); 2316 } 2317 CastNotNull = Builder.GetInsertBlock(); 2318 2319 llvm::Value *NullValue = nullptr; 2320 if (ShouldNullCheckSrcValue) { 2321 EmitBranch(CastEnd); 2322 2323 EmitBlock(CastNull); 2324 NullValue = EmitDynamicCastToNull(*this, DestTy); 2325 CastNull = Builder.GetInsertBlock(); 2326 2327 EmitBranch(CastEnd); 2328 } 2329 2330 EmitBlock(CastEnd); 2331 2332 if (CastNull) { 2333 llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); 2334 PHI->addIncoming(Value, CastNotNull); 2335 PHI->addIncoming(NullValue, CastNull); 2336 2337 Value = PHI; 2338 } 2339 2340 return Value; 2341 } 2342