//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args);
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
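// A minimal illustration of that extension (hypothetical user code, not part
// of this file):
//
//   struct S { S(int); };
//   void f(S *p) { p->S::S(42); }  // under -fms-extensions, re-runs the ctor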
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
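  // For example (illustrative only), in 'a = make()' with an overloaded member
  // 'operator=', 'make()' must be evaluated before 'a'; the right-to-left
  // argument list built below is spliced in after the 'this' argument.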
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      RtlArgs = &RtlArgStorage;
      EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                   drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                   /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, Ctor, This.getPointer(), /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (!MD->getParent()->mayInsertExtraPadding()) {
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
        // We don't like to generate the trivial copy/move assignment operator
        // when it isn't necessary; just produce the proper effect here.
        LValue RHS = isa<CXXOperatorCallExpr>(CE)
                         ? MakeNaturalAlignAddrLValue(
                               (*RtlArgs)[0].getRValue(*this).getScalarVal(),
                               (*(CE->arg_begin() + 1))->getType())
                         : EmitLValue(*CE->arg_begin());
        EmitAggregateAssign(This, RHS, CE->getType());
        return RValue::get(This.getPointer());
      }
      llvm_unreachable("unknown trivial member function");
    }
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
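  // An illustrative (hypothetical) violation that the type check below may
  // catch under the appropriate -fsanitize checks:
  //
  //   struct X { int n; void f(); };
  //   void g(char *p) { reinterpret_cast<X *>(p)->f(); }  // *p is not an X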
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, This.getPointer(),
                C.getRecordType(CalleeDecl->getParent()),
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This.getAddress(),
          cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), nullptr);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) =
          CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(),
                                        MD->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee. Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E->getExprLoc());
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
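  // For instance, under the Itanium C++ ABI a null pointer to data member is
  // represented as -1, so a base like (hypothetical example)
  //
  //   struct B { int B::*mp; };
  //
  // cannot be zero-filled with memset; the memcpy path below copies a null
  // constant instead.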
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
                               DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getQuantity());

    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
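  // For example (illustrative only):
  //
  //   X x = X(1);  // with ElideConstructors, X(1) is emitted directly into x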
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      LLVM_FALLTHROUGH;

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME: Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
    ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t. That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t. If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that. This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie. This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow. Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary. This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary. Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
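    // A sketch of the IR this produces (names illustrative; the width matches
    // the target's size_t):
    //
    //   %pair = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %sz, i64 %tsm)
    //   %size = extractvalue { i64, i1 } %pair, 0
    //   %ovf  = extractvalue { i64, i1 } %pair, 1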
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
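      // For example (hypothetical user code), 'new char[8]{"abc"}' copies the
      // four bytes of "abc\0" here and zero-fills the remaining four below.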
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex. Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element. TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
            Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If the new-expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
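  // For example (hypothetical), 'new S[n]{}' with 'struct S { int a; float b; };'
  // value-initializes every field and can be lowered to a single memset.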
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                         "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::Builtin);
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
}

static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
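  // For example (C++20, illustrative):
  //
  //   struct S { void operator delete(S *, std::destroying_delete_t); };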
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const FunctionProtoType *FPT =
          OperatorDelete->getType()->getAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void* (or C* for a destroying operator
      // delete for class type C).
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      UsualDeleteParams Params;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        Params.Alignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        Params = getUsualDeleteParams(OperatorDelete);
      }

      assert(!Params.DestroyingDelete &&
             "should not call destroying delete in a new-expression");

      // The second argument can be a std::size_t (for non-placement delete).
      if (Params.Size)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (Params.Alignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
                                           E->getNumPlacementArgs(),
                                           E->getOperatorDelete(),
                                           NewPtr.getPointer(),
                                           AllocSize,
                                           E->passAlignment(),
                                           AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
  }

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call. If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = Address(allocation.getPointer(), allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }

  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);

    RValue RV =
      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), allocationAlign);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
        Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Passing the pointer through launder.invariant.group avoids propagating
  // vptr information that may be embedded in the previous type. So as not to
  // break LTO between translation units built at different optimization
  // levels, we do this regardless of the optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
                     result.getAlignment());

  // Emit sanitizer checks for the pointer value now, so that in the case of
  // an array it is checked only once and not at each constructor call. We may
  // have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result.getPointer(), allocType, result.getAlignment(),
                SkippedChecks, numElements);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type. If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result.getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  llvm::Value *resultPtr = result.getPointer();
  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
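
// Some hypothetical source forms exercising the paths above:
//
//   auto *p = new int[n];                  // may prepend an array cookie
//   struct alignas(32) V { float f[8]; };
//   auto *q = new V;                       // passAlignment(): align_val_t(32)
//                                          // becomes the second argument
//   auto *r = new (buf) int(42);           // reserved global placement form;
//                                          // no allocation call is emitted
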
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const FunctionProtoType *DeleteFTy =
      DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the std::destroying_delete tag if present.
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    // Just pass an 'undef'. We expect the tag type to be an empty struct.
    auto *V = llvm::UndefValue::get(getTypes().ConvertType(DDTag));
    DeleteArgs.add(RValue::get(V), DDTag);
  }

  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
        getContext().getTypeAlignIfKnown(DeleteTy));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}
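
// For example, given a hypothetical over-aligned type
//
//   struct alignas(64) Big { char data[256]; };
//
// a usual 'operator delete(void *, std::size_t, std::align_val_t)' receives
// the pointer, a size of 256 (scaled by the element count plus the cookie
// size for array deletes), and an alignment of 64, assembled as above.
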
namespace {
/// Calls the given 'operator delete' on a single object.
struct CallObjectDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;
  const FunctionDecl *OperatorDelete;
  QualType ElementType;

  CallObjectDelete(llvm::Value *Ptr,
                   const FunctionDecl *OperatorDelete,
                   QualType ElementType)
    : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
  }
};
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}

/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
}
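
// For example (hypothetical source), a destroying operator delete takes over
// destruction itself, so no separate destructor call is emitted here:
//
//   struct S {
//     void operator delete(S *p, std::destroying_delete_t) {
//       p->~S();              // the class, not the compiler, destroys *p
//       ::operator delete(p);
//     }
//   };
//
// When S's destructor is virtual, the vcall path above locates the most
// derived class's operator delete instead.
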
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable. If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                    Dtor);
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}
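
// For example (hypothetical source):
//
//   struct Base { virtual ~Base(); };
//   void f(Base *b) { delete b; }
//
// takes the virtual branch above, emitting one vcall that both destroys *b
// and deallocates it with the dynamic type's operator delete. With a
// non-virtual destructor, the cleanup pushed above still runs operator
// delete even if the destructor throws.
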
namespace {
/// Calls the given 'operator delete' on an array of objects.
struct CallArrayDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;
  const FunctionDecl *OperatorDelete;
  llvm::Value *NumElements;
  QualType ElementType;
  CharUnits CookieSize;

  CallArrayDelete(llvm::Value *Ptr,
                  const FunctionDecl *OperatorDelete,
                  llvm::Value *NumElements,
                  QualType ElementType,
                  CharUnits CookieSize)
    : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
      ElementType(ElementType), CookieSize(CookieSize) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                       CookieSize);
  }
};
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
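
// Illustrative layout (ABI-specific; under the Itanium C++ ABI): for
// 'delete[] p' where p came from 'new T[n]' and T needs a cookie, the element
// count sits immediately before the array:
//
//   [ cookie: n ][ T[0] ][ T[1] ] ... [ T[n-1] ]
//                 ^-- p points here
//
// ReadArrayCookie recovers n and the original allocation pointer so the
// elements can be destroyed and the whole block handed to operator delete[].
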
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array. If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
                  Ptr.getAlignment());
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E, Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
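
// For example (hypothetical source), deleting a pointer whose destroyed type
// is a constant array exercises the GEP loop above:
//
//   int (*p)[7] = new int[3][7]; // p points to the first row
//   delete[] p;                  // destroyed type is int[7]
//
// Here DeleteTy unpeels from int[7] down to int before the deallocation, and
// the up-front null check makes deleting a null pointer a no-op, as the
// standard requires.
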
static bool isGLValueFromPointerDeref(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(E)) {
    if (!CE->getSubExpr()->isGLValue())
      return false;
    return isGLValueFromPointerDeref(CE->getSubExpr());
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return isGLValueFromPointerDeref(OVE->getSourceExpr());

  if (const auto *BO = dyn_cast<BinaryOperator>(E))
    if (BO->getOpcode() == BO_Comma)
      return isGLValueFromPointerDeref(BO->getRHS());

  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
           isGLValueFromPointerDeref(ACO->getFalseExpr());

  // C++11 [expr.sub]p1:
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
  if (isa<ArraySubscriptExpr>(E))
    return true;

  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_Deref)
      return true;

  return false;
}
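
// For example, this predicate treats each of the following glvalues as
// derived from a pointer dereference (with p, q hypothetical pointers to a
// polymorphic class):
//
//   typeid(*p)          // unary * applied to a pointer
//   typeid(p[0])        // E1[E2] is *((E1)+(E2)) by definition
//   typeid((c, *p))     // comma: classified by its right operand
//   typeid(b ? *p : *q) // conditional: either arm may be a dereference
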
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the
  //   constructor or destructor's class nor one of its bases, the behavior is
  //   undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr.getPointer(), SrcRecordTy);

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator
  //   to a pointer and the pointer is a null pointer value, the typeid
  //   expression throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear. We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
      ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}
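
// For example (hypothetical types), only the potentially-evaluated glvalue
// form reaches EmitTypeidFromVTable:
//
//   struct Poly { virtual ~Poly(); };
//   const std::type_info &a = typeid(Poly); // constant RTTI descriptor
//   const std::type_info &b = typeid(*p);   // p is Poly*: loads the vtable
//                                           // and may throw std::bad_typeid
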
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor's own class or
  //   one of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
                SrcRecordTy);

  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
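
// For example (hypothetical hierarchy), the three result strategies above
// correspond to:
//
//   struct B { virtual ~B(); };
//   struct D : B {};
//
//   D *pd   = dynamic_cast<D *>(pb);    // null-checked; null on failure
//   D &rd   = dynamic_cast<D &>(*pb);   // failure throws std::bad_cast
//   void *v = dynamic_cast<void *>(pb); // pointer to most derived object
//
// Only the pointer form needs the PHI merging the null and not-null paths.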