//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <string>

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return Address(Alloca, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
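  // (For example, on targets such as AMDGPU, allocas live in a dedicated
  // private address space while LangAS::Default maps to the generic address
  // space, so an address-space cast is emitted below.)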
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(AllocaInsertPt);
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
  auto *Alloca = Var.getPointer();
  assert(isa<llvm::AllocaInst>(Alloca) ||
         (isa<llvm::AddrSpaceCastInst>(Alloca) &&
          isa<llvm::AllocaInst>(
              cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand())));

  auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false,
                                    Var.getAlignment().getAsAlign());
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                    /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(
        Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
        Result.getAlignment());
  }
  return Result;
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record, try to promote it into
    // a constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
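    // (For example, with -fmerge-all-constants a binding such as
    //   const int (&r)[3] = {1, 2, 3};
    // can refer to a private constant global rather than a stack temporary
    // that is initialized at run time.)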
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) {
          auto AS = AddrSpace.getValue();
          auto *GV = new llvm::GlobalVariable(
              CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
              llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
              llvm::GlobalValue::NotThreadLocal,
              CGF.getContext().getTargetAddressSpace(AS));
          CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
          GV->setAlignment(alignment.getAsAlign());
          llvm::Constant *C = GV;
          if (AS != LangAS::Default)
            C = TCG.performAddrSpaceCast(
                CGF.CGM, GV, AS, LangAS::Default,
                GV->getValueType()->getPointerTo(
                    CGF.getContext().getTargetAddressSpace(LangAS::Default)));
          // FIXME: Should we put the new global into a COMDAT?
          return Address(C, alignment);
        }
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().startswith("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      Object = Address(llvm::ConstantExpr::getBitCast(Var,
                           ConvertTypeForMem(E->getType())
                             ->getPointerTo(Object.getAddressSpace())),
                       Object.getAlignment());

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         ConvertTypeForMem(E->getType())->getPointerTo()),
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !SanOpts.has(SanitizerKind::HWAddress) &&
          !SanOpts.has(SanitizerKind::Memory) &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (unsigned I = Adjustments.size(); I != 0; --I) {
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress(*this);
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
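/// This is used for ext_vector_type element and shuffle accesses, where the
/// constant \p Elts encodes the indices of the accessed elements.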
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) |
         SanOpts.has(SanitizerKind::Alignment) |
         SanOpts.has(SanitizerKind::ObjectSize) |
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is implementation-
  // defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  uint64_t AlignVal = 0;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal > 1 &&
        (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    // Make sure we're not losing information. Alignment needs to be a power
    // of 2.
    assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Blacklist based on the mangled type.
    if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
            SanitizerKind::Vptr, Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal =
          Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
                                    getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
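/// For example, given 'struct S { int n; char data[]; };', an access to
/// 'data' has no static bound and is therefore not bounds-checked.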
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
  // the two mechanisms.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
    // was produced by macro expansion.
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      // FIXME: Sema doesn't treat a T[1] union member as a flexible array
      // member, only a T[0] or T[] member gets that treatment.
      if (FD->getParent()->isUnion())
        return true;
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
    return IRE->getDecl()->getNextIvar() == nullptr;
  }

  return false;
}

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }
        return CE->getCastKind() != CK_AddressSpaceConversion
                   ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
                   : Builder.CreateAddrSpaceCast(Addr,
                                                 ConvertType(E->getType()));
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align =
      CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
  return Address(EmitScalarExpr(E), Align);
}

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
                        E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
                  LV.getAlignment(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
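///
/// For example, for 'a[i].f' the emitted designator is the address of the
/// field 'f' within element 'i' of the array 'a'.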
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
                             ->getCallReturnType(getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      llvm::Value *V = LV.getPointer(*this);
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
                              getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr());
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
In this case the lambda should 1527 // make a copy of the value of the global host variable. The DRE of the 1528 // captured reference variable cannot be emitted as load from the host 1529 // global variable as compile time constant, since the host variable is not 1530 // accessible on device. The DRE of the captured reference variable has to be 1531 // loaded from captures. 1532 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() && 1533 refExpr->refersToEnclosingVariableOrCapture()) { 1534 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl); 1535 if (MD && MD->getParent()->isLambda() && 1536 MD->getOverloadedOperator() == OO_Call) { 1537 const APValue::LValueBase &base = result.Val.getLValueBase(); 1538 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) { 1539 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) { 1540 if (!VD->hasAttr<CUDADeviceAttr>()) { 1541 return ConstantEmission(); 1542 } 1543 } 1544 } 1545 } 1546 } 1547 1548 // Emit as a constant. 1549 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), 1550 result.Val, resultType); 1551 1552 // Make sure we emit a debug reference to the global variable. 1553 // This should probably fire even for 1554 if (isa<VarDecl>(value)) { 1555 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value))) 1556 EmitDeclRefExprDbgValue(refExpr, result.Val); 1557 } else { 1558 assert(isa<EnumConstantDecl>(value)); 1559 EmitDeclRefExprDbgValue(refExpr, result.Val); 1560 } 1561 1562 // If we emitted a reference constant, we need to dereference that. 1563 if (resultIsReference) 1564 return ConstantEmission::forReference(C); 1565 1566 return ConstantEmission::forValue(C); 1567 } 1568 1569 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, 1570 const MemberExpr *ME) { 1571 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) { 1572 // Try to emit static variable member expressions as DREs. 
1573 return DeclRefExpr::Create( 1574 CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD, 1575 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(), 1576 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse()); 1577 } 1578 return nullptr; 1579 } 1580 1581 CodeGenFunction::ConstantEmission 1582 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { 1583 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME)) 1584 return tryEmitAsConstant(DRE); 1585 return ConstantEmission(); 1586 } 1587 1588 llvm::Value *CodeGenFunction::emitScalarConstant( 1589 const CodeGenFunction::ConstantEmission &Constant, Expr *E) { 1590 assert(Constant && "not a constant"); 1591 if (Constant.isReference()) 1592 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E), 1593 E->getExprLoc()) 1594 .getScalarVal(); 1595 return Constant.getValue(); 1596 } 1597 1598 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, 1599 SourceLocation Loc) { 1600 return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(), 1601 lvalue.getType(), Loc, lvalue.getBaseInfo(), 1602 lvalue.getTBAAInfo(), lvalue.isNontemporal()); 1603 } 1604 1605 static bool hasBooleanRepresentation(QualType Ty) { 1606 if (Ty->isBooleanType()) 1607 return true; 1608 1609 if (const EnumType *ET = Ty->getAs<EnumType>()) 1610 return ET->getDecl()->getIntegerType()->isBooleanType(); 1611 1612 if (const AtomicType *AT = Ty->getAs<AtomicType>()) 1613 return hasBooleanRepresentation(AT->getValueType()); 1614 1615 return false; 1616 } 1617 1618 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, 1619 llvm::APInt &Min, llvm::APInt &End, 1620 bool StrictEnums, bool IsBool) { 1621 const EnumType *ET = Ty->getAs<EnumType>(); 1622 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && 1623 ET && !ET->getDecl()->isFixed(); 1624 if (!IsBool && !IsRegularCPlusPlusEnum) 1625 return false; 1626 1627 if (IsBool) { 1628 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0); 1629 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2); 1630 } else { 1631 const EnumDecl *ED = ET->getDecl(); 1632 llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType()); 1633 unsigned Bitwidth = LTy->getScalarSizeInBits(); 1634 unsigned NumNegativeBits = ED->getNumNegativeBits(); 1635 unsigned NumPositiveBits = ED->getNumPositiveBits(); 1636 1637 if (NumNegativeBits) { 1638 unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1); 1639 assert(NumBits <= Bitwidth); 1640 End = llvm::APInt(Bitwidth, 1) << (NumBits - 1); 1641 Min = -End; 1642 } else { 1643 assert(NumPositiveBits <= Bitwidth); 1644 End = llvm::APInt(Bitwidth, 1) << NumPositiveBits; 1645 Min = llvm::APInt(Bitwidth, 0); 1646 } 1647 } 1648 return true; 1649 } 1650 1651 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { 1652 llvm::APInt Min, End; 1653 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums, 1654 hasBooleanRepresentation(Ty))) 1655 return nullptr; 1656 1657 llvm::MDBuilder MDHelper(getLLVMContext()); 1658 return MDHelper.createRange(Min, End); 1659 } 1660 1661 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, 1662 SourceLocation Loc) { 1663 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool); 1664 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum); 1665 if (!HasBoolCheck && !HasEnumCheck) 1666 return false; 1667 1668 bool IsBool = hasBooleanRepresentation(Ty) || 1669 NSAPI(CGM.getContext()).isObjCBOOLType(Ty); 1670 bool 
NeedsBoolCheck = HasBoolCheck && IsBool; 1671 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>(); 1672 if (!NeedsBoolCheck && !NeedsEnumCheck) 1673 return false; 1674 1675 // Single-bit booleans don't need to be checked. Special-case this to avoid 1676 // a bit width mismatch when handling bitfield values. This is handled by 1677 // EmitFromMemory for the non-bitfield case. 1678 if (IsBool && 1679 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1) 1680 return false; 1681 1682 llvm::APInt Min, End; 1683 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) 1684 return true; 1685 1686 auto &Ctx = getLLVMContext(); 1687 SanitizerScope SanScope(this); 1688 llvm::Value *Check; 1689 --End; 1690 if (!Min) { 1691 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End)); 1692 } else { 1693 llvm::Value *Upper = 1694 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End)); 1695 llvm::Value *Lower = 1696 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min)); 1697 Check = Builder.CreateAnd(Upper, Lower); 1698 } 1699 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), 1700 EmitCheckTypeDescriptor(Ty)}; 1701 SanitizerMask Kind = 1702 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; 1703 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue, 1704 StaticArgs, EmitCheckValue(Value)); 1705 return true; 1706 } 1707 1708 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, 1709 QualType Ty, 1710 SourceLocation Loc, 1711 LValueBaseInfo BaseInfo, 1712 TBAAAccessInfo TBAAInfo, 1713 bool isNontemporal) { 1714 if (!CGM.getCodeGenOpts().PreserveVec3Type) { 1715 // For better performance, handle vector loads differently. 1716 if (Ty->isVectorType()) { 1717 const llvm::Type *EltTy = Addr.getElementType(); 1718 1719 const auto *VTy = cast<llvm::FixedVectorType>(EltTy); 1720 1721 // Handle vectors of size 3 like size 4 for better performance. 1722 if (VTy->getNumElements() == 3) { 1723 1724 // Bitcast to vec4 type. 1725 auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4); 1726 Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4"); 1727 // Now load value. 1728 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4"); 1729 1730 // Shuffle vector to get vec3. 1731 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, 1732 "extractVec"); 1733 return EmitFromMemory(V, Ty); 1734 } 1735 } 1736 } 1737 1738 // Atomic operations have to be done on integral types. 1739 LValue AtomicLValue = 1740 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); 1741 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { 1742 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal(); 1743 } 1744 1745 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile); 1746 if (isNontemporal) { 1747 llvm::MDNode *Node = llvm::MDNode::get( 1748 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); 1749 Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); 1750 } 1751 1752 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); 1753 1754 if (EmitScalarRangeCheck(Load, Ty, Loc)) { 1755 // In order to prevent the optimizer from throwing away the check, don't 1756 // attach range metadata to the load. 
1757 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1758 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1759 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1760
1761 return EmitFromMemory(Load, Ty);
1762 }
1763
1764 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1765 // Bool has a different representation in memory than in registers.
1766 if (hasBooleanRepresentation(Ty)) {
1767 // This should really always be an i1, but sometimes it's already
1768 // an i8, and it's awkward to track those cases down.
1769 if (Value->getType()->isIntegerTy(1))
1770 return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1771 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1772 "wrong value rep of bool");
1773 }
1774
1775 return Value;
1776 }
1777
1778 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1779 // Bool has a different representation in memory than in registers.
1780 if (hasBooleanRepresentation(Ty)) {
1781 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1782 "wrong value rep of bool");
1783 return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1784 }
1785
1786 return Value;
1787 }
1788
1789 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
1790 // MatrixType), if it points to an array (the memory type of MatrixType).
1791 static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
1792 bool IsVector = true) {
1793 auto *ArrayTy = dyn_cast<llvm::ArrayType>(
1794 cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
1795 if (ArrayTy && IsVector) {
1796 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
1797 ArrayTy->getNumElements());
1798
1799 return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
1800 }
1801 auto *VectorTy = dyn_cast<llvm::VectorType>(
1802 cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
1803 if (VectorTy && !IsVector) {
1804 auto *ArrayTy = llvm::ArrayType::get(
1805 VectorTy->getElementType(),
1806 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
1807
1808 return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
1809 }
1810
1811 return Addr;
1812 }
1813
1814 // Emit a store of a matrix LValue. This may require casting the original
1815 // pointer to memory address (ArrayType) to a pointer to the value type
1816 // (VectorType).
1817 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
1818 bool isInit, CodeGenFunction &CGF) {
1819 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
1820 value->getType()->isVectorTy());
1821 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
1822 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
1823 lvalue.isNontemporal());
1824 }
1825
1826 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
1827 bool Volatile, QualType Ty,
1828 LValueBaseInfo BaseInfo,
1829 TBAAAccessInfo TBAAInfo,
1830 bool isInit, bool isNontemporal) {
1831 if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1832 // Handle vectors differently to get better performance.
1833 if (Ty->isVectorType()) {
1834 llvm::Type *SrcTy = Value->getType();
1835 auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
1836 // Handle vec3 specially.
1837 if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
1838 // Our source is a vec3, do a shuffle vector to make it a vec4.
1839 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1}, 1840 "extractVec"); 1841 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4); 1842 } 1843 if (Addr.getElementType() != SrcTy) { 1844 Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp"); 1845 } 1846 } 1847 } 1848 1849 Value = EmitToMemory(Value, Ty); 1850 1851 LValue AtomicLValue = 1852 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); 1853 if (Ty->isAtomicType() || 1854 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) { 1855 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit); 1856 return; 1857 } 1858 1859 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); 1860 if (isNontemporal) { 1861 llvm::MDNode *Node = 1862 llvm::MDNode::get(Store->getContext(), 1863 llvm::ConstantAsMetadata::get(Builder.getInt32(1))); 1864 Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); 1865 } 1866 1867 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); 1868 } 1869 1870 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, 1871 bool isInit) { 1872 if (lvalue.getType()->isConstantMatrixType()) { 1873 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this); 1874 return; 1875 } 1876 1877 EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(), 1878 lvalue.getType(), lvalue.getBaseInfo(), 1879 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); 1880 } 1881 1882 // Emit a load of a LValue of matrix type. This may require casting the pointer 1883 // to memory address (ArrayType) to a pointer to the value type (VectorType). 1884 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, 1885 CodeGenFunction &CGF) { 1886 assert(LV.getType()->isConstantMatrixType()); 1887 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF); 1888 LV.setAddress(Addr); 1889 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc)); 1890 } 1891 1892 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this 1893 /// method emits the address of the lvalue, then loads the result as an rvalue, 1894 /// returning the rvalue. 1895 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { 1896 if (LV.isObjCWeak()) { 1897 // load of a __weak object. 1898 Address AddrWeakObj = LV.getAddress(*this); 1899 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, 1900 AddrWeakObj)); 1901 } 1902 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { 1903 // In MRC mode, we do a load+autorelease. 1904 if (!getLangOpts().ObjCAutoRefCount) { 1905 return RValue::get(EmitARCLoadWeak(LV.getAddress(*this))); 1906 } 1907 1908 // In ARC mode, we load retained and then consume the value. 1909 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this)); 1910 Object = EmitObjCConsumeObject(LV.getType(), Object); 1911 return RValue::get(Object); 1912 } 1913 1914 if (LV.isSimple()) { 1915 assert(!LV.getType()->isFunctionType()); 1916 1917 if (LV.getType()->isConstantMatrixType()) 1918 return EmitLoadOfMatrixLValue(LV, Loc, *this); 1919 1920 // Everything needs a load. 
1921 return RValue::get(EmitLoadOfScalar(LV, Loc)); 1922 } 1923 1924 if (LV.isVectorElt()) { 1925 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(), 1926 LV.isVolatileQualified()); 1927 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(), 1928 "vecext")); 1929 } 1930 1931 // If this is a reference to a subset of the elements of a vector, either 1932 // shuffle the input or extract/insert them as appropriate. 1933 if (LV.isExtVectorElt()) { 1934 return EmitLoadOfExtVectorElementLValue(LV); 1935 } 1936 1937 // Global Register variables always invoke intrinsics 1938 if (LV.isGlobalReg()) 1939 return EmitLoadOfGlobalRegLValue(LV); 1940 1941 if (LV.isMatrixElt()) { 1942 llvm::LoadInst *Load = 1943 Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified()); 1944 return RValue::get( 1945 Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext")); 1946 } 1947 1948 assert(LV.isBitField() && "Unknown LValue type!"); 1949 return EmitLoadOfBitfieldLValue(LV, Loc); 1950 } 1951 1952 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, 1953 SourceLocation Loc) { 1954 const CGBitFieldInfo &Info = LV.getBitFieldInfo(); 1955 1956 // Get the output type. 1957 llvm::Type *ResLTy = ConvertType(LV.getType()); 1958 1959 Address Ptr = LV.getBitFieldAddress(); 1960 llvm::Value *Val = 1961 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load"); 1962 1963 bool UseVolatile = LV.isVolatileQualified() && 1964 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); 1965 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; 1966 const unsigned StorageSize = 1967 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; 1968 if (Info.IsSigned) { 1969 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize); 1970 unsigned HighBits = StorageSize - Offset - Info.Size; 1971 if (HighBits) 1972 Val = Builder.CreateShl(Val, HighBits, "bf.shl"); 1973 if (Offset + HighBits) 1974 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr"); 1975 } else { 1976 if (Offset) 1977 Val = Builder.CreateLShr(Val, Offset, "bf.lshr"); 1978 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize) 1979 Val = Builder.CreateAnd( 1980 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear"); 1981 } 1982 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast"); 1983 EmitScalarRangeCheck(Val, LV.getType(), Loc); 1984 return RValue::get(Val); 1985 } 1986 1987 // If this is a reference to a subset of the elements of a vector, create an 1988 // appropriate shufflevector. 1989 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) { 1990 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(), 1991 LV.isVolatileQualified()); 1992 1993 const llvm::Constant *Elts = LV.getExtVectorElts(); 1994 1995 // If the result of the expression is a non-vector type, we must be extracting 1996 // a single element. Just codegen as an extractelement. 
1997 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1998 if (!ExprVT) {
1999 unsigned InIdx = getAccessedFieldNo(0, Elts);
2000 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2001 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2002 }
2003
2004 // Always use shuffle vector to try to retain the original program structure
2005 unsigned NumResultElts = ExprVT->getNumElements();
2006
2007 SmallVector<int, 4> Mask;
2008 for (unsigned i = 0; i != NumResultElts; ++i)
2009 Mask.push_back(getAccessedFieldNo(i, Elts));
2010
2011 Vec = Builder.CreateShuffleVector(Vec, Mask);
2012 return RValue::get(Vec);
2013 }
2014
2015 /// Generates lvalue for partial ext_vector access.
2016 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2017 Address VectorAddress = LV.getExtVectorAddress();
2018 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2019 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2020
2021 Address CastToPointerElement =
2022 Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
2023 "conv.ptr.element");
2024
2025 const llvm::Constant *Elts = LV.getExtVectorElts();
2026 unsigned ix = getAccessedFieldNo(0, Elts);
2027
2028 Address VectorBasePtrPlusIx =
2029 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2030 "vector.elt");
2031
2032 return VectorBasePtrPlusIx;
2033 }
2034
2035 /// Loads of global named registers are always calls to intrinsics.
2036 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2037 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2038 "Bad type for register variable");
2039 llvm::MDNode *RegName = cast<llvm::MDNode>(
2040 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2041
2042 // We accept integer and pointer types only
2043 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2044 llvm::Type *Ty = OrigTy;
2045 if (OrigTy->isPointerTy())
2046 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2047 llvm::Type *Types[] = { Ty };
2048
2049 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2050 llvm::Value *Call = Builder.CreateCall(
2051 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2052 if (OrigTy->isPointerTy())
2053 Call = Builder.CreateIntToPtr(Call, OrigTy);
2054 return RValue::get(Call);
2055 }
2056
2057 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2058 /// lvalue, where both are guaranteed to have the same type, and that type
2059 /// is 'Ty'.
2060 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2061 bool isInit) {
2062 if (!Dst.isSimple()) {
2063 if (Dst.isVectorElt()) {
2064 // Read/modify/write the vector, inserting the new element.
2065 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2066 Dst.isVolatileQualified());
2067 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2068 Dst.getVectorIdx(), "vecins");
2069 Builder.CreateStore(Vec, Dst.getVectorAddress(),
2070 Dst.isVolatileQualified());
2071 return;
2072 }
2073
2074 // If this is an update of extended vector elements, insert them as
2075 // appropriate.
2076 if (Dst.isExtVectorElt()) 2077 return EmitStoreThroughExtVectorComponentLValue(Src, Dst); 2078 2079 if (Dst.isGlobalReg()) 2080 return EmitStoreThroughGlobalRegLValue(Src, Dst); 2081 2082 if (Dst.isMatrixElt()) { 2083 llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress()); 2084 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), 2085 Dst.getMatrixIdx(), "matins"); 2086 Builder.CreateStore(Vec, Dst.getMatrixAddress(), 2087 Dst.isVolatileQualified()); 2088 return; 2089 } 2090 2091 assert(Dst.isBitField() && "Unknown LValue type"); 2092 return EmitStoreThroughBitfieldLValue(Src, Dst); 2093 } 2094 2095 // There's special magic for assigning into an ARC-qualified l-value. 2096 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { 2097 switch (Lifetime) { 2098 case Qualifiers::OCL_None: 2099 llvm_unreachable("present but none"); 2100 2101 case Qualifiers::OCL_ExplicitNone: 2102 // nothing special 2103 break; 2104 2105 case Qualifiers::OCL_Strong: 2106 if (isInit) { 2107 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal())); 2108 break; 2109 } 2110 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true); 2111 return; 2112 2113 case Qualifiers::OCL_Weak: 2114 if (isInit) 2115 // Initialize and then skip the primitive store. 2116 EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal()); 2117 else 2118 EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(), 2119 /*ignore*/ true); 2120 return; 2121 2122 case Qualifiers::OCL_Autoreleasing: 2123 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(), 2124 Src.getScalarVal())); 2125 // fall into the normal path 2126 break; 2127 } 2128 } 2129 2130 if (Dst.isObjCWeak() && !Dst.isNonGC()) { 2131 // load of a __weak object. 2132 Address LvalueDst = Dst.getAddress(*this); 2133 llvm::Value *src = Src.getScalarVal(); 2134 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); 2135 return; 2136 } 2137 2138 if (Dst.isObjCStrong() && !Dst.isNonGC()) { 2139 // load of a __strong object. 2140 Address LvalueDst = Dst.getAddress(*this); 2141 llvm::Value *src = Src.getScalarVal(); 2142 if (Dst.isObjCIvar()) { 2143 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); 2144 llvm::Type *ResultType = IntPtrTy; 2145 Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp()); 2146 llvm::Value *RHS = dst.getPointer(); 2147 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); 2148 llvm::Value *LHS = 2149 Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType, 2150 "sub.ptr.lhs.cast"); 2151 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); 2152 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, 2153 BytesBetween); 2154 } else if (Dst.isGlobalObjCRef()) { 2155 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, 2156 Dst.isThreadLocalRef()); 2157 } 2158 else 2159 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); 2160 return; 2161 } 2162 2163 assert(Src.isScalar() && "Can't emit an agg store with this method"); 2164 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit); 2165 } 2166 2167 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, 2168 llvm::Value **Result) { 2169 const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); 2170 llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType()); 2171 Address Ptr = Dst.getBitFieldAddress(); 2172 2173 // Get the source value, truncated to the width of the bit-field. 
2174 llvm::Value *SrcVal = Src.getScalarVal();
2175
2176 // Cast the source to the storage type and shift it into place.
2177 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2178 /*isSigned=*/false);
2179 llvm::Value *MaskedVal = SrcVal;
2180
2181 const bool UseVolatile =
2182 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2183 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2184 const unsigned StorageSize =
2185 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2186 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2187 // See if there are other bits in the bitfield's storage we'll need to load
2188 // and mask together with the source before storing.
2189 if (StorageSize != Info.Size) {
2190 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2191 llvm::Value *Val =
2192 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2193
2194 // Mask the source value as needed.
2195 if (!hasBooleanRepresentation(Dst.getType()))
2196 SrcVal = Builder.CreateAnd(
2197 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2198 "bf.value");
2199 MaskedVal = SrcVal;
2200 if (Offset)
2201 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2202
2203 // Mask out the original value.
2204 Val = Builder.CreateAnd(
2205 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2206 "bf.clear");
2207
2208 // Or together the unchanged values and the source value.
2209 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2210 } else {
2211 assert(Offset == 0);
2212 // According to the AAPCS:
2213 // When a volatile bit-field is written, and its container does not overlap
2214 // with any non-bit-field member, its container must be read exactly once
2215 // and written exactly once using the access width appropriate to the type
2216 // of the container. The two accesses are not atomic.
2217 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2218 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2219 Builder.CreateLoad(Ptr, true, "bf.load");
2220 }
2221
2222 // Write the new value back out.
2223 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2224
2225 // Return the new value of the bit-field, if requested.
2226 if (Result) {
2227 llvm::Value *ResultVal = MaskedVal;
2228
2229 // Sign extend the value if needed.
2230 if (Info.IsSigned) {
2231 assert(Info.Size <= StorageSize);
2232 unsigned HighBits = StorageSize - Info.Size;
2233 if (HighBits) {
2234 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2235 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2236 }
2237 }
2238
2239 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2240 "bf.result.cast");
2241 *Result = EmitFromMemory(ResultVal, Dst.getType());
2242 }
2243 }
2244
2245 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2246 LValue Dst) {
2247 // This access turns into a read/modify/write of the vector. Load the input
2248 // value now.
2249 llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
2250 Dst.isVolatileQualified());
2251 const llvm::Constant *Elts = Dst.getExtVectorElts();
2252
2253 llvm::Value *SrcVal = Src.getScalarVal();
2254
2255 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2256 unsigned NumSrcElts = VTy->getNumElements();
2257 unsigned NumDstElts =
2258 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2259 if (NumDstElts == NumSrcElts) {
2260 // Use a shuffle vector if the src and destination have the same number
2261 // of elements, and restore the vector mask since it is on the side it
2262 // will be stored.
2263 SmallVector<int, 4> Mask(NumDstElts);
2264 for (unsigned i = 0; i != NumSrcElts; ++i)
2265 Mask[getAccessedFieldNo(i, Elts)] = i;
2266
2267 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2268 } else if (NumDstElts > NumSrcElts) {
2269 // Extend the source vector to the same length and then shuffle it
2270 // into the destination.
2271 // FIXME: since we're shuffling with undef, can we just use the indices
2272 // into that? This could be simpler.
2273 SmallVector<int, 4> ExtMask;
2274 for (unsigned i = 0; i != NumSrcElts; ++i)
2275 ExtMask.push_back(i);
2276 ExtMask.resize(NumDstElts, -1);
2277 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2278 // build identity
2279 SmallVector<int, 4> Mask;
2280 for (unsigned i = 0; i != NumDstElts; ++i)
2281 Mask.push_back(i);
2282
2283 // When the vector size is odd and .odd or .hi is used, the last element
2284 // of the Elts constant array will be one past the size of the vector.
2285 // Ignore the last element here, if it is greater than the mask size.
2286 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2287 NumSrcElts--;
2288
2289 // Modify the mask entries for the elements that get shuffled in.
2290 for (unsigned i = 0; i != NumSrcElts; ++i)
2291 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2292 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2293 } else {
2294 // We should never shorten the vector
2295 llvm_unreachable("unexpected shorten vector length");
2296 }
2297 } else {
2298 // If the Src is a scalar (not a vector) it must be updating one element.
2299 unsigned InIdx = getAccessedFieldNo(0, Elts);
2300 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2301 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2302 }
2303
2304 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2305 Dst.isVolatileQualified());
2306 }
2307
2308 /// Stores of global named registers are always calls to intrinsics.
2309 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2310 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2311 "Bad type for register variable");
2312 llvm::MDNode *RegName = cast<llvm::MDNode>(
2313 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2314 assert(RegName && "Register LValue is not metadata");
2315
2316 // We accept integer and pointer types only
2317 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2318 llvm::Type *Ty = OrigTy;
2319 if (OrigTy->isPointerTy())
2320 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2321 llvm::Type *Types[] = { Ty };
2322
2323 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2324 llvm::Value *Value = Src.getScalarVal();
2325 if (OrigTy->isPointerTy())
2326 Value = Builder.CreatePtrToInt(Value, Ty);
2327 Builder.CreateCall(
2328 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2329 }
2330
2331 // setObjCGCLValueClass - sets the class of the lvalue for the purpose of
2332 // generating write-barrier APIs. It is currently a global, ivar,
2333 // or neither.
2334 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2335 LValue &LV,
2336 bool IsMemberAccess=false) {
2337 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2338 return;
2339
2340 if (isa<ObjCIvarRefExpr>(E)) {
2341 QualType ExpTy = E->getType();
2342 if (IsMemberAccess && ExpTy->isPointerType()) {
2343 // If the ivar is a structure pointer, assigning to a field of
2344 // this struct follows gcc's behavior and conservatively makes it a
2345 // non-ivar write-barrier.
2346 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2347 if (ExpTy->isRecordType()) {
2348 LV.setObjCIvar(false);
2349 return;
2350 }
2351 }
2352 LV.setObjCIvar(true);
2353 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2354 LV.setBaseIvarExp(Exp->getBase());
2355 LV.setObjCArray(E->getType()->isArrayType());
2356 return;
2357 }
2358
2359 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2360 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2361 if (VD->hasGlobalStorage()) {
2362 LV.setGlobalObjCRef(true);
2363 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2364 }
2365 }
2366 LV.setObjCArray(E->getType()->isArrayType());
2367 return;
2368 }
2369
2370 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2371 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2372 return;
2373 }
2374
2375 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2376 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2377 if (LV.isObjCIvar()) {
2378 // If the cast is to a structure pointer, follow gcc's behavior and make
2379 // it a non-ivar write-barrier.
2380 QualType ExpTy = E->getType();
2381 if (ExpTy->isPointerType())
2382 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2383 if (ExpTy->isRecordType())
2384 LV.setObjCIvar(false);
2385 }
2386 return;
2387 }
2388
2389 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2390 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2391 return;
2392 }
2393
2394 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2395 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2396 return;
2397 }
2398
2399 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2400 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2401 return;
2402 }
2403
2404 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2405 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2406 return;
2407 }
2408
2409 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2410 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2411 if (LV.isObjCIvar() && !LV.isObjCArray())
2412 // Using array syntax to assign to what an ivar points to is not the
2413 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2414 LV.setObjCIvar(false);
2415 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2416 // Using array syntax to assign to what a global points to is not the
2417 // same as assigning to the global itself. {id *G;} G[i] = 0;
2418 LV.setGlobalObjCRef(false);
2419 return;
2420 }
2421
2422 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2423 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2424 // We don't know if the member is an 'ivar', but this flag is looked at
2425 // only in the context of LV.isObjCIvar().
2426 LV.setObjCArray(E->getType()->isArrayType());
2427 return;
2428 }
2429 }
2430
2431 static llvm::Value *
2432 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
2433 llvm::Value *V, llvm::Type *IRType,
2434 StringRef Name = StringRef()) {
2435 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
2436 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
2437 }
2438
2439 static LValue EmitThreadPrivateVarDeclLValue(
2440 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2441 llvm::Type *RealVarTy, SourceLocation Loc) {
2442 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2443 Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2444 CGF, VD, Addr, Loc);
2445 else
2446 Addr =
2447 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2448
2449 Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
2450 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2451 }
2452
2453 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2454 const VarDecl *VD, QualType T) {
2455 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2456 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2457 // Return an invalid address if the variable is MT_To and unified
2458 // memory is not enabled. For all other cases: MT_Link and
2459 // MT_To with unified memory, return a valid address.
2460 if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To && 2461 !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) 2462 return Address::invalid(); 2463 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) || 2464 (*Res == OMPDeclareTargetDeclAttr::MT_To && 2465 CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) && 2466 "Expected link clause OR to clause with unified memory enabled."); 2467 QualType PtrTy = CGF.getContext().getPointerType(VD->getType()); 2468 Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD); 2469 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>()); 2470 } 2471 2472 Address 2473 CodeGenFunction::EmitLoadOfReference(LValue RefLVal, 2474 LValueBaseInfo *PointeeBaseInfo, 2475 TBAAAccessInfo *PointeeTBAAInfo) { 2476 llvm::LoadInst *Load = 2477 Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile()); 2478 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); 2479 2480 CharUnits Align = CGM.getNaturalTypeAlignment( 2481 RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo, 2482 /* forPointeeType= */ true); 2483 return Address(Load, Align); 2484 } 2485 2486 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { 2487 LValueBaseInfo PointeeBaseInfo; 2488 TBAAAccessInfo PointeeTBAAInfo; 2489 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo, 2490 &PointeeTBAAInfo); 2491 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), 2492 PointeeBaseInfo, PointeeTBAAInfo); 2493 } 2494 2495 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, 2496 const PointerType *PtrTy, 2497 LValueBaseInfo *BaseInfo, 2498 TBAAAccessInfo *TBAAInfo) { 2499 llvm::Value *Addr = Builder.CreateLoad(Ptr); 2500 return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), 2501 BaseInfo, TBAAInfo, 2502 /*forPointeeType=*/true)); 2503 } 2504 2505 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, 2506 const PointerType *PtrTy) { 2507 LValueBaseInfo BaseInfo; 2508 TBAAAccessInfo TBAAInfo; 2509 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo); 2510 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo); 2511 } 2512 2513 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 2514 const Expr *E, const VarDecl *VD) { 2515 QualType T = E->getType(); 2516 2517 // If it's thread_local, emit a call to its wrapper function instead. 2518 if (VD->getTLSKind() == VarDecl::TLS_Dynamic && 2519 CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD)) 2520 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); 2521 // Check if the variable is marked as declare target with link clause in 2522 // device codegen. 2523 if (CGF.getLangOpts().OpenMPIsDevice) { 2524 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T); 2525 if (Addr.isValid()) 2526 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2527 } 2528 2529 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 2530 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); 2531 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 2532 CharUnits Alignment = CGF.getContext().getDeclAlign(VD); 2533 Address Addr(V, Alignment); 2534 // Emit reference to the private copy of the variable if it is an OpenMP 2535 // threadprivate variable. 
2536 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && 2537 VD->hasAttr<OMPThreadPrivateDeclAttr>()) { 2538 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, 2539 E->getExprLoc()); 2540 } 2541 LValue LV = VD->getType()->isReferenceType() ? 2542 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(), 2543 AlignmentSource::Decl) : 2544 CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2545 setObjCGCLValueClass(CGF.getContext(), E, LV); 2546 return LV; 2547 } 2548 2549 static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, 2550 GlobalDecl GD) { 2551 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 2552 if (FD->hasAttr<WeakRefAttr>()) { 2553 ConstantAddress aliasee = CGM.GetWeakRefReference(FD); 2554 return aliasee.getPointer(); 2555 } 2556 2557 llvm::Constant *V = CGM.GetAddrOfFunction(GD); 2558 if (!FD->hasPrototype()) { 2559 if (const FunctionProtoType *Proto = 2560 FD->getType()->getAs<FunctionProtoType>()) { 2561 // Ugly case: for a K&R-style definition, the type of the definition 2562 // isn't the same as the type of a use. Correct for this with a 2563 // bitcast. 2564 QualType NoProtoType = 2565 CGM.getContext().getFunctionNoProtoType(Proto->getReturnType()); 2566 NoProtoType = CGM.getContext().getPointerType(NoProtoType); 2567 V = llvm::ConstantExpr::getBitCast(V, 2568 CGM.getTypes().ConvertType(NoProtoType)); 2569 } 2570 } 2571 return V; 2572 } 2573 2574 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, 2575 GlobalDecl GD) { 2576 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 2577 llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD); 2578 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 2579 return CGF.MakeAddrLValue(V, E->getType(), Alignment, 2580 AlignmentSource::Decl); 2581 } 2582 2583 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, 2584 llvm::Value *ThisValue) { 2585 QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); 2586 LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); 2587 return CGF.EmitLValueForField(LV, FD); 2588 } 2589 2590 /// Named Registers are named metadata pointing to the register name 2591 /// which will be read from/written to as an argument to the intrinsic 2592 /// @llvm.read/write_register. 2593 /// So far, only the name is being passed down, but other options such as 2594 /// register type, allocation type or even optimization options could be 2595 /// passed down via the metadata node. 
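/// As a rough, illustrative sketch (the register name "sp" below is only an
/// example chosen for this comment, not something this file defines): a
/// global register variable such as
/// \code
///   register unsigned long current_sp asm("sp");
/// \endcode
/// is modeled with a named metadata node, approximately
/// \code
///   !llvm.named.register.sp = !{!0}
///   !0 = !{!"sp"}
/// \endcode
/// and reads and writes of the variable then lower to calls to
/// @llvm.read_register / @llvm.write_register with that node as the argument.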
2596 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { 2597 SmallString<64> Name("llvm.named.register."); 2598 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); 2599 assert(Asm->getLabel().size() < 64-Name.size() && 2600 "Register name too big"); 2601 Name.append(Asm->getLabel()); 2602 llvm::NamedMDNode *M = 2603 CGM.getModule().getOrInsertNamedMetadata(Name); 2604 if (M->getNumOperands() == 0) { 2605 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), 2606 Asm->getLabel()); 2607 llvm::Metadata *Ops[] = {Str}; 2608 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); 2609 } 2610 2611 CharUnits Alignment = CGM.getContext().getDeclAlign(VD); 2612 2613 llvm::Value *Ptr = 2614 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)); 2615 return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType()); 2616 } 2617 2618 /// Determine whether we can emit a reference to \p VD from the current 2619 /// context, despite not necessarily having seen an odr-use of the variable in 2620 /// this context. 2621 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, 2622 const DeclRefExpr *E, 2623 const VarDecl *VD, 2624 bool IsConstant) { 2625 // For a variable declared in an enclosing scope, do not emit a spurious 2626 // reference even if we have a capture, as that will emit an unwarranted 2627 // reference to our capture state, and will likely generate worse code than 2628 // emitting a local copy. 2629 if (E->refersToEnclosingVariableOrCapture()) 2630 return false; 2631 2632 // For a local declaration declared in this function, we can always reference 2633 // it even if we don't have an odr-use. 2634 if (VD->hasLocalStorage()) { 2635 return VD->getDeclContext() == 2636 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl); 2637 } 2638 2639 // For a global declaration, we can emit a reference to it if we know 2640 // for sure that we are able to emit a definition of it. 2641 VD = VD->getDefinition(CGF.getContext()); 2642 if (!VD) 2643 return false; 2644 2645 // Don't emit a spurious reference if it might be to a variable that only 2646 // exists on a different device / target. 2647 // FIXME: This is unnecessarily broad. Check whether this would actually be a 2648 // cross-target reference. 2649 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || 2650 CGF.getLangOpts().OpenCL) { 2651 return false; 2652 } 2653 2654 // We can emit a spurious reference only if the linkage implies that we'll 2655 // be emitting a non-interposable symbol that will be retained until link 2656 // time. 
2657 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) { 2658 case llvm::GlobalValue::ExternalLinkage: 2659 case llvm::GlobalValue::LinkOnceODRLinkage: 2660 case llvm::GlobalValue::WeakODRLinkage: 2661 case llvm::GlobalValue::InternalLinkage: 2662 case llvm::GlobalValue::PrivateLinkage: 2663 return true; 2664 default: 2665 return false; 2666 } 2667 } 2668 2669 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 2670 const NamedDecl *ND = E->getDecl(); 2671 QualType T = E->getType(); 2672 2673 assert(E->isNonOdrUse() != NOUR_Unevaluated && 2674 "should not emit an unevaluated operand"); 2675 2676 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 2677 // Global Named registers access via intrinsics only 2678 if (VD->getStorageClass() == SC_Register && 2679 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) 2680 return EmitGlobalNamedRegister(VD, CGM); 2681 2682 // If this DeclRefExpr does not constitute an odr-use of the variable, 2683 // we're not permitted to emit a reference to it in general, and it might 2684 // not be captured if capture would be necessary for a use. Emit the 2685 // constant value directly instead. 2686 if (E->isNonOdrUse() == NOUR_Constant && 2687 (VD->getType()->isReferenceType() || 2688 !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) { 2689 VD->getAnyInitializer(VD); 2690 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( 2691 E->getLocation(), *VD->evaluateValue(), VD->getType()); 2692 assert(Val && "failed to emit constant expression"); 2693 2694 Address Addr = Address::invalid(); 2695 if (!VD->getType()->isReferenceType()) { 2696 // Spill the constant value to a global. 2697 Addr = CGM.createUnnamedGlobalFrom(*VD, Val, 2698 getContext().getDeclAlign(VD)); 2699 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType()); 2700 auto *PTy = llvm::PointerType::get( 2701 VarTy, getContext().getTargetAddressSpace(VD->getType())); 2702 if (PTy != Addr.getType()) 2703 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy); 2704 } else { 2705 // Should we be using the alignment of the constant pointer we emitted? 2706 CharUnits Alignment = 2707 CGM.getNaturalTypeAlignment(E->getType(), 2708 /* BaseInfo= */ nullptr, 2709 /* TBAAInfo= */ nullptr, 2710 /* forPointeeType= */ true); 2711 Addr = Address(Val, Alignment); 2712 } 2713 return MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2714 } 2715 2716 // FIXME: Handle other kinds of non-odr-use DeclRefExprs. 2717 2718 // Check for captured variables. 2719 if (E->refersToEnclosingVariableOrCapture()) { 2720 VD = VD->getCanonicalDecl(); 2721 if (auto *FD = LambdaCaptureFields.lookup(VD)) 2722 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); 2723 if (CapturedStmtInfo) { 2724 auto I = LocalDeclMap.find(VD); 2725 if (I != LocalDeclMap.end()) { 2726 LValue CapLVal; 2727 if (VD->getType()->isReferenceType()) 2728 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(), 2729 AlignmentSource::Decl); 2730 else 2731 CapLVal = MakeAddrLValue(I->second, T); 2732 // Mark lvalue as nontemporal if the variable is marked as nontemporal 2733 // in simd context. 
2734 if (getLangOpts().OpenMP && 2735 CGM.getOpenMPRuntime().isNontemporalDecl(VD)) 2736 CapLVal.setNontemporal(/*Value=*/true); 2737 return CapLVal; 2738 } 2739 LValue CapLVal = 2740 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), 2741 CapturedStmtInfo->getContextValue()); 2742 CapLVal = MakeAddrLValue( 2743 Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)), 2744 CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), 2745 CapLVal.getTBAAInfo()); 2746 // Mark lvalue as nontemporal if the variable is marked as nontemporal 2747 // in simd context. 2748 if (getLangOpts().OpenMP && 2749 CGM.getOpenMPRuntime().isNontemporalDecl(VD)) 2750 CapLVal.setNontemporal(/*Value=*/true); 2751 return CapLVal; 2752 } 2753 2754 assert(isa<BlockDecl>(CurCodeDecl)); 2755 Address addr = GetAddrOfBlockDecl(VD); 2756 return MakeAddrLValue(addr, T, AlignmentSource::Decl); 2757 } 2758 } 2759 2760 // FIXME: We should be able to assert this for FunctionDecls as well! 2761 // FIXME: We should be able to assert this for all DeclRefExprs, not just 2762 // those with a valid source location. 2763 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || 2764 !E->getLocation().isValid()) && 2765 "Should not use decl without marking it used!"); 2766 2767 if (ND->hasAttr<WeakRefAttr>()) { 2768 const auto *VD = cast<ValueDecl>(ND); 2769 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); 2770 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl); 2771 } 2772 2773 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 2774 // Check if this is a global variable. 2775 if (VD->hasLinkage() || VD->isStaticDataMember()) 2776 return EmitGlobalVarDeclLValue(*this, E, VD); 2777 2778 Address addr = Address::invalid(); 2779 2780 // The variable should generally be present in the local decl map. 2781 auto iter = LocalDeclMap.find(VD); 2782 if (iter != LocalDeclMap.end()) { 2783 addr = iter->second; 2784 2785 // Otherwise, it might be static local we haven't emitted yet for 2786 // some reason; most likely, because it's in an outer function. 2787 } else if (VD->isStaticLocal()) { 2788 addr = Address(CGM.getOrCreateStaticVarDecl( 2789 *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)), 2790 getContext().getDeclAlign(VD)); 2791 2792 // No other cases for now. 2793 } else { 2794 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?"); 2795 } 2796 2797 2798 // Check for OpenMP threadprivate variables. 2799 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && 2800 VD->hasAttr<OMPThreadPrivateDeclAttr>()) { 2801 return EmitThreadPrivateVarDeclLValue( 2802 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()), 2803 E->getExprLoc()); 2804 } 2805 2806 // Drill into block byref variables. 2807 bool isBlockByref = VD->isEscapingByref(); 2808 if (isBlockByref) { 2809 addr = emitBlockByrefAddress(addr, VD); 2810 } 2811 2812 // Drill into reference types. 2813 LValue LV = VD->getType()->isReferenceType() ? 
2814 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) : 2815 MakeAddrLValue(addr, T, AlignmentSource::Decl); 2816 2817 bool isLocalStorage = VD->hasLocalStorage(); 2818 2819 bool NonGCable = isLocalStorage && 2820 !VD->getType()->isReferenceType() && 2821 !isBlockByref; 2822 if (NonGCable) { 2823 LV.getQuals().removeObjCGCAttr(); 2824 LV.setNonGC(true); 2825 } 2826 2827 bool isImpreciseLifetime = 2828 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); 2829 if (isImpreciseLifetime) 2830 LV.setARCPreciseLifetime(ARCImpreciseLifetime); 2831 setObjCGCLValueClass(getContext(), E, LV); 2832 return LV; 2833 } 2834 2835 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2836 return EmitFunctionDeclLValue(*this, E, FD); 2837 2838 // FIXME: While we're emitting a binding from an enclosing scope, all other 2839 // DeclRefExprs we see should be implicitly treated as if they also refer to 2840 // an enclosing scope. 2841 if (const auto *BD = dyn_cast<BindingDecl>(ND)) 2842 return EmitLValue(BD->getBinding()); 2843 2844 // We can form DeclRefExprs naming GUID declarations when reconstituting 2845 // non-type template parameters into expressions. 2846 if (const auto *GD = dyn_cast<MSGuidDecl>(ND)) 2847 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T, 2848 AlignmentSource::Decl); 2849 2850 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) 2851 return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T, 2852 AlignmentSource::Decl); 2853 2854 llvm_unreachable("Unhandled DeclRefExpr"); 2855 } 2856 2857 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 2858 // __extension__ doesn't affect lvalue-ness. 2859 if (E->getOpcode() == UO_Extension) 2860 return EmitLValue(E->getSubExpr()); 2861 2862 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 2863 switch (E->getOpcode()) { 2864 default: llvm_unreachable("Unknown unary operator lvalue!"); 2865 case UO_Deref: { 2866 QualType T = E->getSubExpr()->getType()->getPointeeType(); 2867 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 2868 2869 LValueBaseInfo BaseInfo; 2870 TBAAAccessInfo TBAAInfo; 2871 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo, 2872 &TBAAInfo); 2873 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); 2874 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 2875 2876 // We should not generate __weak write barrier on indirect reference 2877 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 2878 // But, we continue to generate __strong write barrier on indirect write 2879 // into a pointer to object. 2880 if (getLangOpts().ObjC && 2881 getLangOpts().getGC() != LangOptions::NonGC && 2882 LV.isObjCWeak()) 2883 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2884 return LV; 2885 } 2886 case UO_Real: 2887 case UO_Imag: { 2888 LValue LV = EmitLValue(E->getSubExpr()); 2889 assert(LV.isSimple() && "real/imag on non-ordinary l-value"); 2890 2891 // __real is valid on scalars. This is a faster way of testing that. 2892 // __imag can only produce an rvalue on scalars. 2893 if (E->getOpcode() == UO_Real && 2894 !LV.getAddress(*this).getElementType()->isStructTy()) { 2895 assert(E->getSubExpr()->getType()->isArithmeticType()); 2896 return LV; 2897 } 2898 2899 QualType T = ExprTy->castAs<ComplexType>()->getElementType(); 2900 2901 Address Component = 2902 (E->getOpcode() == UO_Real 2903 ? 
emitAddrOfRealComponent(LV.getAddress(*this), LV.getType()) 2904 : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType())); 2905 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(), 2906 CGM.getTBAAInfoForSubobject(LV, T)); 2907 ElemLV.getQuals().addQualifiers(LV.getQuals()); 2908 return ElemLV; 2909 } 2910 case UO_PreInc: 2911 case UO_PreDec: { 2912 LValue LV = EmitLValue(E->getSubExpr()); 2913 bool isInc = E->getOpcode() == UO_PreInc; 2914 2915 if (E->getType()->isAnyComplexType()) 2916 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 2917 else 2918 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 2919 return LV; 2920 } 2921 } 2922 } 2923 2924 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 2925 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 2926 E->getType(), AlignmentSource::Decl); 2927 } 2928 2929 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 2930 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 2931 E->getType(), AlignmentSource::Decl); 2932 } 2933 2934 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 2935 auto SL = E->getFunctionName(); 2936 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); 2937 StringRef FnName = CurFn->getName(); 2938 if (FnName.startswith("\01")) 2939 FnName = FnName.substr(1); 2940 StringRef NameItems[] = { 2941 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName}; 2942 std::string GVName = llvm::join(NameItems, NameItems + 2, "."); 2943 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) { 2944 std::string Name = std::string(SL->getString()); 2945 if (!Name.empty()) { 2946 unsigned Discriminator = 2947 CGM.getCXXABI().getMangleContext().getBlockId(BD, true); 2948 if (Discriminator) 2949 Name += "_" + Twine(Discriminator + 1).str(); 2950 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str()); 2951 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2952 } else { 2953 auto C = 2954 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str()); 2955 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2956 } 2957 } 2958 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); 2959 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2960 } 2961 2962 /// Emit a type description suitable for use by a runtime sanitizer library. The 2963 /// format of a type descriptor is 2964 /// 2965 /// \code 2966 /// { i16 TypeKind, i16 TypeInfo } 2967 /// \endcode 2968 /// 2969 /// followed by an array of i8 containing the type name. TypeKind is 0 for an 2970 /// integer, 1 for a floating point value, and -1 for anything else. 2971 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 2972 // Only emit each type's descriptor once. 2973 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) 2974 return C; 2975 2976 uint16_t TypeKind = -1; 2977 uint16_t TypeInfo = 0; 2978 2979 if (T->isIntegerType()) { 2980 TypeKind = 0; 2981 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 2982 (T->isSignedIntegerType() ? 1 : 0); 2983 } else if (T->isFloatingType()) { 2984 TypeKind = 1; 2985 TypeInfo = getContext().getTypeSize(T); 2986 } 2987 2988 // Format the type name as if for a diagnostic, including quotes and 2989 // optionally an 'aka'. 
2990 SmallString<32> Buffer; 2991 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 2992 (intptr_t)T.getAsOpaquePtr(), 2993 StringRef(), StringRef(), None, Buffer, 2994 None); 2995 2996 llvm::Constant *Components[] = { 2997 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 2998 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 2999 }; 3000 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 3001 3002 auto *GV = new llvm::GlobalVariable( 3003 CGM.getModule(), Descriptor->getType(), 3004 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); 3005 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3006 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); 3007 3008 // Remember the descriptor for this type. 3009 CGM.setTypeDescriptorInMap(T, GV); 3010 3011 return GV; 3012 } 3013 3014 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 3015 llvm::Type *TargetTy = IntPtrTy; 3016 3017 if (V->getType() == TargetTy) 3018 return V; 3019 3020 // Floating-point types which fit into intptr_t are bitcast to integers 3021 // and then passed directly (after zero-extension, if necessary). 3022 if (V->getType()->isFloatingPointTy()) { 3023 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize(); 3024 if (Bits <= TargetTy->getIntegerBitWidth()) 3025 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), 3026 Bits)); 3027 } 3028 3029 // Integers which fit in intptr_t are zero-extended and passed directly. 3030 if (V->getType()->isIntegerTy() && 3031 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 3032 return Builder.CreateZExt(V, TargetTy); 3033 3034 // Pointers are passed directly, everything else is passed by address. 3035 if (!V->getType()->isPointerTy()) { 3036 Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); 3037 Builder.CreateStore(V, Ptr); 3038 V = Ptr.getPointer(); 3039 } 3040 return Builder.CreatePtrToInt(V, TargetTy); 3041 } 3042 3043 /// Emit a representation of a SourceLocation for passing to a handler 3044 /// in a sanitizer runtime library. The format for this data is: 3045 /// \code 3046 /// struct SourceLocation { 3047 /// const char *Filename; 3048 /// int32_t Line, Column; 3049 /// }; 3050 /// \endcode 3051 /// For an invalid SourceLocation, the Filename pointer is null. 
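/// For example (illustrative values), a valid location at line 42, column 7
/// of a file "a.c" is emitted as the anonymous constant
/// { <pointer to the ".src" filename string "a.c">, i32 42, i32 7 }.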
3052 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 3053 llvm::Constant *Filename; 3054 int Line, Column; 3055 3056 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 3057 if (PLoc.isValid()) { 3058 StringRef FilenameString = PLoc.getFilename(); 3059 3060 int PathComponentsToStrip = 3061 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; 3062 if (PathComponentsToStrip < 0) { 3063 assert(PathComponentsToStrip != INT_MIN); 3064 int PathComponentsToKeep = -PathComponentsToStrip; 3065 auto I = llvm::sys::path::rbegin(FilenameString); 3066 auto E = llvm::sys::path::rend(FilenameString); 3067 while (I != E && --PathComponentsToKeep) 3068 ++I; 3069 3070 FilenameString = FilenameString.substr(I - E); 3071 } else if (PathComponentsToStrip > 0) { 3072 auto I = llvm::sys::path::begin(FilenameString); 3073 auto E = llvm::sys::path::end(FilenameString); 3074 while (I != E && PathComponentsToStrip--) 3075 ++I; 3076 3077 if (I != E) 3078 FilenameString = 3079 FilenameString.substr(I - llvm::sys::path::begin(FilenameString)); 3080 else 3081 FilenameString = llvm::sys::path::filename(FilenameString); 3082 } 3083 3084 auto FilenameGV = 3085 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src"); 3086 CGM.getSanitizerMetadata()->disableSanitizerForGlobal( 3087 cast<llvm::GlobalVariable>(FilenameGV.getPointer())); 3088 Filename = FilenameGV.getPointer(); 3089 Line = PLoc.getLine(); 3090 Column = PLoc.getColumn(); 3091 } else { 3092 Filename = llvm::Constant::getNullValue(Int8PtrTy); 3093 Line = Column = 0; 3094 } 3095 3096 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), 3097 Builder.getInt32(Column)}; 3098 3099 return llvm::ConstantStruct::getAnon(Data); 3100 } 3101 3102 namespace { 3103 /// Specify under what conditions this check can be recovered 3104 enum class CheckRecoverableKind { 3105 /// Always terminate program execution if this check fails. 3106 Unrecoverable, 3107 /// Check supports recovering, runtime has both fatal (noreturn) and 3108 /// non-fatal handlers for this check. 3109 Recoverable, 3110 /// Runtime conditionally aborts, always need to support recovery. 3111 AlwaysRecoverable 3112 }; 3113 } 3114 3115 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { 3116 assert(Kind.countPopulation() == 1); 3117 if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr) 3118 return CheckRecoverableKind::AlwaysRecoverable; 3119 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable) 3120 return CheckRecoverableKind::Unrecoverable; 3121 else 3122 return CheckRecoverableKind::Recoverable; 3123 } 3124 3125 namespace { 3126 struct SanitizerHandlerInfo { 3127 char const *const Name; 3128 unsigned Version; 3129 }; 3130 } 3131 3132 const SanitizerHandlerInfo SanitizerHandlers[] = { 3133 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, 3134 LIST_SANITIZER_CHECKS 3135 #undef SANITIZER_CHECK 3136 }; 3137 3138 static void emitCheckHandlerCall(CodeGenFunction &CGF, 3139 llvm::FunctionType *FnType, 3140 ArrayRef<llvm::Value *> FnArgs, 3141 SanitizerHandler CheckHandler, 3142 CheckRecoverableKind RecoverKind, bool IsFatal, 3143 llvm::BasicBlock *ContBB) { 3144 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); 3145 Optional<ApplyDebugLocation> DL; 3146 if (!CGF.Builder.getCurrentDebugLocation()) { 3147 // Ensure that the call has at least an artificial debug location. 
3148 DL.emplace(CGF, SourceLocation()); 3149 } 3150 bool NeedsAbortSuffix = 3151 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; 3152 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; 3153 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; 3154 const StringRef CheckName = CheckInfo.Name; 3155 std::string FnName = "__ubsan_handle_" + CheckName.str(); 3156 if (CheckInfo.Version && !MinimalRuntime) 3157 FnName += "_v" + llvm::utostr(CheckInfo.Version); 3158 if (MinimalRuntime) 3159 FnName += "_minimal"; 3160 if (NeedsAbortSuffix) 3161 FnName += "_abort"; 3162 bool MayReturn = 3163 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; 3164 3165 llvm::AttrBuilder B; 3166 if (!MayReturn) { 3167 B.addAttribute(llvm::Attribute::NoReturn) 3168 .addAttribute(llvm::Attribute::NoUnwind); 3169 } 3170 B.addAttribute(llvm::Attribute::UWTable); 3171 3172 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( 3173 FnType, FnName, 3174 llvm::AttributeList::get(CGF.getLLVMContext(), 3175 llvm::AttributeList::FunctionIndex, B), 3176 /*Local=*/true); 3177 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); 3178 if (!MayReturn) { 3179 HandlerCall->setDoesNotReturn(); 3180 CGF.Builder.CreateUnreachable(); 3181 } else { 3182 CGF.Builder.CreateBr(ContBB); 3183 } 3184 } 3185 3186 void CodeGenFunction::EmitCheck( 3187 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, 3188 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, 3189 ArrayRef<llvm::Value *> DynamicArgs) { 3190 assert(IsSanitizerScope); 3191 assert(Checked.size() > 0); 3192 assert(CheckHandler >= 0 && 3193 size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers)); 3194 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; 3195 3196 llvm::Value *FatalCond = nullptr; 3197 llvm::Value *RecoverableCond = nullptr; 3198 llvm::Value *TrapCond = nullptr; 3199 for (int i = 0, n = Checked.size(); i < n; ++i) { 3200 llvm::Value *Check = Checked[i].first; 3201 // -fsanitize-trap= overrides -fsanitize-recover=. 3202 llvm::Value *&Cond = 3203 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) 3204 ? TrapCond 3205 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) 3206 ? RecoverableCond 3207 : FatalCond; 3208 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check; 3209 } 3210 3211 if (TrapCond) 3212 EmitTrapCheck(TrapCond, CheckHandler); 3213 if (!FatalCond && !RecoverableCond) 3214 return; 3215 3216 llvm::Value *JointCond; 3217 if (FatalCond && RecoverableCond) 3218 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); 3219 else 3220 JointCond = FatalCond ? FatalCond : RecoverableCond; 3221 assert(JointCond); 3222 3223 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); 3224 assert(SanOpts.has(Checked[0].second)); 3225 #ifndef NDEBUG 3226 for (int i = 1, n = Checked.size(); i < n; ++i) { 3227 assert(RecoverKind == getRecoverableKind(Checked[i].second) && 3228 "All recoverable kinds in a single check must be same!"); 3229 assert(SanOpts.has(Checked[i].second)); 3230 } 3231 #endif 3232 3233 llvm::BasicBlock *Cont = createBasicBlock("cont"); 3234 llvm::BasicBlock *Handlers = createBasicBlock("handler." 
+ CheckName); 3235 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers); 3236 // Give hint that we very much don't expect to execute the handler 3237 // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp 3238 llvm::MDBuilder MDHelper(getLLVMContext()); 3239 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); 3240 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); 3241 EmitBlock(Handlers); 3242 3243 // Handler functions take an i8* pointing to the (handler-specific) static 3244 // information block, followed by a sequence of intptr_t arguments 3245 // representing operand values. 3246 SmallVector<llvm::Value *, 4> Args; 3247 SmallVector<llvm::Type *, 4> ArgTypes; 3248 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { 3249 Args.reserve(DynamicArgs.size() + 1); 3250 ArgTypes.reserve(DynamicArgs.size() + 1); 3251 3252 // Emit handler arguments and create handler function type. 3253 if (!StaticArgs.empty()) { 3254 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 3255 auto *InfoPtr = 3256 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, 3257 llvm::GlobalVariable::PrivateLinkage, Info); 3258 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3259 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); 3260 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); 3261 ArgTypes.push_back(Int8PtrTy); 3262 } 3263 3264 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { 3265 Args.push_back(EmitCheckValue(DynamicArgs[i])); 3266 ArgTypes.push_back(IntPtrTy); 3267 } 3268 } 3269 3270 llvm::FunctionType *FnType = 3271 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); 3272 3273 if (!FatalCond || !RecoverableCond) { 3274 // Simple case: we need to generate a single handler call, either 3275 // fatal, or non-fatal. 3276 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, 3277 (FatalCond != nullptr), Cont); 3278 } else { 3279 // Emit two handler calls: first one for set of unrecoverable checks, 3280 // another one for recoverable. 3281 llvm::BasicBlock *NonFatalHandlerBB = 3282 createBasicBlock("non_fatal." + CheckName); 3283 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." 
+ CheckName); 3284 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); 3285 EmitBlock(FatalHandlerBB); 3286 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true, 3287 NonFatalHandlerBB); 3288 EmitBlock(NonFatalHandlerBB); 3289 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false, 3290 Cont); 3291 } 3292 3293 EmitBlock(Cont); 3294 } 3295 3296 void CodeGenFunction::EmitCfiSlowPathCheck( 3297 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, 3298 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { 3299 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont"); 3300 3301 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath"); 3302 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB); 3303 3304 llvm::MDBuilder MDHelper(getLLVMContext()); 3305 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); 3306 BI->setMetadata(llvm::LLVMContext::MD_prof, Node); 3307 3308 EmitBlock(CheckBB); 3309 3310 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind); 3311 3312 llvm::CallInst *CheckCall; 3313 llvm::FunctionCallee SlowPathFn; 3314 if (WithDiag) { 3315 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 3316 auto *InfoPtr = 3317 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, 3318 llvm::GlobalVariable::PrivateLinkage, Info); 3319 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3320 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); 3321 3322 SlowPathFn = CGM.getModule().getOrInsertFunction( 3323 "__cfi_slowpath_diag", 3324 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, 3325 false)); 3326 CheckCall = Builder.CreateCall( 3327 SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)}); 3328 } else { 3329 SlowPathFn = CGM.getModule().getOrInsertFunction( 3330 "__cfi_slowpath", 3331 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false)); 3332 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr}); 3333 } 3334 3335 CGM.setDSOLocal( 3336 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts())); 3337 CheckCall->setDoesNotThrow(); 3338 3339 EmitBlock(Cont); 3340 } 3341 3342 // Emit a stub for __cfi_check function so that the linker knows about this 3343 // symbol in LTO mode. 3344 void CodeGenFunction::EmitCfiCheckStub() { 3345 llvm::Module *M = &CGM.getModule(); 3346 auto &Ctx = M->getContext(); 3347 llvm::Function *F = llvm::Function::Create( 3348 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false), 3349 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M); 3350 CGM.setDSOLocal(F); 3351 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F); 3352 // FIXME: consider emitting an intrinsic call like 3353 // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2) 3354 // which can be lowered in CrossDSOCFI pass to the actual contents of 3355 // __cfi_check. This would allow inlining of __cfi_check calls. 3356 llvm::CallInst::Create( 3357 llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB); 3358 llvm::ReturnInst::Create(Ctx, nullptr, BB); 3359 } 3360 3361 // This function is basically a switch over the CFI failure kind, which is 3362 // extracted from CFICheckFailData (1st function argument). Each case is either 3363 // llvm.trap or a call to one of the two runtime handlers, based on 3364 // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid 3365 // failure kind) traps, but this should really never happen. 
CFICheckFailData 3366 // can be nullptr if the calling module has -fsanitize-trap behavior for this 3367 // check kind; in this case __cfi_check_fail traps as well. 3368 void CodeGenFunction::EmitCfiCheckFail() { 3369 SanitizerScope SanScope(this); 3370 FunctionArgList Args; 3371 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, 3372 ImplicitParamDecl::Other); 3373 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, 3374 ImplicitParamDecl::Other); 3375 Args.push_back(&ArgData); 3376 Args.push_back(&ArgAddr); 3377 3378 const CGFunctionInfo &FI = 3379 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args); 3380 3381 llvm::Function *F = llvm::Function::Create( 3382 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false), 3383 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule()); 3384 3385 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F); 3386 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F); 3387 F->setVisibility(llvm::GlobalValue::HiddenVisibility); 3388 3389 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args, 3390 SourceLocation()); 3391 3392 // This function should not be affected by the blacklist. This function does 3393 // not have a source location, but "src:*" would still apply. Revert any 3394 // changes to SanOpts made in StartFunction. 3395 SanOpts = CGM.getLangOpts().Sanitize; 3396 3397 llvm::Value *Data = 3398 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false, 3399 CGM.getContext().VoidPtrTy, ArgData.getLocation()); 3400 llvm::Value *Addr = 3401 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false, 3402 CGM.getContext().VoidPtrTy, ArgAddr.getLocation()); 3403 3404 // Data == nullptr means the calling module has trap behaviour for this check.
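// (Illustrative note:) when Data is non-null it points at a CFICheckFailData
// record, modeled below as CfiCheckFailDataTy, i.e. roughly
// { i8 CheckKind, { i8*, i32, i32 } SourceLocation, i8* }. Only CheckKind is
// loaded directly here; Data itself is forwarded to the check handlers.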
3405 llvm::Value *DataIsNotNullPtr = 3406 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy)); 3407 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail); 3408 3409 llvm::StructType *SourceLocationTy = 3410 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty); 3411 llvm::StructType *CfiCheckFailDataTy = 3412 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy); 3413 3414 llvm::Value *V = Builder.CreateConstGEP2_32( 3415 CfiCheckFailDataTy, 3416 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0, 3417 0); 3418 Address CheckKindAddr(V, getIntAlign()); 3419 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr); 3420 3421 llvm::Value *AllVtables = llvm::MetadataAsValue::get( 3422 CGM.getLLVMContext(), 3423 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); 3424 llvm::Value *ValidVtable = Builder.CreateZExt( 3425 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), 3426 {Addr, AllVtables}), 3427 IntPtrTy); 3428 3429 const std::pair<int, SanitizerMask> CheckKinds[] = { 3430 {CFITCK_VCall, SanitizerKind::CFIVCall}, 3431 {CFITCK_NVCall, SanitizerKind::CFINVCall}, 3432 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, 3433 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, 3434 {CFITCK_ICall, SanitizerKind::CFIICall}}; 3435 3436 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks; 3437 for (auto CheckKindMaskPair : CheckKinds) { 3438 int Kind = CheckKindMaskPair.first; 3439 SanitizerMask Mask = CheckKindMaskPair.second; 3440 llvm::Value *Cond = 3441 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind)); 3442 if (CGM.getLangOpts().Sanitize.has(Mask)) 3443 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {}, 3444 {Data, Addr, ValidVtable}); 3445 else 3446 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail); 3447 } 3448 3449 FinishFunction(); 3450 // The only reference to this function will be created during LTO link. 3451 // Make sure it survives until then. 3452 CGM.addUsedGlobal(F); 3453 } 3454 3455 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) { 3456 if (SanOpts.has(SanitizerKind::Unreachable)) { 3457 SanitizerScope SanScope(this); 3458 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()), 3459 SanitizerKind::Unreachable), 3460 SanitizerHandler::BuiltinUnreachable, 3461 EmitCheckSourceLocation(Loc), None); 3462 } 3463 Builder.CreateUnreachable(); 3464 } 3465 3466 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked, 3467 SanitizerHandler CheckHandlerID) { 3468 llvm::BasicBlock *Cont = createBasicBlock("cont"); 3469 3470 // If we're optimizing, collapse all calls to trap down to just one per 3471 // check-type per function to save on code size. 
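// (Illustrative:) with optimization enabled, two failed checks in the same
// function that share a SanitizerHandler id branch to one common "trap"
// block, i.e. a single llvm.ubsantrap(i8 <id>) call instead of one trap call
// per check site.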
3472 if (TrapBBs.size() <= CheckHandlerID) 3473 TrapBBs.resize(CheckHandlerID + 1); 3474 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID]; 3475 3476 if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { 3477 TrapBB = createBasicBlock("trap"); 3478 Builder.CreateCondBr(Checked, Cont, TrapBB); 3479 EmitBlock(TrapBB); 3480 3481 llvm::CallInst *TrapCall = 3482 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap), 3483 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID)); 3484 3485 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { 3486 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", 3487 CGM.getCodeGenOpts().TrapFuncName); 3488 TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A); 3489 } 3490 TrapCall->setDoesNotReturn(); 3491 TrapCall->setDoesNotThrow(); 3492 Builder.CreateUnreachable(); 3493 } else { 3494 auto Call = TrapBB->begin(); 3495 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB"); 3496 3497 Call->applyMergedLocation(Call->getDebugLoc(), 3498 Builder.getCurrentDebugLocation()); 3499 Builder.CreateCondBr(Checked, Cont, TrapBB); 3500 } 3501 3502 EmitBlock(Cont); 3503 } 3504 3505 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { 3506 llvm::CallInst *TrapCall = 3507 Builder.CreateCall(CGM.getIntrinsic(IntrID)); 3508 3509 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) { 3510 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name", 3511 CGM.getCodeGenOpts().TrapFuncName); 3512 TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A); 3513 } 3514 3515 return TrapCall; 3516 } 3517 3518 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, 3519 LValueBaseInfo *BaseInfo, 3520 TBAAAccessInfo *TBAAInfo) { 3521 assert(E->getType()->isArrayType() && 3522 "Array to pointer decay must have array source type!"); 3523 3524 // Expressions of array type can't be bitfields or vector elements. 3525 LValue LV = EmitLValue(E); 3526 Address Addr = LV.getAddress(*this); 3527 3528 // If the array type was an incomplete type, we need to make sure 3529 // the decay ends up being the right type. 3530 llvm::Type *NewTy = ConvertType(E->getType()); 3531 Addr = Builder.CreateElementBitCast(Addr, NewTy); 3532 3533 // Note that VLA pointers are always decayed, so we don't need to do 3534 // anything here. 3535 if (!E->getType()->isVariableArrayType()) { 3536 assert(isa<llvm::ArrayType>(Addr.getElementType()) && 3537 "Expected pointer to array"); 3538 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); 3539 } 3540 3541 // The result of this decay conversion points to an array element within the 3542 // base lvalue. However, since TBAA currently does not support representing 3543 // accesses to elements of member arrays, we conservatively represent accesses 3544 // to the pointee object as if it had no base lvalue specified. 3545 // TODO: Support TBAA for member arrays. 3546 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); 3547 if (BaseInfo) *BaseInfo = LV.getBaseInfo(); 3548 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType); 3549 3550 return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType)); 3551 } 3552 3553 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 3554 /// array to pointer, return the array subexpression. 3555 static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 3556 // If this isn't just an array->pointer decay, bail out.
3557 const auto *CE = dyn_cast<CastExpr>(E); 3558 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) 3559 return nullptr; 3560 3561 // If this is a decay from variable width array, bail out. 3562 const Expr *SubExpr = CE->getSubExpr(); 3563 if (SubExpr->getType()->isVariableArrayType()) 3564 return nullptr; 3565 3566 return SubExpr; 3567 } 3568 3569 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, 3570 llvm::Value *ptr, 3571 ArrayRef<llvm::Value*> indices, 3572 bool inbounds, 3573 bool signedIndices, 3574 SourceLocation loc, 3575 const llvm::Twine &name = "arrayidx") { 3576 if (inbounds) { 3577 return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices, 3578 CodeGenFunction::NotSubtraction, loc, 3579 name); 3580 } else { 3581 return CGF.Builder.CreateGEP(ptr, indices, name); 3582 } 3583 } 3584 3585 static CharUnits getArrayElementAlign(CharUnits arrayAlign, 3586 llvm::Value *idx, 3587 CharUnits eltSize) { 3588 // If we have a constant index, we can use the exact offset of the 3589 // element we're accessing. 3590 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) { 3591 CharUnits offset = constantIdx->getZExtValue() * eltSize; 3592 return arrayAlign.alignmentAtOffset(offset); 3593 3594 // Otherwise, use the worst-case alignment for any element. 3595 } else { 3596 return arrayAlign.alignmentOfArrayElement(eltSize); 3597 } 3598 } 3599 3600 static QualType getFixedSizeElementType(const ASTContext &ctx, 3601 const VariableArrayType *vla) { 3602 QualType eltType; 3603 do { 3604 eltType = vla->getElementType(); 3605 } while ((vla = ctx.getAsVariableArrayType(eltType))); 3606 return eltType; 3607 } 3608 3609 /// Given an array base, check whether its member access belongs to a record 3610 /// with preserve_access_index attribute or not. 3611 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) { 3612 if (!ArrayBase || !CGF.getDebugInfo()) 3613 return false; 3614 3615 // Only support base as either a MemberExpr or DeclRefExpr. 3616 // DeclRefExpr to cover cases like: 3617 // struct s { int a; int b[10]; }; 3618 // struct s *p; 3619 // p[1].a 3620 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr. 3621 // p->b[5] is a MemberExpr example. 3622 const Expr *E = ArrayBase->IgnoreImpCasts(); 3623 if (const auto *ME = dyn_cast<MemberExpr>(E)) 3624 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); 3625 3626 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { 3627 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl()); 3628 if (!VarDef) 3629 return false; 3630 3631 const auto *PtrT = VarDef->getType()->getAs<PointerType>(); 3632 if (!PtrT) 3633 return false; 3634 3635 const auto *PointeeT = PtrT->getPointeeType() 3636 ->getUnqualifiedDesugaredType(); 3637 if (const auto *RecT = dyn_cast<RecordType>(PointeeT)) 3638 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>(); 3639 return false; 3640 } 3641 3642 return false; 3643 } 3644 3645 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, 3646 ArrayRef<llvm::Value *> indices, 3647 QualType eltType, bool inbounds, 3648 bool signedIndices, SourceLocation loc, 3649 QualType *arrayType = nullptr, 3650 const Expr *Base = nullptr, 3651 const llvm::Twine &name = "arrayidx") { 3652 // All the indices except that last must be zero. 
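// (Illustrative:) for a constant-sized base this typically produces a GEP
// such as 'getelementptr inbounds [10 x i32], [10 x i32]* %arr, i64 0,
// i64 %idx', where the leading indices are all zero and only the trailing
// one varies.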
3653 #ifndef NDEBUG 3654 for (auto idx : indices.drop_back()) 3655 assert(isa<llvm::ConstantInt>(idx) && 3656 cast<llvm::ConstantInt>(idx)->isZero()); 3657 #endif 3658 3659 // Determine the element size of the statically-sized base. This is 3660 // the thing that the indices are expressed in terms of. 3661 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) { 3662 eltType = getFixedSizeElementType(CGF.getContext(), vla); 3663 } 3664 3665 // We can use that to compute the best alignment of the element. 3666 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType); 3667 CharUnits eltAlign = 3668 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); 3669 3670 llvm::Value *eltPtr; 3671 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back()); 3672 if (!LastIndex || 3673 (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) { 3674 eltPtr = emitArraySubscriptGEP( 3675 CGF, addr.getPointer(), indices, inbounds, signedIndices, 3676 loc, name); 3677 } else { 3678 // Remember the original array subscript for bpf target 3679 unsigned idx = LastIndex->getZExtValue(); 3680 llvm::DIType *DbgInfo = nullptr; 3681 if (arrayType) 3682 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc); 3683 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(), 3684 addr.getPointer(), 3685 indices.size() - 1, 3686 idx, DbgInfo); 3687 } 3688 3689 return Address(eltPtr, eltAlign); 3690 } 3691 3692 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, 3693 bool Accessed) { 3694 // The index must always be an integer, which is not an aggregate. Emit it 3695 // in lexical order (this complexity is, sadly, required by C++17). 3696 llvm::Value *IdxPre = 3697 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr; 3698 bool SignedIndices = false; 3699 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { 3700 auto *Idx = IdxPre; 3701 if (E->getLHS() != E->getIdx()) { 3702 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); 3703 Idx = EmitScalarExpr(E->getIdx()); 3704 } 3705 3706 QualType IdxTy = E->getIdx()->getType(); 3707 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 3708 SignedIndices |= IdxSigned; 3709 3710 if (SanOpts.has(SanitizerKind::ArrayBounds)) 3711 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); 3712 3713 // Extend or truncate the index type to 32 or 64-bits. 3714 if (Promote && Idx->getType() != IntPtrTy) 3715 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 3716 3717 return Idx; 3718 }; 3719 IdxPre = nullptr; 3720 3721 // If the base is a vector type, then we are forming a vector element lvalue 3722 // with this subscript. 3723 if (E->getBase()->getType()->isVectorType() && 3724 !isa<ExtVectorElementExpr>(E->getBase())) { 3725 // Emit the vector as an lvalue to get its address. 3726 LValue LHS = EmitLValue(E->getBase()); 3727 auto *Idx = EmitIdxAfterBase(/*Promote*/false); 3728 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 3729 return LValue::MakeVectorElt(LHS.getAddress(*this), Idx, 3730 E->getBase()->getType(), LHS.getBaseInfo(), 3731 TBAAAccessInfo()); 3732 } 3733 3734 // All the other cases basically behave like simple offsetting. 3735 3736 // Handle the extvector case we ignored above. 
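// (e.g. subscripting a vector swizzle such as 'vec.xy[1]' on an
// ext_vector_type 'vec'; the base expression is then an ExtVectorElementExpr
// rather than a plain vector l-value.)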
3737 if (isa<ExtVectorElementExpr>(E->getBase())) { 3738 LValue LV = EmitLValue(E->getBase()); 3739 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3740 Address Addr = EmitExtVectorElementLValue(LV); 3741 3742 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); 3743 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true, 3744 SignedIndices, E->getExprLoc()); 3745 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(), 3746 CGM.getTBAAInfoForSubobject(LV, EltType)); 3747 } 3748 3749 LValueBaseInfo EltBaseInfo; 3750 TBAAAccessInfo EltTBAAInfo; 3751 Address Addr = Address::invalid(); 3752 if (const VariableArrayType *vla = 3753 getContext().getAsVariableArrayType(E->getType())) { 3754 // The base must be a pointer, which is not an aggregate. Emit 3755 // it. It needs to be emitted first in case it's what captures 3756 // the VLA bounds. 3757 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); 3758 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3759 3760 // The element count here is the total number of non-VLA elements. 3761 llvm::Value *numElements = getVLASize(vla).NumElts; 3762 3763 // Effectively, the multiply by the VLA size is part of the GEP. 3764 // GEP indexes are signed, and scaling an index isn't permitted to 3765 // signed-overflow, so we use the same semantics for our explicit 3766 // multiply. We suppress this if overflow is not undefined behavior. 3767 if (getLangOpts().isSignedOverflowDefined()) { 3768 Idx = Builder.CreateMul(Idx, numElements); 3769 } else { 3770 Idx = Builder.CreateNSWMul(Idx, numElements); 3771 } 3772 3773 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(), 3774 !getLangOpts().isSignedOverflowDefined(), 3775 SignedIndices, E->getExprLoc()); 3776 3777 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 3778 // Indexing over an interface, as in "NSString *P; P[4];" 3779 3780 // Emit the base pointer. 3781 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); 3782 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3783 3784 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT); 3785 llvm::Value *InterfaceSizeVal = 3786 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity()); 3787 3788 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal); 3789 3790 // We don't necessarily build correct LLVM struct types for ObjC 3791 // interfaces, so we can't rely on GEP to do this scaling 3792 // correctly, so we need to cast to i8*. FIXME: is this actually 3793 // true? A lot of other things in the fragile ABI would break... 3794 llvm::Type *OrigBaseTy = Addr.getType(); 3795 Addr = Builder.CreateElementBitCast(Addr, Int8Ty); 3796 3797 // Do the GEP. 3798 CharUnits EltAlign = 3799 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize); 3800 llvm::Value *EltPtr = 3801 emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false, 3802 SignedIndices, E->getExprLoc()); 3803 Addr = Address(EltPtr, EltAlign); 3804 3805 // Cast back. 3806 Addr = Builder.CreateBitCast(Addr, OrigBaseTy); 3807 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 3808 // If this is A[i] where A is an array, the frontend will have decayed the 3809 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 3810 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 3811 // "gep x, i" here. Emit one "gep A, 0, i". 
3812 assert(Array->getType()->isArrayType() && 3813 "Array to pointer decay must have array source type!"); 3814 LValue ArrayLV; 3815 // For simple multidimensional array indexing, set the 'accessed' flag for 3816 // better bounds-checking of the base expression. 3817 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) 3818 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); 3819 else 3820 ArrayLV = EmitLValue(Array); 3821 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3822 3823 // Propagate the alignment from the array itself to the result. 3824 QualType arrayType = Array->getType(); 3825 Addr = emitArraySubscriptGEP( 3826 *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, 3827 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, 3828 E->getExprLoc(), &arrayType, E->getBase()); 3829 EltBaseInfo = ArrayLV.getBaseInfo(); 3830 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType()); 3831 } else { 3832 // The base must be a pointer; emit it with an estimate of its alignment. 3833 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); 3834 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3835 QualType ptrType = E->getBase()->getType(); 3836 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), 3837 !getLangOpts().isSignedOverflowDefined(), 3838 SignedIndices, E->getExprLoc(), &ptrType, 3839 E->getBase()); 3840 } 3841 3842 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo); 3843 3844 if (getLangOpts().ObjC && 3845 getLangOpts().getGC() != LangOptions::NonGC) { 3846 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 3847 setObjCGCLValueClass(getContext(), E, LV); 3848 } 3849 return LV; 3850 } 3851 3852 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) { 3853 assert( 3854 !E->isIncomplete() && 3855 "incomplete matrix subscript expressions should be rejected during Sema"); 3856 LValue Base = EmitLValue(E->getBase()); 3857 llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx()); 3858 llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx()); 3859 llvm::Value *NumRows = Builder.getIntN( 3860 RowIdx->getType()->getScalarSizeInBits(), 3861 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows()); 3862 llvm::Value *FinalIdx = 3863 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx); 3864 return LValue::MakeMatrixElt( 3865 MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx, 3866 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo()); 3867 } 3868 3869 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, 3870 LValueBaseInfo &BaseInfo, 3871 TBAAAccessInfo &TBAAInfo, 3872 QualType BaseTy, QualType ElTy, 3873 bool IsLowerBound) { 3874 LValue BaseLVal; 3875 if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) { 3876 BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound); 3877 if (BaseTy->isArrayType()) { 3878 Address Addr = BaseLVal.getAddress(CGF); 3879 BaseInfo = BaseLVal.getBaseInfo(); 3880 3881 // If the array type was an incomplete type, we need to make sure 3882 // the decay ends up being the right type. 3883 llvm::Type *NewTy = CGF.ConvertType(BaseTy); 3884 Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy); 3885 3886 // Note that VLA pointers are always decayed, so we don't need to do 3887 // anything here. 
3888 if (!BaseTy->isVariableArrayType()) { 3889 assert(isa<llvm::ArrayType>(Addr.getElementType()) && 3890 "Expected pointer to array"); 3891 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); 3892 } 3893 3894 return CGF.Builder.CreateElementBitCast(Addr, 3895 CGF.ConvertTypeForMem(ElTy)); 3896 } 3897 LValueBaseInfo TypeBaseInfo; 3898 TBAAAccessInfo TypeTBAAInfo; 3899 CharUnits Align = 3900 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo); 3901 BaseInfo.mergeForCast(TypeBaseInfo); 3902 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo); 3903 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align); 3904 } 3905 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); 3906 } 3907 3908 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, 3909 bool IsLowerBound) { 3910 QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase()); 3911 QualType ResultExprTy; 3912 if (auto *AT = getContext().getAsArrayType(BaseTy)) 3913 ResultExprTy = AT->getElementType(); 3914 else 3915 ResultExprTy = BaseTy->getPointeeType(); 3916 llvm::Value *Idx = nullptr; 3917 if (IsLowerBound || E->getColonLocFirst().isInvalid()) { 3918 // Requesting lower bound or upper bound, but without provided length and 3919 // without ':' symbol for the default length -> length = 1. 3920 // Idx = LowerBound ?: 0; 3921 if (auto *LowerBound = E->getLowerBound()) { 3922 Idx = Builder.CreateIntCast( 3923 EmitScalarExpr(LowerBound), IntPtrTy, 3924 LowerBound->getType()->hasSignedIntegerRepresentation()); 3925 } else 3926 Idx = llvm::ConstantInt::getNullValue(IntPtrTy); 3927 } else { 3928 // Try to emit length or lower bound as constant. If this is possible, 1 3929 // is subtracted from constant length or lower bound. Otherwise, emit LLVM 3930 // IR (LB + Len) - 1. 3931 auto &C = CGM.getContext(); 3932 auto *Length = E->getLength(); 3933 llvm::APSInt ConstLength; 3934 if (Length) { 3935 // Idx = LowerBound + Length - 1; 3936 if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) { 3937 ConstLength = CL->zextOrTrunc(PointerWidthInBits); 3938 Length = nullptr; 3939 } 3940 auto *LowerBound = E->getLowerBound(); 3941 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); 3942 if (LowerBound) { 3943 if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) { 3944 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits); 3945 LowerBound = nullptr; 3946 } 3947 } 3948 if (!Length) 3949 --ConstLength; 3950 else if (!LowerBound) 3951 --ConstLowerBound; 3952 3953 if (Length || LowerBound) { 3954 auto *LowerBoundVal = 3955 LowerBound 3956 ? Builder.CreateIntCast( 3957 EmitScalarExpr(LowerBound), IntPtrTy, 3958 LowerBound->getType()->hasSignedIntegerRepresentation()) 3959 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound); 3960 auto *LengthVal = 3961 Length 3962 ? 
Builder.CreateIntCast( 3963 EmitScalarExpr(Length), IntPtrTy, 3964 Length->getType()->hasSignedIntegerRepresentation()) 3965 : llvm::ConstantInt::get(IntPtrTy, ConstLength); 3966 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len", 3967 /*HasNUW=*/false, 3968 !getLangOpts().isSignedOverflowDefined()); 3969 if (Length && LowerBound) { 3970 Idx = Builder.CreateSub( 3971 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1", 3972 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); 3973 } 3974 } else 3975 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound); 3976 } else { 3977 // Idx = ArraySize - 1; 3978 QualType ArrayTy = BaseTy->isPointerType() 3979 ? E->getBase()->IgnoreParenImpCasts()->getType() 3980 : BaseTy; 3981 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) { 3982 Length = VAT->getSizeExpr(); 3983 if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) { 3984 ConstLength = *L; 3985 Length = nullptr; 3986 } 3987 } else { 3988 auto *CAT = C.getAsConstantArrayType(ArrayTy); 3989 ConstLength = CAT->getSize(); 3990 } 3991 if (Length) { 3992 auto *LengthVal = Builder.CreateIntCast( 3993 EmitScalarExpr(Length), IntPtrTy, 3994 Length->getType()->hasSignedIntegerRepresentation()); 3995 Idx = Builder.CreateSub( 3996 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1", 3997 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); 3998 } else { 3999 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits); 4000 --ConstLength; 4001 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength); 4002 } 4003 } 4004 } 4005 assert(Idx); 4006 4007 Address EltPtr = Address::invalid(); 4008 LValueBaseInfo BaseInfo; 4009 TBAAAccessInfo TBAAInfo; 4010 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) { 4011 // The base must be a pointer, which is not an aggregate. Emit 4012 // it. It needs to be emitted first in case it's what captures 4013 // the VLA bounds. 4014 Address Base = 4015 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, 4016 BaseTy, VLA->getElementType(), IsLowerBound); 4017 // The element count here is the total number of non-VLA elements. 4018 llvm::Value *NumElements = getVLASize(VLA).NumElts; 4019 4020 // Effectively, the multiply by the VLA size is part of the GEP. 4021 // GEP indexes are signed, and scaling an index isn't permitted to 4022 // signed-overflow, so we use the same semantics for our explicit 4023 // multiply. We suppress this if overflow is not undefined behavior. 4024 if (getLangOpts().isSignedOverflowDefined()) 4025 Idx = Builder.CreateMul(Idx, NumElements); 4026 else 4027 Idx = Builder.CreateNSWMul(Idx, NumElements); 4028 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(), 4029 !getLangOpts().isSignedOverflowDefined(), 4030 /*signedIndices=*/false, E->getExprLoc()); 4031 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 4032 // If this is A[i] where A is an array, the frontend will have decayed the 4033 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 4034 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 4035 // "gep x, i" here. Emit one "gep A, 0, i". 4036 assert(Array->getType()->isArrayType() && 4037 "Array to pointer decay must have array source type!"); 4038 LValue ArrayLV; 4039 // For simple multidimensional array indexing, set the 'accessed' flag for 4040 // better bounds-checking of the base expression. 
4041 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) 4042 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); 4043 else 4044 ArrayLV = EmitLValue(Array); 4045 4046 // Propagate the alignment from the array itself to the result. 4047 EltPtr = emitArraySubscriptGEP( 4048 *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, 4049 ResultExprTy, !getLangOpts().isSignedOverflowDefined(), 4050 /*signedIndices=*/false, E->getExprLoc()); 4051 BaseInfo = ArrayLV.getBaseInfo(); 4052 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy); 4053 } else { 4054 Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, 4055 TBAAInfo, BaseTy, ResultExprTy, 4056 IsLowerBound); 4057 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy, 4058 !getLangOpts().isSignedOverflowDefined(), 4059 /*signedIndices=*/false, E->getExprLoc()); 4060 } 4061 4062 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo); 4063 } 4064 4065 LValue CodeGenFunction:: 4066 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 4067 // Emit the base vector as an l-value. 4068 LValue Base; 4069 4070 // ExtVectorElementExpr's base can either be a vector or pointer to vector. 4071 if (E->isArrow()) { 4072 // If it is a pointer to a vector, emit the address and form an lvalue with 4073 // it. 4074 LValueBaseInfo BaseInfo; 4075 TBAAAccessInfo TBAAInfo; 4076 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo); 4077 const auto *PT = E->getBase()->getType()->castAs<PointerType>(); 4078 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo); 4079 Base.getQuals().removeObjCGCAttr(); 4080 } else if (E->getBase()->isGLValue()) { 4081 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), 4082 // emit the base as an lvalue. 4083 assert(E->getBase()->getType()->isVectorType()); 4084 Base = EmitLValue(E->getBase()); 4085 } else { 4086 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. 4087 assert(E->getBase()->getType()->isVectorType() && 4088 "Result must be a vector"); 4089 llvm::Value *Vec = EmitScalarExpr(E->getBase()); 4090 4091 // Store the vector to memory (because LValue wants an address). 4092 Address VecMem = CreateMemTemp(E->getBase()->getType()); 4093 Builder.CreateStore(Vec, VecMem); 4094 Base = MakeAddrLValue(VecMem, E->getBase()->getType(), 4095 AlignmentSource::Decl); 4096 } 4097 4098 QualType type = 4099 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); 4100 4101 // Encode the element access list into a vector of unsigned indices. 
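// (For example, a swizzle 'v.yzx' encodes to the index list {1, 2, 0}.)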
4102 SmallVector<uint32_t, 4> Indices; 4103 E->getEncodedElementAccess(Indices); 4104 4105 if (Base.isSimple()) { 4106 llvm::Constant *CV = 4107 llvm::ConstantDataVector::get(getLLVMContext(), Indices); 4108 return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type, 4109 Base.getBaseInfo(), TBAAAccessInfo()); 4110 } 4111 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 4112 4113 llvm::Constant *BaseElts = Base.getExtVectorElts(); 4114 SmallVector<llvm::Constant *, 4> CElts; 4115 4116 for (unsigned i = 0, e = Indices.size(); i != e; ++i) 4117 CElts.push_back(BaseElts->getAggregateElement(Indices[i])); 4118 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 4119 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type, 4120 Base.getBaseInfo(), TBAAAccessInfo()); 4121 } 4122 4123 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 4124 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { 4125 EmitIgnoredExpr(E->getBase()); 4126 return EmitDeclRefLValue(DRE); 4127 } 4128 4129 Expr *BaseExpr = E->getBase(); 4130 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 4131 LValue BaseLV; 4132 if (E->isArrow()) { 4133 LValueBaseInfo BaseInfo; 4134 TBAAAccessInfo TBAAInfo; 4135 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); 4136 QualType PtrTy = BaseExpr->getType()->getPointeeType(); 4137 SanitizerSet SkippedChecks; 4138 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); 4139 if (IsBaseCXXThis) 4140 SkippedChecks.set(SanitizerKind::Alignment, true); 4141 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr)) 4142 SkippedChecks.set(SanitizerKind::Null, true); 4143 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, 4144 /*Alignment=*/CharUnits::Zero(), SkippedChecks); 4145 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); 4146 } else 4147 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); 4148 4149 NamedDecl *ND = E->getMemberDecl(); 4150 if (auto *Field = dyn_cast<FieldDecl>(ND)) { 4151 LValue LV = EmitLValueForField(BaseLV, Field); 4152 setObjCGCLValueClass(getContext(), E, LV); 4153 if (getLangOpts().OpenMP) { 4154 // If the member was explicitly marked as nontemporal, mark it as 4155 // nontemporal. If the base lvalue is marked as nontemporal, mark access 4156 // to children as nontemporal too. 4157 if ((IsWrappedCXXThis(BaseExpr) && 4158 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) || 4159 BaseLV.isNontemporal()) 4160 LV.setNontemporal(/*Value=*/true); 4161 } 4162 return LV; 4163 } 4164 4165 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 4166 return EmitFunctionDeclLValue(*this, E, FD); 4167 4168 llvm_unreachable("Unhandled member declaration!"); 4169 } 4170 4171 /// Given that we are currently emitting a lambda, emit an l-value for 4172 /// one of its members. 4173 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { 4174 assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda()); 4175 assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent()); 4176 QualType LambdaTagType = 4177 getContext().getTagDeclType(Field->getParent()); 4178 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType); 4179 return EmitLValueForField(LambdaLV, Field); 4180 } 4181 4182 /// Get the field index in the debug info. The debug info structure/union 4183 /// will ignore the unnamed bitfields. 
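/// For example (illustrative), given 'struct S { int a; int : 0; int b; };',
/// the field 'b' has AST field index 2 but debug-info index 1, because the
/// unnamed bit-field between 'a' and 'b' is not counted.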
4184 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, 4185 unsigned FieldIndex) { 4186 unsigned I = 0, Skipped = 0; 4187 4188 for (auto F : Rec->getDefinition()->fields()) { 4189 if (I == FieldIndex) 4190 break; 4191 if (F->isUnnamedBitfield()) 4192 Skipped++; 4193 I++; 4194 } 4195 4196 return FieldIndex - Skipped; 4197 } 4198 4199 /// Get the address of a zero-sized field within a record. The resulting 4200 /// address doesn't necessarily have the right type. 4201 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, 4202 const FieldDecl *Field) { 4203 CharUnits Offset = CGF.getContext().toCharUnitsFromBits( 4204 CGF.getContext().getFieldOffset(Field)); 4205 if (Offset.isZero()) 4206 return Base; 4207 Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty); 4208 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset); 4209 } 4210 4211 /// Drill down to the storage of a field without walking into 4212 /// reference types. 4213 /// 4214 /// The resulting address doesn't necessarily have the right type. 4215 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, 4216 const FieldDecl *field) { 4217 if (field->isZeroSize(CGF.getContext())) 4218 return emitAddrOfZeroSizeField(CGF, base, field); 4219 4220 const RecordDecl *rec = field->getParent(); 4221 4222 unsigned idx = 4223 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 4224 4225 return CGF.Builder.CreateStructGEP(base, idx, field->getName()); 4226 } 4227 4228 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, 4229 Address addr, const FieldDecl *field) { 4230 const RecordDecl *rec = field->getParent(); 4231 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType( 4232 base.getType(), rec->getLocation()); 4233 4234 unsigned idx = 4235 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 4236 4237 return CGF.Builder.CreatePreserveStructAccessIndex( 4238 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo); 4239 } 4240 4241 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) { 4242 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl(); 4243 if (!RD) 4244 return false; 4245 4246 if (RD->isDynamicClass()) 4247 return true; 4248 4249 for (const auto &Base : RD->bases()) 4250 if (hasAnyVptr(Base.getType(), Context)) 4251 return true; 4252 4253 for (const FieldDecl *Field : RD->fields()) 4254 if (hasAnyVptr(Field->getType(), Context)) 4255 return true; 4256 4257 return false; 4258 } 4259 4260 LValue CodeGenFunction::EmitLValueForField(LValue base, 4261 const FieldDecl *field) { 4262 LValueBaseInfo BaseInfo = base.getBaseInfo(); 4263 4264 if (field->isBitField()) { 4265 const CGRecordLayout &RL = 4266 CGM.getTypes().getCGRecordLayout(field->getParent()); 4267 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); 4268 const bool UseVolatile = isAAPCS(CGM.getTarget()) && 4269 CGM.getCodeGenOpts().AAPCSBitfieldWidth && 4270 Info.VolatileStorageSize != 0 && 4271 field->getType() 4272 .withCVRQualifiers(base.getVRQualifiers()) 4273 .isVolatileQualified(); 4274 Address Addr = base.getAddress(*this); 4275 unsigned Idx = RL.getLLVMFieldNo(field); 4276 const RecordDecl *rec = field->getParent(); 4277 if (!UseVolatile) { 4278 if (!IsInPreservedAIRegion && 4279 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) { 4280 if (Idx != 0) 4281 // For structs, we GEP to the field that the record layout suggests. 
4282 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); 4283 } else { 4284 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType( 4285 getContext().getRecordType(rec), rec->getLocation()); 4286 Addr = Builder.CreatePreserveStructAccessIndex( 4287 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()), 4288 DbgInfo); 4289 } 4290 } 4291 const unsigned SS = 4292 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; 4293 // Get the access type. 4294 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS); 4295 if (Addr.getElementType() != FieldIntTy) 4296 Addr = Builder.CreateElementBitCast(Addr, FieldIntTy); 4297 if (UseVolatile) { 4298 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity(); 4299 if (VolatileOffset) 4300 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset); 4301 } 4302 4303 QualType fieldType = 4304 field->getType().withCVRQualifiers(base.getVRQualifiers()); 4305 // TODO: Support TBAA for bit fields. 4306 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); 4307 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo, 4308 TBAAAccessInfo()); 4309 } 4310 4311 // Fields of may-alias structures are may-alias themselves. 4312 // FIXME: this should get propagated down through anonymous structs 4313 // and unions. 4314 QualType FieldType = field->getType(); 4315 const RecordDecl *rec = field->getParent(); 4316 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource(); 4317 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource)); 4318 TBAAAccessInfo FieldTBAAInfo; 4319 if (base.getTBAAInfo().isMayAlias() || 4320 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) { 4321 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); 4322 } else if (rec->isUnion()) { 4323 // TODO: Support TBAA for unions. 4324 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo(); 4325 } else { 4326 // If no base type has been assigned for the base access, then try to generate 4327 // one for this base lvalue. 4328 FieldTBAAInfo = base.getTBAAInfo(); 4329 if (!FieldTBAAInfo.BaseType) { 4330 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType()); 4331 assert(!FieldTBAAInfo.Offset && 4332 "Nonzero offset for an access with no base type!"); 4333 } 4334 4335 // Adjust offset to be relative to the base type. 4336 const ASTRecordLayout &Layout = 4337 getContext().getASTRecordLayout(field->getParent()); 4338 unsigned CharWidth = getContext().getCharWidth(); 4339 if (FieldTBAAInfo.BaseType) 4340 FieldTBAAInfo.Offset += 4341 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth; 4342 4343 // Update the final access type and size. 4344 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType); 4345 FieldTBAAInfo.Size = 4346 getContext().getTypeSizeInChars(FieldType).getQuantity(); 4347 } 4348 4349 Address addr = base.getAddress(*this); 4350 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) { 4351 if (CGM.getCodeGenOpts().StrictVTablePointers && 4352 ClassDef->isDynamicClass()) { 4353 // Getting to any field of a dynamic object requires stripping the dynamic 4354 // information provided by invariant.group. This is because accessing 4355 // fields may leak the real address of the dynamic object, which could result 4356 // in miscompilation when the leaked pointer is compared.
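// (Illustrative:) the strip shows up in IR as a call to
// llvm.strip.invariant.group on the object pointer, emitted before the field
// GEPs below.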
4357 auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer()); 4358 addr = Address(stripped, addr.getAlignment()); 4359 } 4360 } 4361 4362 unsigned RecordCVR = base.getVRQualifiers(); 4363 if (rec->isUnion()) { 4364 // For unions, there is no pointer adjustment. 4365 if (CGM.getCodeGenOpts().StrictVTablePointers && 4366 hasAnyVptr(FieldType, getContext())) 4367 // Because unions can easily skip invariant.barriers, we need to add 4368 // a barrier every time CXXRecord field with vptr is referenced. 4369 addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()), 4370 addr.getAlignment()); 4371 4372 if (IsInPreservedAIRegion || 4373 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) { 4374 // Remember the original union field index 4375 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(), 4376 rec->getLocation()); 4377 addr = Address( 4378 Builder.CreatePreserveUnionAccessIndex( 4379 addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo), 4380 addr.getAlignment()); 4381 } 4382 4383 if (FieldType->isReferenceType()) 4384 addr = Builder.CreateElementBitCast( 4385 addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName()); 4386 } else { 4387 if (!IsInPreservedAIRegion && 4388 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) 4389 // For structs, we GEP to the field that the record layout suggests. 4390 addr = emitAddrOfFieldStorage(*this, addr, field); 4391 else 4392 // Remember the original struct field index 4393 addr = emitPreserveStructAccess(*this, base, addr, field); 4394 } 4395 4396 // If this is a reference field, load the reference right now. 4397 if (FieldType->isReferenceType()) { 4398 LValue RefLVal = 4399 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); 4400 if (RecordCVR & Qualifiers::Volatile) 4401 RefLVal.getQuals().addVolatile(); 4402 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo); 4403 4404 // Qualifiers on the struct don't apply to the referencee. 4405 RecordCVR = 0; 4406 FieldType = FieldType->getPointeeType(); 4407 } 4408 4409 // Make sure that the address is pointing to the right type. This is critical 4410 // for both unions and structs. A union needs a bitcast, a struct element 4411 // will need a bitcast if the LLVM type laid out doesn't match the desired 4412 // type. 4413 addr = Builder.CreateElementBitCast( 4414 addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName()); 4415 4416 if (field->hasAttr<AnnotateAttr>()) 4417 addr = EmitFieldAnnotations(field, addr); 4418 4419 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo); 4420 LV.getQuals().addCVRQualifiers(RecordCVR); 4421 4422 // __weak attribute on a field is ignored. 4423 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 4424 LV.getQuals().removeObjCGCAttr(); 4425 4426 return LV; 4427 } 4428 4429 LValue 4430 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, 4431 const FieldDecl *Field) { 4432 QualType FieldType = Field->getType(); 4433 4434 if (!FieldType->isReferenceType()) 4435 return EmitLValueForField(Base, Field); 4436 4437 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field); 4438 4439 // Make sure that the address is pointing to the right type. 
4440 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 4441 V = Builder.CreateElementBitCast(V, llvmType, Field->getName()); 4442 4443 // TODO: Generate TBAA information that describes this access as a structure 4444 // member access and not just an access to an object of the field's type. This 4445 // should be similar to what we do in EmitLValueForField(). 4446 LValueBaseInfo BaseInfo = Base.getBaseInfo(); 4447 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); 4448 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); 4449 return MakeAddrLValue(V, FieldType, FieldBaseInfo, 4450 CGM.getTBAAInfoForSubobject(Base, FieldType)); 4451 } 4452 4453 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 4454 if (E->isFileScope()) { 4455 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 4456 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl); 4457 } 4458 if (E->getType()->isVariablyModifiedType()) 4459 // make sure to emit the VLA size. 4460 EmitVariablyModifiedType(E->getType()); 4461 4462 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 4463 const Expr *InitExpr = E->getInitializer(); 4464 LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); 4465 4466 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 4467 /*Init*/ true); 4468 4469 // Block-scope compound literals are destroyed at the end of the enclosing 4470 // scope in C. 4471 if (!getLangOpts().CPlusPlus) 4472 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) 4473 pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr, 4474 E->getType(), getDestroyer(DtorKind), 4475 DtorKind & EHCleanup); 4476 4477 return Result; 4478 } 4479 4480 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { 4481 if (!E->isGLValue()) 4482 // Initializing an aggregate temporary in C++11: T{...}. 4483 return EmitAggExprToLValue(E); 4484 4485 // An lvalue initializer list must be initializing a reference. 4486 assert(E->isTransparent() && "non-transparent glvalue init list"); 4487 return EmitLValue(E->getInit(0)); 4488 } 4489 4490 /// Emit the operand of a glvalue conditional operator. This is either a glvalue 4491 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no 4492 /// LValue is returned and the current block has been terminated. 4493 static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, 4494 const Expr *Operand) { 4495 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) { 4496 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); 4497 return None; 4498 } 4499 4500 return CGF.EmitLValue(Operand); 4501 } 4502 4503 LValue CodeGenFunction:: 4504 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 4505 if (!expr->isGLValue()) { 4506 // ?: here should be an aggregate. 4507 assert(hasAggregateEvaluationKind(expr->getType()) && 4508 "Unexpected conditional operator!"); 4509 return EmitAggExprToLValue(expr); 4510 } 4511 4512 OpaqueValueMapping binding(*this, expr); 4513 4514 const Expr *condExpr = expr->getCond(); 4515 bool CondExprBool; 4516 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 4517 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 4518 if (!CondExprBool) std::swap(live, dead); 4519 4520 if (!ContainsLabel(dead)) { 4521 // If the true case is live, we need to track its region. 
4522 if (CondExprBool) 4523 incrementProfileCounter(expr); 4524 // If a throw expression we emit it and return an undefined lvalue 4525 // because it can't be used. 4526 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) { 4527 EmitCXXThrowExpr(ThrowExpr); 4528 llvm::Type *Ty = 4529 llvm::PointerType::getUnqual(ConvertType(dead->getType())); 4530 return MakeAddrLValue( 4531 Address(llvm::UndefValue::get(Ty), CharUnits::One()), 4532 dead->getType()); 4533 } 4534 return EmitLValue(live); 4535 } 4536 } 4537 4538 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 4539 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 4540 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 4541 4542 ConditionalEvaluation eval(*this); 4543 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr)); 4544 4545 // Any temporaries created here are conditional. 4546 EmitBlock(lhsBlock); 4547 incrementProfileCounter(expr); 4548 eval.begin(*this); 4549 Optional<LValue> lhs = 4550 EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); 4551 eval.end(*this); 4552 4553 if (lhs && !lhs->isSimple()) 4554 return EmitUnsupportedLValue(expr, "conditional operator"); 4555 4556 lhsBlock = Builder.GetInsertBlock(); 4557 if (lhs) 4558 Builder.CreateBr(contBlock); 4559 4560 // Any temporaries created here are conditional. 4561 EmitBlock(rhsBlock); 4562 eval.begin(*this); 4563 Optional<LValue> rhs = 4564 EmitLValueOrThrowExpression(*this, expr->getFalseExpr()); 4565 eval.end(*this); 4566 if (rhs && !rhs->isSimple()) 4567 return EmitUnsupportedLValue(expr, "conditional operator"); 4568 rhsBlock = Builder.GetInsertBlock(); 4569 4570 EmitBlock(contBlock); 4571 4572 if (lhs && rhs) { 4573 llvm::PHINode *phi = 4574 Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue"); 4575 phi->addIncoming(lhs->getPointer(*this), lhsBlock); 4576 phi->addIncoming(rhs->getPointer(*this), rhsBlock); 4577 Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment())); 4578 AlignmentSource alignSource = 4579 std::max(lhs->getBaseInfo().getAlignmentSource(), 4580 rhs->getBaseInfo().getAlignmentSource()); 4581 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( 4582 lhs->getTBAAInfo(), rhs->getTBAAInfo()); 4583 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource), 4584 TBAAInfo); 4585 } else { 4586 assert((lhs || rhs) && 4587 "both operands of glvalue conditional are throw-expressions?"); 4588 return lhs ? *lhs : *rhs; 4589 } 4590 } 4591 4592 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference 4593 /// type. If the cast is to a reference, we can have the usual lvalue result, 4594 /// otherwise if a cast is needed by the code generator in an lvalue context, 4595 /// then it must mean that we need the address of an aggregate in order to 4596 /// access one of its members. This can happen for all the reasons that casts 4597 /// are permitted with aggregate result, including noop aggregate casts, and 4598 /// cast from scalar to union. 
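/// For example (an illustrative case, not from the original comment), with the
/// GNU cast-to-union extension
///
///   union U { int i; float f; };
///   int use(int x) { return ((union U)x).i; }
///
/// the member access needs the address of the casted aggregate, so the
/// CK_ToUnion case below materializes it via the aggregate emission path.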
4599 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 4600 switch (E->getCastKind()) { 4601 case CK_ToVoid: 4602 case CK_BitCast: 4603 case CK_LValueToRValueBitCast: 4604 case CK_ArrayToPointerDecay: 4605 case CK_FunctionToPointerDecay: 4606 case CK_NullToMemberPointer: 4607 case CK_NullToPointer: 4608 case CK_IntegralToPointer: 4609 case CK_PointerToIntegral: 4610 case CK_PointerToBoolean: 4611 case CK_VectorSplat: 4612 case CK_IntegralCast: 4613 case CK_BooleanToSignedIntegral: 4614 case CK_IntegralToBoolean: 4615 case CK_IntegralToFloating: 4616 case CK_FloatingToIntegral: 4617 case CK_FloatingToBoolean: 4618 case CK_FloatingCast: 4619 case CK_FloatingRealToComplex: 4620 case CK_FloatingComplexToReal: 4621 case CK_FloatingComplexToBoolean: 4622 case CK_FloatingComplexCast: 4623 case CK_FloatingComplexToIntegralComplex: 4624 case CK_IntegralRealToComplex: 4625 case CK_IntegralComplexToReal: 4626 case CK_IntegralComplexToBoolean: 4627 case CK_IntegralComplexCast: 4628 case CK_IntegralComplexToFloatingComplex: 4629 case CK_DerivedToBaseMemberPointer: 4630 case CK_BaseToDerivedMemberPointer: 4631 case CK_MemberPointerToBoolean: 4632 case CK_ReinterpretMemberPointer: 4633 case CK_AnyPointerToBlockPointerCast: 4634 case CK_ARCProduceObject: 4635 case CK_ARCConsumeObject: 4636 case CK_ARCReclaimReturnedObject: 4637 case CK_ARCExtendBlockObject: 4638 case CK_CopyAndAutoreleaseBlockObject: 4639 case CK_IntToOCLSampler: 4640 case CK_FloatingToFixedPoint: 4641 case CK_FixedPointToFloating: 4642 case CK_FixedPointCast: 4643 case CK_FixedPointToBoolean: 4644 case CK_FixedPointToIntegral: 4645 case CK_IntegralToFixedPoint: 4646 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 4647 4648 case CK_Dependent: 4649 llvm_unreachable("dependent cast kind in IR gen!"); 4650 4651 case CK_BuiltinFnToFnPtr: 4652 llvm_unreachable("builtin functions are handled elsewhere"); 4653 4654 // These are never l-values; just use the aggregate emission code. 4655 case CK_NonAtomicToAtomic: 4656 case CK_AtomicToNonAtomic: 4657 return EmitAggExprToLValue(E); 4658 4659 case CK_Dynamic: { 4660 LValue LV = EmitLValue(E->getSubExpr()); 4661 Address V = LV.getAddress(*this); 4662 const auto *DCE = cast<CXXDynamicCastExpr>(E); 4663 return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 4664 } 4665 4666 case CK_ConstructorConversion: 4667 case CK_UserDefinedConversion: 4668 case CK_CPointerToObjCPointerCast: 4669 case CK_BlockPointerToObjCPointerCast: 4670 case CK_NoOp: 4671 case CK_LValueToRValue: 4672 return EmitLValue(E->getSubExpr()); 4673 4674 case CK_UncheckedDerivedToBase: 4675 case CK_DerivedToBase: { 4676 const auto *DerivedClassTy = 4677 E->getSubExpr()->getType()->castAs<RecordType>(); 4678 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 4679 4680 LValue LV = EmitLValue(E->getSubExpr()); 4681 Address This = LV.getAddress(*this); 4682 4683 // Perform the derived-to-base conversion 4684 Address Base = GetAddressOfBaseClass( 4685 This, DerivedClassDecl, E->path_begin(), E->path_end(), 4686 /*NullCheckValue=*/false, E->getExprLoc()); 4687 4688 // TODO: Support accesses to members of base classes in TBAA. For now, we 4689 // conservatively pretend that the complete object is of the base class 4690 // type. 
4691 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(), 4692 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4693 } 4694 case CK_ToUnion: 4695 return EmitAggExprToLValue(E); 4696 case CK_BaseToDerived: { 4697 const auto *DerivedClassTy = E->getType()->castAs<RecordType>(); 4698 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 4699 4700 LValue LV = EmitLValue(E->getSubExpr()); 4701 4702 // Perform the base-to-derived conversion 4703 Address Derived = GetAddressOfDerivedClass( 4704 LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(), 4705 /*NullCheckValue=*/false); 4706 4707 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is 4708 // performed and the object is not of the derived type. 4709 if (sanitizePerformTypeCheck()) 4710 EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), 4711 Derived.getPointer(), E->getType()); 4712 4713 if (SanOpts.has(SanitizerKind::CFIDerivedCast)) 4714 EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(), 4715 /*MayBeNull=*/false, CFITCK_DerivedCast, 4716 E->getBeginLoc()); 4717 4718 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(), 4719 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4720 } 4721 case CK_LValueBitCast: { 4722 // This must be a reinterpret_cast (or c-style equivalent). 4723 const auto *CE = cast<ExplicitCastExpr>(E); 4724 4725 CGM.EmitExplicitCastExprType(CE, this); 4726 LValue LV = EmitLValue(E->getSubExpr()); 4727 Address V = Builder.CreateBitCast(LV.getAddress(*this), 4728 ConvertType(CE->getTypeAsWritten())); 4729 4730 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) 4731 EmitVTablePtrCheckForCast(E->getType(), V.getPointer(), 4732 /*MayBeNull=*/false, CFITCK_UnrelatedCast, 4733 E->getBeginLoc()); 4734 4735 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), 4736 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4737 } 4738 case CK_AddressSpaceConversion: { 4739 LValue LV = EmitLValue(E->getSubExpr()); 4740 QualType DestTy = getContext().getPointerType(E->getType()); 4741 llvm::Value *V = getTargetHooks().performAddrSpaceCast( 4742 *this, LV.getPointer(*this), 4743 E->getSubExpr()->getType().getAddressSpace(), 4744 E->getType().getAddressSpace(), ConvertType(DestTy)); 4745 return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()), 4746 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); 4747 } 4748 case CK_ObjCObjectLValueCast: { 4749 LValue LV = EmitLValue(E->getSubExpr()); 4750 Address V = Builder.CreateElementBitCast(LV.getAddress(*this), 4751 ConvertType(E->getType())); 4752 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), 4753 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4754 } 4755 case CK_ZeroToOCLOpaqueType: 4756 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid"); 4757 } 4758 4759 llvm_unreachable("Unhandled lvalue cast kind?"); 4760 } 4761 4762 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 4763 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 4764 return getOrCreateOpaqueLValueMapping(e); 4765 } 4766 4767 LValue 4768 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { 4769 assert(OpaqueValueMapping::shouldBindAsLValue(e)); 4770 4771 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator 4772 it = OpaqueLValues.find(e); 4773 4774 if (it != OpaqueLValues.end()) 4775 return it->second; 4776 4777 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); 4778 return EmitLValue(e->getSourceExpr()); 4779 } 4780 4781 RValue 4782 
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { 4783 assert(!OpaqueValueMapping::shouldBindAsLValue(e)); 4784 4785 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator 4786 it = OpaqueRValues.find(e); 4787 4788 if (it != OpaqueRValues.end()) 4789 return it->second; 4790 4791 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); 4792 return EmitAnyExpr(e->getSourceExpr()); 4793 } 4794 4795 RValue CodeGenFunction::EmitRValueForField(LValue LV, 4796 const FieldDecl *FD, 4797 SourceLocation Loc) { 4798 QualType FT = FD->getType(); 4799 LValue FieldLV = EmitLValueForField(LV, FD); 4800 switch (getEvaluationKind(FT)) { 4801 case TEK_Complex: 4802 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc)); 4803 case TEK_Aggregate: 4804 return FieldLV.asAggregateRValue(*this); 4805 case TEK_Scalar: 4806 // This routine is used to load fields one-by-one to perform a copy, so 4807 // don't load reference fields. 4808 if (FD->getType()->isReferenceType()) 4809 return RValue::get(FieldLV.getPointer(*this)); 4810 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a 4811 // primitive load. 4812 if (FieldLV.isBitField()) 4813 return EmitLoadOfLValue(FieldLV, Loc); 4814 return RValue::get(EmitLoadOfScalar(FieldLV, Loc)); 4815 } 4816 llvm_unreachable("bad evaluation kind"); 4817 } 4818 4819 //===--------------------------------------------------------------------===// 4820 // Expression Emission 4821 //===--------------------------------------------------------------------===// 4822 4823 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 4824 ReturnValueSlot ReturnValue) { 4825 // Builtins never have block type. 4826 if (E->getCallee()->getType()->isBlockPointerType()) 4827 return EmitBlockCallExpr(E, ReturnValue); 4828 4829 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E)) 4830 return EmitCXXMemberCallExpr(CE, ReturnValue); 4831 4832 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E)) 4833 return EmitCUDAKernelCallExpr(CE, ReturnValue); 4834 4835 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E)) 4836 if (const CXXMethodDecl *MD = 4837 dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl())) 4838 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 4839 4840 CGCallee callee = EmitCallee(E->getCallee()); 4841 4842 if (callee.isBuiltin()) { 4843 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), 4844 E, ReturnValue); 4845 } 4846 4847 if (callee.isPseudoDestructor()) { 4848 return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr()); 4849 } 4850 4851 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue); 4852 } 4853 4854 /// Emit a CallExpr without considering whether it might be a subclass. 4855 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E, 4856 ReturnValueSlot ReturnValue) { 4857 CGCallee Callee = EmitCallee(E->getCallee()); 4858 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue); 4859 } 4860 4861 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) { 4862 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 4863 4864 if (auto builtinID = FD->getBuiltinID()) { 4865 // Replaceable builtin provide their own implementation of a builtin. Unless 4866 // we are in the builtin implementation itself, don't call the actual 4867 // builtin. If we are in the builtin implementation, avoid trivial infinite 4868 // recursion. 
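// Illustrative sketch, not part of the original source: a header that ships
// its own replaceable definition, e.g.
//
//   extern inline __attribute__((gnu_inline, always_inline)) void *
//   memcpy(void *d, const void *s, size_t n) { /* checked body */ }
//
// should have ordinary calls resolve to that definition instead of the
// builtin lowering, while a call emitted from inside that very definition
// still uses the builtin so it does not recurse into itself.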
4869 if (!FD->isInlineBuiltinDeclaration() || 4870 CGF.CurFn->getName() == FD->getName()) 4871 return CGCallee::forBuiltin(builtinID, FD); 4872 } 4873 4874 llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD); 4875 return CGCallee::forDirect(calleePtr, GD); 4876 } 4877 4878 CGCallee CodeGenFunction::EmitCallee(const Expr *E) { 4879 E = E->IgnoreParens(); 4880 4881 // Look through function-to-pointer decay. 4882 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) { 4883 if (ICE->getCastKind() == CK_FunctionToPointerDecay || 4884 ICE->getCastKind() == CK_BuiltinFnToFnPtr) { 4885 return EmitCallee(ICE->getSubExpr()); 4886 } 4887 4888 // Resolve direct calls. 4889 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) { 4890 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) { 4891 return EmitDirectCallee(*this, FD); 4892 } 4893 } else if (auto ME = dyn_cast<MemberExpr>(E)) { 4894 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) { 4895 EmitIgnoredExpr(ME->getBase()); 4896 return EmitDirectCallee(*this, FD); 4897 } 4898 4899 // Look through template substitutions. 4900 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) { 4901 return EmitCallee(NTTP->getReplacement()); 4902 4903 // Treat pseudo-destructor calls differently. 4904 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) { 4905 return CGCallee::forPseudoDestructor(PDE); 4906 } 4907 4908 // Otherwise, we have an indirect reference. 4909 llvm::Value *calleePtr; 4910 QualType functionType; 4911 if (auto ptrType = E->getType()->getAs<PointerType>()) { 4912 calleePtr = EmitScalarExpr(E); 4913 functionType = ptrType->getPointeeType(); 4914 } else { 4915 functionType = E->getType(); 4916 calleePtr = EmitLValue(E).getPointer(*this); 4917 } 4918 assert(functionType->isFunctionType()); 4919 4920 GlobalDecl GD; 4921 if (const auto *VD = 4922 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) 4923 GD = GlobalDecl(VD); 4924 4925 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD); 4926 CGCallee callee(calleeInfo, calleePtr); 4927 return callee; 4928 } 4929 4930 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 4931 // Comma expressions just emit their LHS then their RHS as an l-value. 4932 if (E->getOpcode() == BO_Comma) { 4933 EmitIgnoredExpr(E->getLHS()); 4934 EnsureInsertPoint(); 4935 return EmitLValue(E->getRHS()); 4936 } 4937 4938 if (E->getOpcode() == BO_PtrMemD || 4939 E->getOpcode() == BO_PtrMemI) 4940 return EmitPointerToDataMemberBinaryExpr(E); 4941 4942 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 4943 4944 // Note that in all of these cases, __block variables need the RHS 4945 // evaluated first just in case the variable gets moved by the RHS. 4946 4947 switch (getEvaluationKind(E->getType())) { 4948 case TEK_Scalar: { 4949 switch (E->getLHS()->getType().getObjCLifetime()) { 4950 case Qualifiers::OCL_Strong: 4951 return EmitARCStoreStrong(E, /*ignored*/ false).first; 4952 4953 case Qualifiers::OCL_Autoreleasing: 4954 return EmitARCStoreAutoreleasing(E).first; 4955 4956 // No reason to do any of these differently. 
4957 case Qualifiers::OCL_None: 4958 case Qualifiers::OCL_ExplicitNone: 4959 case Qualifiers::OCL_Weak: 4960 break; 4961 } 4962 4963 RValue RV = EmitAnyExpr(E->getRHS()); 4964 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); 4965 if (RV.isScalar()) 4966 EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc()); 4967 EmitStoreThroughLValue(RV, LV); 4968 if (getLangOpts().OpenMP) 4969 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, 4970 E->getLHS()); 4971 return LV; 4972 } 4973 4974 case TEK_Complex: 4975 return EmitComplexAssignmentLValue(E); 4976 4977 case TEK_Aggregate: 4978 return EmitAggExprToLValue(E); 4979 } 4980 llvm_unreachable("bad evaluation kind"); 4981 } 4982 4983 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 4984 RValue RV = EmitCallExpr(E); 4985 4986 if (!RV.isScalar()) 4987 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), 4988 AlignmentSource::Decl); 4989 4990 assert(E->getCallReturnType(getContext())->isReferenceType() && 4991 "Can't have a scalar return unless the return type is a " 4992 "reference type!"); 4993 4994 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); 4995 } 4996 4997 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 4998 // FIXME: This shouldn't require another copy. 4999 return EmitAggExprToLValue(E); 5000 } 5001 5002 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 5003 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 5004 && "binding l-value to type which needs a temporary"); 5005 AggValueSlot Slot = CreateAggTemp(E->getType()); 5006 EmitCXXConstructExpr(E, Slot); 5007 return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); 5008 } 5009 5010 LValue 5011 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 5012 return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 5013 } 5014 5015 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { 5016 return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()), 5017 ConvertType(E->getType())); 5018 } 5019 5020 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { 5021 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(), 5022 AlignmentSource::Decl); 5023 } 5024 5025 LValue 5026 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 5027 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 5028 Slot.setExternallyDestructed(); 5029 EmitAggExpr(E->getSubExpr(), Slot); 5030 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress()); 5031 return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl); 5032 } 5033 5034 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 5035 RValue RV = EmitObjCMessageExpr(E); 5036 5037 if (!RV.isScalar()) 5038 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), 5039 AlignmentSource::Decl); 5040 5041 assert(E->getMethodDecl()->getReturnType()->isReferenceType() && 5042 "Can't have a scalar return unless the return type is a " 5043 "reference type!"); 5044 5045 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); 5046 } 5047 5048 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 5049 Address V = 5050 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector()); 5051 return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl); 5052 } 5053 5054 llvm::Value *CodeGenFunction::EmitIvarOffset(const 
ObjCInterfaceDecl *Interface, 5055 const ObjCIvarDecl *Ivar) { 5056 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 5057 } 5058 5059 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 5060 llvm::Value *BaseValue, 5061 const ObjCIvarDecl *Ivar, 5062 unsigned CVRQualifiers) { 5063 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 5064 Ivar, CVRQualifiers); 5065 } 5066 5067 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 5068 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 5069 llvm::Value *BaseValue = nullptr; 5070 const Expr *BaseExpr = E->getBase(); 5071 Qualifiers BaseQuals; 5072 QualType ObjectTy; 5073 if (E->isArrow()) { 5074 BaseValue = EmitScalarExpr(BaseExpr); 5075 ObjectTy = BaseExpr->getType()->getPointeeType(); 5076 BaseQuals = ObjectTy.getQualifiers(); 5077 } else { 5078 LValue BaseLV = EmitLValue(BaseExpr); 5079 BaseValue = BaseLV.getPointer(*this); 5080 ObjectTy = BaseExpr->getType(); 5081 BaseQuals = ObjectTy.getQualifiers(); 5082 } 5083 5084 LValue LV = 5085 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 5086 BaseQuals.getCVRQualifiers()); 5087 setObjCGCLValueClass(getContext(), E, LV); 5088 return LV; 5089 } 5090 5091 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 5092 // Can only get l-value for message expression returning aggregate type 5093 RValue RV = EmitAnyExprToTemp(E); 5094 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), 5095 AlignmentSource::Decl); 5096 } 5097 5098 RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee, 5099 const CallExpr *E, ReturnValueSlot ReturnValue, 5100 llvm::Value *Chain) { 5101 // Get the actual function type. The callee type will always be a pointer to 5102 // function type or a block pointer type. 5103 assert(CalleeType->isFunctionPointerType() && 5104 "Call must have function pointer type!"); 5105 5106 const Decl *TargetDecl = 5107 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl(); 5108 5109 CalleeType = getContext().getCanonicalType(CalleeType); 5110 5111 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType(); 5112 5113 CGCallee Callee = OrigCallee; 5114 5115 if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) && 5116 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) { 5117 if (llvm::Constant *PrefixSig = 5118 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { 5119 SanitizerScope SanScope(this); 5120 // Remove any (C++17) exception specifications, to allow calling e.g. a 5121 // noexcept function through a non-noexcept pointer. 
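// Illustrative sketch, not part of the original source: with
// -fsanitize=function,
//
//   void f() noexcept;
//   void (*p)() = reinterpret_cast<void (*)()>(&f);
//   p();
//
// the emitted check compares the RTTI stored in f's prologue against the
// descriptor for the call's type; because both sides are compared with the
// exception specification dropped, this particular call is not flagged,
// whereas calling f through, say, void (*)(int) would reach the
// FunctionTypeMismatch handler.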
5122 auto ProtoTy = 5123 getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None); 5124 llvm::Constant *FTRTTIConst = 5125 CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true); 5126 llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty}; 5127 llvm::StructType *PrefixStructTy = llvm::StructType::get( 5128 CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true); 5129 5130 llvm::Value *CalleePtr = Callee.getFunctionPointer(); 5131 5132 llvm::Value *CalleePrefixStruct = Builder.CreateBitCast( 5133 CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy)); 5134 llvm::Value *CalleeSigPtr = 5135 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0); 5136 llvm::Value *CalleeSig = 5137 Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign()); 5138 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig); 5139 5140 llvm::BasicBlock *Cont = createBasicBlock("cont"); 5141 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck"); 5142 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont); 5143 5144 EmitBlock(TypeCheck); 5145 llvm::Value *CalleeRTTIPtr = 5146 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1); 5147 llvm::Value *CalleeRTTIEncoded = 5148 Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign()); 5149 llvm::Value *CalleeRTTI = 5150 DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded); 5151 llvm::Value *CalleeRTTIMatch = 5152 Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst); 5153 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()), 5154 EmitCheckTypeDescriptor(CalleeType)}; 5155 EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function), 5156 SanitizerHandler::FunctionTypeMismatch, StaticData, 5157 {CalleePtr, CalleeRTTI, FTRTTIConst}); 5158 5159 Builder.CreateBr(Cont); 5160 EmitBlock(Cont); 5161 } 5162 } 5163 5164 const auto *FnType = cast<FunctionType>(PointeeType); 5165 5166 // If we are checking indirect calls and this call is indirect, check that the 5167 // function pointer is a member of the bit set for the function type. 
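// Illustrative sketch, not part of the original source: with
// -fsanitize=cfi-icall, an indirect call such as
//
//   void (*fp)(int) = lookup();   // `lookup` is a hypothetical helper
//   fp(42);
//
// is preceded by an llvm.type.test of the callee pointer against the type
// identifier for "void (int)"; a failing test either takes the cross-DSO
// slow path or reports through the CFICheckFail handler, as emitted below.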
5168 if (SanOpts.has(SanitizerKind::CFIICall) && 5169 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) { 5170 SanitizerScope SanScope(this); 5171 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall); 5172 5173 llvm::Metadata *MD; 5174 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers) 5175 MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0)); 5176 else 5177 MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0)); 5178 5179 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD); 5180 5181 llvm::Value *CalleePtr = Callee.getFunctionPointer(); 5182 llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy); 5183 llvm::Value *TypeTest = Builder.CreateCall( 5184 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId}); 5185 5186 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD); 5187 llvm::Constant *StaticData[] = { 5188 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall), 5189 EmitCheckSourceLocation(E->getBeginLoc()), 5190 EmitCheckTypeDescriptor(QualType(FnType, 0)), 5191 }; 5192 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) { 5193 EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId, 5194 CastedCallee, StaticData); 5195 } else { 5196 EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall), 5197 SanitizerHandler::CFICheckFail, StaticData, 5198 {CastedCallee, llvm::UndefValue::get(IntPtrTy)}); 5199 } 5200 } 5201 5202 CallArgList Args; 5203 if (Chain) 5204 Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)), 5205 CGM.getContext().VoidPtrTy); 5206 5207 // C++17 requires that we evaluate arguments to a call using assignment syntax 5208 // right-to-left, and that we evaluate arguments to certain other operators 5209 // left-to-right. Note that we allow this to override the order dictated by 5210 // the calling convention on the MS ABI, which means that parameter 5211 // destruction order is not necessarily reverse construction order. 5212 // FIXME: Revisit this based on C++ committee response to unimplementability. 5213 EvaluationOrder Order = EvaluationOrder::Default; 5214 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) { 5215 if (OCE->isAssignmentOp()) 5216 Order = EvaluationOrder::ForceRightToLeft; 5217 else { 5218 switch (OCE->getOperator()) { 5219 case OO_LessLess: 5220 case OO_GreaterGreater: 5221 case OO_AmpAmp: 5222 case OO_PipePipe: 5223 case OO_Comma: 5224 case OO_ArrowStar: 5225 Order = EvaluationOrder::ForceLeftToRight; 5226 break; 5227 default: 5228 break; 5229 } 5230 } 5231 } 5232 5233 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(), 5234 E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); 5235 5236 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( 5237 Args, FnType, /*ChainCall=*/Chain); 5238 5239 // C99 6.5.2.2p6: 5240 // If the expression that denotes the called function has a type 5241 // that does not include a prototype, [the default argument 5242 // promotions are performed]. If the number of arguments does not 5243 // equal the number of parameters, the behavior is undefined. If 5244 // the function is defined with a type that includes a prototype, 5245 // and either the prototype ends with an ellipsis (, ...) or the 5246 // types of the arguments after promotion are not compatible with 5247 // the types of the parameters, the behavior is undefined. 
If the
5248 // function is defined with a type that does not include a
5249 // prototype, and the types of the arguments after promotion are
5250 // not compatible with those of the parameters after promotion,
5251 // the behavior is undefined [except in some trivial cases].
5252 // That is, in the general case, we should assume that a call
5253 // through an unprototyped function type works like a *non-variadic*
5254 // call. The way we make this work is to cast to the exact type
5255 // of the promoted arguments.
5256 //
5257 // Chain calls use this same code path to add the invisible chain parameter
5258 // to the function type.
5259 if (isa<FunctionNoProtoType>(FnType) || Chain) {
5260 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
5261 int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
5262 CalleeTy = CalleeTy->getPointerTo(AS);
5263
5264 llvm::Value *CalleePtr = Callee.getFunctionPointer();
5265 CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
5266 Callee.setFunctionPointer(CalleePtr);
5267 }
5268
5269 llvm::CallBase *CallOrInvoke = nullptr;
5270 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
5271 E->getExprLoc());
5272
5273 // Generate a function declaration DISubprogram so it can be used
5274 // in debug info about call sites.
5275 if (CGDebugInfo *DI = getDebugInfo()) {
5276 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl))
5277 DI->EmitFuncDeclForCallSite(CallOrInvoke, QualType(FnType, 0),
5278 CalleeDecl);
5279 }
5280
5281 return Call;
5282 }
5283
5284 LValue CodeGenFunction::
5285 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
5286 Address BaseAddr = Address::invalid();
5287 if (E->getOpcode() == BO_PtrMemI) {
5288 BaseAddr = EmitPointerWithAlignment(E->getLHS());
5289 } else {
5290 BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
5291 }
5292
5293 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
5294 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
5295
5296 LValueBaseInfo BaseInfo;
5297 TBAAAccessInfo TBAAInfo;
5298 Address MemberAddr =
5299 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
5300 &TBAAInfo);
5301
5302 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
5303 }
5304
5305 /// Given the address of a temporary variable, produce an r-value of
5306 /// its type.
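/// For instance (illustrative), a `_Complex double` temporary is loaded as a
/// real/imaginary pair, a struct temporary is wrapped as an aggregate r-value
/// carrying its address, and an `int` temporary is loaded as a scalar.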
5307 RValue CodeGenFunction::convertTempToRValue(Address addr, 5308 QualType type, 5309 SourceLocation loc) { 5310 LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl); 5311 switch (getEvaluationKind(type)) { 5312 case TEK_Complex: 5313 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc)); 5314 case TEK_Aggregate: 5315 return lvalue.asAggregateRValue(*this); 5316 case TEK_Scalar: 5317 return RValue::get(EmitLoadOfScalar(lvalue, loc)); 5318 } 5319 llvm_unreachable("bad evaluation kind"); 5320 } 5321 5322 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) { 5323 assert(Val->getType()->isFPOrFPVectorTy()); 5324 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val)) 5325 return; 5326 5327 llvm::MDBuilder MDHelper(getLLVMContext()); 5328 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy); 5329 5330 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node); 5331 } 5332 5333 namespace { 5334 struct LValueOrRValue { 5335 LValue LV; 5336 RValue RV; 5337 }; 5338 } 5339 5340 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, 5341 const PseudoObjectExpr *E, 5342 bool forLValue, 5343 AggValueSlot slot) { 5344 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 5345 5346 // Find the result expression, if any. 5347 const Expr *resultExpr = E->getResultExpr(); 5348 LValueOrRValue result; 5349 5350 for (PseudoObjectExpr::const_semantics_iterator 5351 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 5352 const Expr *semantic = *i; 5353 5354 // If this semantic expression is an opaque value, bind it 5355 // to the result of its source expression. 5356 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 5357 // Skip unique OVEs. 5358 if (ov->isUnique()) { 5359 assert(ov != resultExpr && 5360 "A unique OVE cannot be used as the result expression"); 5361 continue; 5362 } 5363 5364 // If this is the result expression, we may need to evaluate 5365 // directly into the slot. 5366 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 5367 OVMA opaqueData; 5368 if (ov == resultExpr && ov->isRValue() && !forLValue && 5369 CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) { 5370 CGF.EmitAggExpr(ov->getSourceExpr(), slot); 5371 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(), 5372 AlignmentSource::Decl); 5373 opaqueData = OVMA::bind(CGF, ov, LV); 5374 result.RV = slot.asRValue(); 5375 5376 // Otherwise, emit as normal. 5377 } else { 5378 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 5379 5380 // If this is the result, also evaluate the result now. 5381 if (ov == resultExpr) { 5382 if (forLValue) 5383 result.LV = CGF.EmitLValue(ov); 5384 else 5385 result.RV = CGF.EmitAnyExpr(ov, slot); 5386 } 5387 } 5388 5389 opaques.push_back(opaqueData); 5390 5391 // Otherwise, if the expression is the result, evaluate it 5392 // and remember the result. 5393 } else if (semantic == resultExpr) { 5394 if (forLValue) 5395 result.LV = CGF.EmitLValue(semantic); 5396 else 5397 result.RV = CGF.EmitAnyExpr(semantic, slot); 5398 5399 // Otherwise, evaluate the expression in an ignored context. 5400 } else { 5401 CGF.EmitIgnoredExpr(semantic); 5402 } 5403 } 5404 5405 // Unbind all the opaques now. 
5406 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 5407 opaques[i].unbind(CGF); 5408 5409 return result; 5410 } 5411 5412 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, 5413 AggValueSlot slot) { 5414 return emitPseudoObjectExpr(*this, E, false, slot).RV; 5415 } 5416 5417 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { 5418 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; 5419 } 5420