//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <numeric>
#include <optional>
#include <string>

using namespace clang;
using namespace CodeGen;

namespace clang {
// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
// by -fsanitize-skip-hot-cutoff
llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

} // namespace clang

//===--------------------------------------------------------------------===//
//                        Defines for metadata
//===--------------------------------------------------------------------===//

// These values must stay in sync with the ubsan runtime library's type
// descriptor kinds.
enum VariableTypeDescriptorKind : uint16_t {
  /// An integer type.
  TK_Integer = 0x0000,
  /// A floating-point type.
  TK_Float = 0x0001,
  /// An _BitInt(N) type.
  TK_BitInt = 0x0002,
  /// Any other type. The value representation is unspecified.
  TK_Unknown = 0xffff
};

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAllocaWithoutCast - This creates an alloca with the requested
/// alignment and inserts it into the entry block.  Unlike the address-space
/// casting variant below, the result stays in the alloca address space.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - Create a temporary alloca and, if the target's alloca
/// address space differs from \p DestLangAS, cast the pointer to the
/// destination language address space.  If \p AllocaAddr is non-null it
/// receives the uncast alloca address.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
                                             CharUnits Align, const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.

  unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
  if (DestAddrSpace != Alloca.getAddressSpace()) {
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.  The cast must be emitted right after the alloca so it
    // dominates all uses.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), Builder.getPtrTy(DestAddrSpace),
        /*IsNonNull=*/true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca =
        new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                             ArraySize, Name, AllocaInsertPt->getIterator());
  // Record the original name so AddressSanitizer reports can still identify
  // the variable if later passes rename or merge the alloca.
  if (SanOpts.Mask & SanitizerKind::Address) {
    Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
  }
  if (Allocas) {
    Allocas->Add(Alloca);
  }
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

/// CreateIRTemp - Create a temporary alloca using the ABI alignment of the
/// AST type.  Note: lowers via ConvertType, not ConvertTypeForMem.
RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

/// CreateMemTemp - Create a temporary memory object of the given type with
/// the given alignment, lowered via ConvertTypeForMem.
RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

  // Constant matrices are laid out in memory as an array but accessed as a
  // vector; re-type the address to the vector form.
  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

/// CreateMemTempWithoutCast - Like CreateMemTemp, but the result stays in
/// the alloca address space (no cast to the default address space).
RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
203 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { 204 PGO->setCurrentStmt(E); 205 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) { 206 llvm::Value *MemPtr = EmitScalarExpr(E); 207 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT); 208 } 209 210 QualType BoolTy = getContext().BoolTy; 211 SourceLocation Loc = E->getExprLoc(); 212 CGFPOptionsRAII FPOptsRAII(*this, E); 213 if (!E->getType()->isAnyComplexType()) 214 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc); 215 216 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy, 217 Loc); 218 } 219 220 /// EmitIgnoredExpr - Emit code to compute the specified expression, 221 /// ignoring the result. 222 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) { 223 if (E->isPRValue()) 224 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true); 225 226 // if this is a bitfield-resulting conditional operator, we can special case 227 // emit this. The normal 'EmitLValue' version of this is particularly 228 // difficult to codegen for, since creating a single "LValue" for two 229 // different sized arguments here is not particularly doable. 230 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>( 231 E->IgnoreParenNoopCasts(getContext()))) { 232 if (CondOp->getObjectKind() == OK_BitField) 233 return EmitIgnoredConditionalOperator(CondOp); 234 } 235 236 // Just emit it as an l-value and drop the result. 237 EmitLValue(E); 238 } 239 240 /// EmitAnyExpr - Emit code to compute the specified expression which 241 /// can have any type. The result is returned as an RValue struct. 242 /// If this is an aggregate expression, AggSlot indicates where the 243 /// result should be returned. 
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    // An aggregate still needs memory to be evaluated into, even if the
    // caller did not supply a slot (unless the result is ignored).
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitInitializationToLValue - Evaluate \p E as the initializer of the
/// object designated by \p LV, dispatching on the evaluation kind of the
/// destination type.
void CodeGenFunction::EmitInitializationToLValue(
    const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
  QualType Type = LV.getType();
  switch (getEvaluationKind(Type)) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::MayOverlap, IsZeroed));
    return;
  case TEK_Scalar:
    if (LV.isSimple())
      EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    else
      EmitStoreThroughLValue(RValue::get(EmitScalarExpr(E)), LV);
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// pushTemporaryCleanup - Register whatever cleanup is required for the
/// lifetime-extended reference temporary \p ReferenceTemporary: an ARC
/// release, a destructor call at scope/full-expression exit, or a global
/// destructor registration for static/thread storage duration.
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          // Precise lifetime (via attribute) forbids early releases.
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise = isa_and_nonnull<VarDecl>(VD) &&
                         VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  QualType::DestructionKind DK = E->getType().isDestructedType();
  if (DK != QualType::DK_none) {
    switch (M->getStorageDuration()) {
    case SD_Static:
    case SD_Thread: {
      CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
      if (const RecordType *RT =
              E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
        // Get the destructor for the reference temporary.
        if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl());
            ClassDecl && !ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }

      if (!ReferenceTemporaryDtor)
        return;

      // Register the destructor to run at program/thread exit.  Arrays need
      // a synthesized helper that iterates the elements.
      llvm::FunctionCallee CleanupFn;
      llvm::Constant *CleanupArg;
      if (E->getType()->isArrayType()) {
        CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
            ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
            CGF.getLangOpts().Exceptions,
            dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
        CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
      } else {
        CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
            GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
        CleanupArg =
            cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
      }
      CGF.CGM.getCXXABI().registerGlobalDtor(
          CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    } break;
    case SD_FullExpression:
      CGF.pushDestroy(DK, ReferenceTemporary,
                      E->getType());
      break;
    case SD_Automatic:
      CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
      break;
    case SD_Dynamic:
      llvm_unreachable("temporary cannot have dynamic storage duration");
    }
  }
}

/// createReferenceTemporary - Create the storage (a promoted constant
/// global, a stack temporary, or a global temporary) backing the
/// materialized temporary \p M whose adjusted inner expression is \p Inner.
static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), true, false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS,
              llvm::PointerType::get(
                  CGF.getLLVMContext(),
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can.  It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations.  Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  // Comma LHSs are evaluated only for their side effects.
  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      QualType RefType = M->getType().withoutLocalFastQualifiers();
      if (RefType.getPointerAuth()) {
        // Use the qualifier of the reference temporary to sign the pointer.
        LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
                                      Object.getAlignment());
        EmitScalarInit(E, M->getExtendingDecl(), LV, false);
      } else {
        EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
      }
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However when inside an "await.suspend"
      // block, we should always avoid conditional cleanup because it creates
      // boolean marker that lives across await_suspend, which can destroy coro
      // frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        // Temporarily leave the conditional context and emit the
        // lifetime.start at the start of the outermost conditional block.
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(
          E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
693 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, 694 const llvm::Constant *Elts) { 695 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx)) 696 ->getZExtValue(); 697 } 698 699 static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, 700 llvm::Value *Ptr) { 701 llvm::Value *A0 = 702 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u)); 703 llvm::Value *A1 = 704 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31))); 705 return Builder.CreateXor(Acc, A1); 706 } 707 708 bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) { 709 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast || 710 TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation; 711 } 712 713 bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) { 714 CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 715 return (RD && RD->hasDefinition() && RD->isDynamicClass()) && 716 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall || 717 TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference || 718 TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation); 719 } 720 721 bool CodeGenFunction::sanitizePerformTypeCheck() const { 722 return SanOpts.has(SanitizerKind::Null) || 723 SanOpts.has(SanitizerKind::Alignment) || 724 SanOpts.has(SanitizerKind::ObjectSize) || 725 SanOpts.has(SanitizerKind::Vptr); 726 } 727 728 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, 729 llvm::Value *Ptr, QualType Ty, 730 CharUnits Alignment, 731 SanitizerSet SkippedChecks, 732 llvm::Value *ArraySize) { 733 if (!sanitizePerformTypeCheck()) 734 return; 735 736 // Don't check pointers outside the default address space. The null check 737 // isn't correct, the object-size check isn't supported by LLVM, and we can't 738 // communicate the addresses to the runtime handler for the vptr check. 739 if (Ptr->getType()->getPointerAddressSpace()) 740 return; 741 742 // Don't check pointers to volatile data. 
The behavior here is implementation- 743 // defined. 744 if (Ty.isVolatileQualified()) 745 return; 746 747 // Quickly determine whether we have a pointer to an alloca. It's possible 748 // to skip null checks, and some alignment checks, for these pointers. This 749 // can reduce compile-time significantly. 750 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts()); 751 752 llvm::Value *IsNonNull = nullptr; 753 bool IsGuaranteedNonNull = 754 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca; 755 756 llvm::BasicBlock *Done = nullptr; 757 bool DoneViaNullSanitize = false; 758 759 { 760 auto CheckHandler = SanitizerHandler::TypeMismatch; 761 SanitizerDebugLocation SanScope(this, 762 {SanitizerKind::SO_Null, 763 SanitizerKind::SO_ObjectSize, 764 SanitizerKind::SO_Alignment}, 765 CheckHandler); 766 767 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 3> 768 Checks; 769 770 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext()); 771 bool AllowNullPointers = isNullPointerAllowed(TCK); 772 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) && 773 !IsGuaranteedNonNull) { 774 // The glvalue must not be an empty glvalue. 775 IsNonNull = Builder.CreateIsNotNull(Ptr); 776 777 // The IR builder can constant-fold the null check if the pointer points 778 // to a constant. 779 IsGuaranteedNonNull = IsNonNull == True; 780 781 // Skip the null check if the pointer is known to be non-null. 782 if (!IsGuaranteedNonNull) { 783 if (AllowNullPointers) { 784 // When performing pointer casts, it's OK if the value is null. 785 // Skip the remaining checks in that case. 
786 Done = createBasicBlock("null"); 787 DoneViaNullSanitize = true; 788 llvm::BasicBlock *Rest = createBasicBlock("not.null"); 789 Builder.CreateCondBr(IsNonNull, Rest, Done); 790 EmitBlock(Rest); 791 } else { 792 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null)); 793 } 794 } 795 } 796 797 if (SanOpts.has(SanitizerKind::ObjectSize) && 798 !SkippedChecks.has(SanitizerKind::ObjectSize) && 799 !Ty->isIncompleteType()) { 800 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity(); 801 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize); 802 if (ArraySize) 803 Size = Builder.CreateMul(Size, ArraySize); 804 805 // Degenerate case: new X[0] does not need an objectsize check. 806 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size); 807 if (!ConstantSize || !ConstantSize->isNullValue()) { 808 // The glvalue must refer to a large enough storage region. 809 // FIXME: If Address Sanitizer is enabled, insert dynamic 810 // instrumentation 811 // to check this. 812 // FIXME: Get object address space 813 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy}; 814 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys); 815 llvm::Value *Min = Builder.getFalse(); 816 llvm::Value *NullIsUnknown = Builder.getFalse(); 817 llvm::Value *Dynamic = Builder.getFalse(); 818 llvm::Value *LargeEnough = Builder.CreateICmpUGE( 819 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size); 820 Checks.push_back( 821 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize)); 822 } 823 } 824 825 llvm::MaybeAlign AlignVal; 826 llvm::Value *PtrAsInt = nullptr; 827 828 if (SanOpts.has(SanitizerKind::Alignment) && 829 !SkippedChecks.has(SanitizerKind::Alignment)) { 830 AlignVal = Alignment.getAsMaybeAlign(); 831 if (!Ty->isIncompleteType() && !AlignVal) 832 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr, 833 /*ForPointeeType=*/true) 834 .getAsMaybeAlign(); 835 836 // The glvalue must be suitably aligned. 
837 if (AlignVal && *AlignVal > llvm::Align(1) && 838 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) { 839 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy); 840 llvm::Value *Align = Builder.CreateAnd( 841 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1)); 842 llvm::Value *Aligned = 843 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)); 844 if (Aligned != True) 845 Checks.push_back( 846 std::make_pair(Aligned, SanitizerKind::SO_Alignment)); 847 } 848 } 849 850 if (Checks.size() > 0) { 851 llvm::Constant *StaticData[] = { 852 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), 853 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1), 854 llvm::ConstantInt::get(Int8Ty, TCK)}; 855 EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr); 856 } 857 } 858 859 // If possible, check that the vptr indicates that there is a subobject of 860 // type Ty at offset zero within this object. 861 // 862 // C++11 [basic.life]p5,6: 863 // [For storage which does not refer to an object within its lifetime] 864 // The program has undefined behavior if: 865 // -- the [pointer or glvalue] is used to access a non-static data member 866 // or call a non-static member function 867 if (SanOpts.has(SanitizerKind::Vptr) && 868 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) { 869 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr}, 870 SanitizerHandler::DynamicTypeCacheMiss); 871 872 // Ensure that the pointer is non-null before loading it. If there is no 873 // compile-time guarantee, reuse the run-time null check or emit a new one. 
874 if (!IsGuaranteedNonNull) { 875 if (!IsNonNull) 876 IsNonNull = Builder.CreateIsNotNull(Ptr); 877 if (!Done) 878 Done = createBasicBlock("vptr.null"); 879 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null"); 880 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done); 881 EmitBlock(VptrNotNull); 882 } 883 884 // Compute a deterministic hash of the mangled name of the type. 885 SmallString<64> MangledName; 886 llvm::raw_svector_ostream Out(MangledName); 887 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(), 888 Out); 889 890 // Contained in NoSanitizeList based on the mangled type. 891 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr, 892 Out.str())) { 893 // Load the vptr, and mix it with TypeHash. 894 llvm::Value *TypeHash = 895 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str())); 896 897 llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0); 898 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign()); 899 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy, 900 Ty->getAsCXXRecordDecl(), 901 VTableAuthMode::UnsafeUbsanStrip); 902 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy); 903 904 llvm::Value *Hash = 905 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty)); 906 Hash = Builder.CreateTrunc(Hash, IntPtrTy); 907 908 // Look the hash up in our cache. 
909 const int CacheSize = 128; 910 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize); 911 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable, 912 "__ubsan_vptr_type_cache"); 913 llvm::Value *Slot = Builder.CreateAnd(Hash, 914 llvm::ConstantInt::get(IntPtrTy, 915 CacheSize-1)); 916 llvm::Value *Indices[] = { Builder.getInt32(0), Slot }; 917 llvm::Value *CacheVal = Builder.CreateAlignedLoad( 918 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices), 919 getPointerAlign()); 920 921 // If the hash isn't in the cache, call a runtime handler to perform the 922 // hard work of checking whether the vptr is for an object of the right 923 // type. This will either fill in the cache and return, or produce a 924 // diagnostic. 925 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash); 926 llvm::Constant *StaticData[] = { 927 EmitCheckSourceLocation(Loc), 928 EmitCheckTypeDescriptor(Ty), 929 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()), 930 llvm::ConstantInt::get(Int8Ty, TCK) 931 }; 932 llvm::Value *DynamicData[] = { Ptr, Hash }; 933 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr), 934 SanitizerHandler::DynamicTypeCacheMiss, StaticData, 935 DynamicData); 936 } 937 } 938 939 if (Done) { 940 SanitizerDebugLocation SanScope( 941 this, 942 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr}, 943 DoneViaNullSanitize ? 
                              SanitizerHandler::TypeMismatch
                          : SanitizerHandler::DynamicTypeCacheMiss);
    // Merge the not-null fast path(s) back in: branch to and then emit the
    // shared "Done" continuation block.
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Load the object size implied by a pass_object_size attribute on the
/// parameter referenced by \p E, converted to a count of elements of type
/// \p EltTy.
///
/// Returns null when no bound can be derived without side effects: the
/// element type has zero size, \p E is not a direct reference to a
/// pass_object_size parameter, the attribute only provides a lower bound
/// (types 2 and 3), or no implicit size argument was recorded for the
/// parameter.
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  // A zero-sized element type would make the division below meaningless.
  if (!EltSize)
    return nullptr;

  // The bound only applies to a direct (possibly parenthesized/implicitly
  // cast) reference to a function parameter.
  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  // Convert the byte count into an element count.
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                          StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  // An array that decays to a pointer has a statically (or VLA-dynamically)
  // known bound — unless it is a flexible array member, whose real extent is
  // unknown at this point.
  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  // Fall back to a pass_object_size-derived bound, if one exists.
  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///     p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///     struct s {
///       struct s *ptr;
///       int count;
///       char array[] __attribute__((counted_by(count)));
///     };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  // The record whose counted_by field we ultimately want to address.
  const RecordDecl *ExpectedRD;

  // True if E's type (or pointee type, for pointers) is the expected record.
  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  // Any statement kind not handled below terminates the walk with no base.
  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexible array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    // Prefer an arrow access to the expected record ("p->ptr"); otherwise
    // keep walking the base, falling back to this expr if nothing deeper
    // matches.
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    // An lvalue-to-rvalue conversion loads a value; only accept it if it
    // directly yields the expected record — don't walk through the load.
    if (E->getCastKind() == CK_LValueToRValue)
      return IsExpectedRecordDecl(E) ? E : nullptr;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

using RecIndicesTy = SmallVector<llvm::Value *, 8>;

/// Collect (in reverse order) the GEP indices leading from \p RD to
/// \p Field, recursing into nested record-typed fields. Returns true and
/// appends the index chain to \p Indices when the field is found.
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        // Unions share storage: the LLVM struct has a single member, so the
        // index into the union itself is always 0.
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
        return true;
      }
    }
  }

  return false;
}

/// Build a GEP to the counted_by field \p CountDecl associated with the
/// flexible array member \p FAMDecl, starting from the struct base found in
/// \p Base. Returns null when the base cannot be identified without side
/// effects or the field cannot be located in the layout.
llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->isLValue()) {
    LValue LV = EmitLValue(StructBase);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  RecIndicesTy Indices;
  getGEPIndicesToField(*this, RD, CountDecl, Indices);
  if (Indices.empty())
    return nullptr;

  // The collected indices are innermost-first; add the leading 0 for the
  // pointer itself, then reverse into GEP order.
  Indices.push_back(Builder.getInt32(0));
  return Builder.CreateInBoundsGEP(
      ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
      RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size.
/// When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
    return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
                                     getIntAlign(), "counted_by.load");
  return nullptr;
}

/// Emit an ArrayBounds sanitizer check for indexing \p Base with \p Index,
/// deriving the bound from the array type, VLA size, or pass_object_size.
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

/// Emit the actual out-of-bounds check given a (possibly null) \p Bound.
/// \p Accessed selects a strict (<) check for actual accesses versus a
/// non-strict (<=) check for address formation (one-past-the-end is legal).
void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  // No derivable bound means nothing to check.
  if (!Bound)
    return;

  auto CheckKind = SanitizerKind::SO_ArrayBounds;
  auto CheckHandler = SanitizerHandler::OutOfBounds;
  SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
}

/// Emit pre/post increment/decrement of a _Complex lvalue: only the real
/// part is adjusted; the imaginary part is passed through unchanged.
/// Returns the stored value for pre-ops and the original value for post-ops.
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

/// Emit the side effects of an explicit cast's *type*: bind any VLA sizes
/// appearing in it and record the type for debug info.
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// Worker for CodeGenFunction::EmitPointerWithAlignment: walks through
/// alignment-preserving expression forms (casts, unary &, std::addressof)
/// to derive the tightest alignment bound it can for the pointer \p E.
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = CGF.EmitPointerWithAlignment(
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  return CGF.makeNaturalAddressForPointer(
      CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  // Propagate the caller's non-null knowledge onto the returned address.
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}

/// Emit an i1 that is true when the scalar rvalue \p RV of type \p T is
/// non-null, using the C++ ABI's comparison for member pointers.
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

/// Produce an undefined rvalue of type \p Ty, in whatever representation
/// its evaluation kind requires.
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
      ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

/// Report an unsupported expression as an error and return a well-formed
/// undefined rvalue so codegen can continue.
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

/// Report an unsupported expression as an error and return a well-formed
/// undefined lvalue so codegen can continue.
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

/// Return true if \p Obj is (possibly wrapped in parens, value-preserving
/// casts, or __extension__) the 'this' expression. Used to skip sanitizer
/// checks that 'this' already guarantees.
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

/// Emit an lvalue for \p E and then emit the sanitizer type check (null,
/// alignment, etc.) appropriate for \p TCK, skipping checks already
/// guaranteed by the expression's form.
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      // 'this' is never null and is suitably aligned; a DeclRefExpr base
      // can't be null either.
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E,
                                   KnownNonNull_t IsKnownNonNull) {
  // Run with sufficient stack space so that deeply nested expressions don't
  // cause a stack overflow.
  LValue LV;
  CGM.runWithSufficientStackSpace(
      E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });

  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}

/// Determine the type referred to by a ConstantExpr lvalue: either the type
/// of the wrapped OpaqueValueExpr, or the pointee of the wrapped call's
/// return type.
static QualType getConstantExprReferredType(const FullExpr *E,
                                            const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}

/// Dispatch on the statement class of \p E to the specific lvalue-emission
/// routine. Invoked only through EmitLValue, which provides the stack-space
/// guard and non-null propagation.
LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = getConstantExprReferredType(CE, getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress();
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::ArraySectionExprClass:
    return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  case Expr::HLSLOutArgExprClass:
    llvm_unreachable("cannot emit a HLSL out argument directly");
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  // For references, the decision is driven by the referenced type; a
  // reference itself can always at least be emitted as a reference.
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
/// Try to emit the referenced declaration as a compile-time constant instead
/// of materializing an l-value and loading from it.
///
/// Returns an empty ConstantEmission on failure; otherwise either a
/// "reference" emission (the constant is the address of the object, and the
/// caller must still load through it) or a "value" emission (the constant is
/// the value itself).
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const DeclRefExpr *RefExpr) {
  const ValueDecl *Value = RefExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  // Parameters are never constant-emittable here (their value is only known
  // at call time).
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(Value)) {
    CEK = CEK_None;
  } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(Value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      RefExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = RefExpr->getType().getUnqualifiedType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             RefExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = Value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      RefExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          // Only device-side variables may be folded; anything else must be
          // read from the lambda's captures.
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
      RefExpr->getLocation(), result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // FIXME: this comment was left unfinished upstream ("This should probably
  // fire even for"); presumably the debug value should also be emitted for
  // variables that must be emitted anyway — TODO confirm.
  if (isa<VarDecl>(Value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
      EmitDeclRefExprDbgValue(RefExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(Value));
    EmitDeclRefExprDbgValue(RefExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

/// If \p ME refers to a static data member (a VarDecl), synthesize an
/// equivalent DeclRefExpr so the DeclRefExpr constant-emission path above can
/// be reused. Returns null for ordinary (non-variable) members.
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

/// MemberExpr overload: only static data members can be constant-emitted;
/// delegate to the DeclRefExpr overload via a synthesized DRE.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

/// Turn a ConstantEmission into a scalar llvm::Value. A "reference" emission
/// still requires a load through the constant address; a "value" emission is
/// returned directly.
llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

/// Convenience overload: unpack the LValue's address, qualifiers and TBAA
/// info and forward to the main EmitLoadOfScalar implementation.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

/// Compute the valid value range [Min, End) for a load of type \p Ty.
/// Only booleans and (under StrictEnums) non-fixed C++ enums have a
/// constrained range; returns false for every other type.
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    // Booleans are exactly {0, 1}, i.e. the half-open range [0, 2).
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}

/// Build !range metadata for a load of type \p Ty, or null if the type has
/// no constrained value range.
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       Ty->hasBooleanRepresentation() && !Ty->isVectorType()))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

/// Attach !range/!noundef metadata to \p Load when it is safe and profitable.
/// If a sanitizer range check was emitted for the load, metadata is omitted
/// so the optimizer cannot fold the check away.
void CodeGenFunction::maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty,
                                              SourceLocation Loc) {
  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(CGM.getLLVMContext(), {}));
    }
  }
}

/// Emit a UBSan bool/enum load check for \p Value of type \p Ty.
/// Returns true if a check applies to this type (even when the concrete range
/// could not be computed), false when no check is needed.
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  // ObjC BOOL is checked like a C++ bool even though it is a typedef.
  bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  if (NeedsEnumCheck &&
      getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  SanitizerKind::SanitizerOrdinal Kind =
      NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;

  auto &Ctx = getLLVMContext();
  auto CheckHandler = SanitizerHandler::LoadInvalidValue;
  SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
  llvm::Value *Check;
  // getRangeForType yields a half-open range [Min, End); convert to the
  // inclusive upper bound for the comparisons below.
  --End;
  if (!Min) {
    // Range starts at zero: a single unsigned comparison suffices.
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
  return true;
}

/// Emit a scalar load from \p Addr, handling thread-local rewriting, packed
/// boolean vectors, ABI-widened vectors, atomics, nontemporal hints, TBAA and
/// range metadata. The result is converted back to the type's primary IR
/// representation via EmitFromMemory.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  // Thread-local globals must be accessed through the TLS intrinsic wrapper.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isPackedVectorBoolType(getContext())) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handles vectors of sizes that are likely to be expanded to a larger size
    // to optimize performance.
    auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
    auto *NewVecTy =
        CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());

    if (VTy != NewVecTy) {
      // Load the widened vector, then shuffle the original elements back out.
      Address Cast = Addr.withElementType(NewVecTy);
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
      unsigned OldNumElements = VTy->getNumElements();
      SmallVector<int, 16> Mask(OldNumElements);
      std::iota(Mask.begin(), Mask.end(), 0);
      V = Builder.CreateShuffleVector(V, Mask, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  Addr =
      Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  maybeAttachRangeForLoad(Load, Ty, Loc);

  return EmitFromMemory(Load, Ty);
}

/// Converts a scalar value from its primary IR type (as returned
/// by ConvertType) to its load/store type (as returned by
/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  if (auto *AtomicTy = Ty->getAs<AtomicType>())
    Ty = AtomicTy->getValueType();

  if (Ty->isExtVectorBoolType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    // If the store type is a wider vector of the same shape, widening each
    // lane is sufficient.
    if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
                                     Value->getType()->getScalarSizeInBits())
      return Builder.CreateZExt(Value, StoreTy);

    // Expand to the memory bit width.
    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
    // <N x i1> --> <P x i1>.
    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
    // <P x i1> --> iP.
    Value = Builder.CreateBitCast(Value, StoreTy);
  }

  if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
    // Widen i1 / non-byte-sized _BitInt to the in-memory integer width,
    // sign- or zero-extending according to the source type.
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    bool Signed = Ty->isSignedIntegerOrEnumerationType();
    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
  }

  return Value;
}

/// Converts a scalar value from its load/store type (as returned
/// by convertTypeForLoadStore) to its primary IR type (as returned
/// by ConvertType).
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  if (auto *AtomicTy = Ty->getAs<AtomicType>())
    Ty = AtomicTy->getValueType();

  if (Ty->isPackedVectorBoolType(getContext())) {
    const auto *RawIntTy = Value->getType();

    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  llvm::Type *ResTy = ConvertType(Ty);
  if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() ||
      Ty->isExtVectorBoolType())
    // Inverse of the widening performed by EmitToMemory.
    return Builder.CreateTrunc(Value, ResTy, "loadedv");

  return Value;
}

// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
/// Retype \p Addr between the matrix memory representation (ArrayType) and
/// its value representation (FixedVectorType), in the direction selected by
/// \p IsVector. Addresses already in the requested form are returned as-is.
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
                                            CodeGenFunction &CGF,
                                            bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    // [N x T] --> <N x T>.
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    // <N x T> --> [N x T].
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  return Addr;
}

// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}

/// Emit a scalar store of \p Value to \p Addr, handling thread-local
/// rewriting, ABI-widened vectors, the memory representation conversion
/// (EmitToMemory), atomics, nontemporal hints and TBAA decoration.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  // Thread-local globals must be accessed through the TLS intrinsic wrapper.
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  // Handles vectors of sizes that are likely to be expanded to a larger size
  // to optimize performance.
  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      auto *NewVecTy =
          CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
      if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
          VecTy != NewVecTy) {
        // Widen the value with undef (-1) lanes so it matches the optimal
        // memory type before storing.
        SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
        std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
        Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
        SrcTy = NewVecTy;
      }
      if (Addr.getElementType() != SrcTy)
        Addr = Addr.withElementType(SrcTy);
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  addInstToCurrentSourceAtom(Store, Value);

  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}

/// LValue convenience overload: dispatch matrix stores to the matrix helper,
/// otherwise unpack the LValue and forward to the main implementation.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}

// Emit a load of a LValue of matrix type. This may require casting the pointer
// to memory address (ArrayType) to a pointer to the value type (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}

/// Load an r-value of any evaluation kind from \p LV. Aggregates are copied
/// into \p Slot; scalars and complex values are loaded directly.
RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
                                           SourceLocation Loc) {
  QualType Ty = LV.getType();
  switch (getEvaluationKind(Ty)) {
  case TEK_Scalar:
    return EmitLoadOfLValue(LV, Loc);
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
  case TEK_Aggregate:
    EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
    return Slot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  // Load from __ptrauth.
  if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
    // Strip the qualifier, load recursively, then strip the signature.
    LV.getQuals().removePointerAuth();
    llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
    return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
                                                LV.getAddress(),
                                                /*known nonnull*/ false));
  }

  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress()));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    // Load the whole vector, then extract the referenced element.
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      // Tell the optimizer the index is within the matrix's bounds.
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}

/// Load a bit-field: load the storage unit, then isolate the field with
/// shifts (sign-extending for signed fields, shift+mask for unsigned) and
/// cast to the field's result type.
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  // AAPCS requires volatile bit-fields to be accessed with the width of
  // their declared container type; use the volatile layout in that case.
  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    // Shift the field to the top, then arithmetic-shift back down so the
    // sign bit is replicated.
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
  }

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);

    llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);

    // The in-memory element may be wider than the value type (e.g. bool
    // vectors); truncate back to the value representation.
    llvm::Type *LVTy = ConvertType(LV.getType());
    if (Element->getType()->getPrimitiveSizeInBits() >
        LVTy->getPrimitiveSizeInBits())
      Element = Builder.CreateTrunc(Element, LVTy);

    return RValue::get(Element);
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);

  if (LV.getType()->isExtVectorBoolType())
    Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");

  return RValue::get(Vec);
}

/// Generates lvalue for partial ext_vector access: the address of the first
/// accessed element, obtained by retyping the vector pointer to its element
/// type and indexing by the first selected field.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}

/// Load of global named registers are always calls to intrinsics.
2436 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) { 2437 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) && 2438 "Bad type for register variable"); 2439 llvm::MDNode *RegName = cast<llvm::MDNode>( 2440 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata()); 2441 2442 // We accept integer and pointer types only 2443 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType()); 2444 llvm::Type *Ty = OrigTy; 2445 if (OrigTy->isPointerTy()) 2446 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); 2447 llvm::Type *Types[] = { Ty }; 2448 2449 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); 2450 llvm::Value *Call = Builder.CreateCall( 2451 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName)); 2452 if (OrigTy->isPointerTy()) 2453 Call = Builder.CreateIntToPtr(Call, OrigTy); 2454 return RValue::get(Call); 2455 } 2456 2457 /// EmitStoreThroughLValue - Store the specified rvalue into the specified 2458 /// lvalue, where both are guaranteed to the have the same type, and that type 2459 /// is 'Ty'. 2460 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, 2461 bool isInit) { 2462 if (!Dst.isSimple()) { 2463 if (Dst.isVectorElt()) { 2464 // Read/modify/write the vector, inserting the new element. 2465 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(), 2466 Dst.isVolatileQualified()); 2467 llvm::Type *VecTy = Vec->getType(); 2468 llvm::Value *SrcVal = Src.getScalarVal(); 2469 2470 if (SrcVal->getType()->getPrimitiveSizeInBits() < 2471 VecTy->getScalarSizeInBits()) 2472 SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType()); 2473 2474 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType()); 2475 if (IRStoreTy) { 2476 auto *IRVecTy = llvm::FixedVectorType::get( 2477 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits()); 2478 Vec = Builder.CreateBitCast(Vec, IRVecTy); 2479 // iN --> <N x i1>. 
2480 } 2481 2482 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar 2483 // types which are mapped to vector LLVM IR types (e.g. for implementing 2484 // an ABI). 2485 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType()); 2486 EltTy && EltTy->getNumElements() == 1) 2487 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType()); 2488 2489 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(), 2490 "vecins"); 2491 if (IRStoreTy) { 2492 // <N x i1> --> <iN>. 2493 Vec = Builder.CreateBitCast(Vec, IRStoreTy); 2494 } 2495 2496 auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(), 2497 Dst.isVolatileQualified()); 2498 addInstToCurrentSourceAtom(I, Vec); 2499 return; 2500 } 2501 2502 // If this is an update of extended vector elements, insert them as 2503 // appropriate. 2504 if (Dst.isExtVectorElt()) 2505 return EmitStoreThroughExtVectorComponentLValue(Src, Dst); 2506 2507 if (Dst.isGlobalReg()) 2508 return EmitStoreThroughGlobalRegLValue(Src, Dst); 2509 2510 if (Dst.isMatrixElt()) { 2511 llvm::Value *Idx = Dst.getMatrixIdx(); 2512 if (CGM.getCodeGenOpts().OptimizationLevel > 0) { 2513 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>(); 2514 llvm::MatrixBuilder MB(Builder); 2515 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened()); 2516 } 2517 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress()); 2518 llvm::Value *Vec = 2519 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins"); 2520 auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(), 2521 Dst.isVolatileQualified()); 2522 addInstToCurrentSourceAtom(I, Vec); 2523 return; 2524 } 2525 2526 assert(Dst.isBitField() && "Unknown LValue type"); 2527 return EmitStoreThroughBitfieldLValue(Src, Dst); 2528 } 2529 2530 // Handle __ptrauth qualification by re-signing the value. 
2531 if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) { 2532 Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(), 2533 Dst.getType(), Dst.getAddress(), 2534 /*known nonnull*/ false)); 2535 } 2536 2537 // There's special magic for assigning into an ARC-qualified l-value. 2538 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { 2539 switch (Lifetime) { 2540 case Qualifiers::OCL_None: 2541 llvm_unreachable("present but none"); 2542 2543 case Qualifiers::OCL_ExplicitNone: 2544 // nothing special 2545 break; 2546 2547 case Qualifiers::OCL_Strong: 2548 if (isInit) { 2549 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal())); 2550 break; 2551 } 2552 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true); 2553 return; 2554 2555 case Qualifiers::OCL_Weak: 2556 if (isInit) 2557 // Initialize and then skip the primitive store. 2558 EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal()); 2559 else 2560 EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), 2561 /*ignore*/ true); 2562 return; 2563 2564 case Qualifiers::OCL_Autoreleasing: 2565 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(), 2566 Src.getScalarVal())); 2567 // fall into the normal path 2568 break; 2569 } 2570 } 2571 2572 if (Dst.isObjCWeak() && !Dst.isNonGC()) { 2573 // load of a __weak object. 2574 Address LvalueDst = Dst.getAddress(); 2575 llvm::Value *src = Src.getScalarVal(); 2576 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); 2577 return; 2578 } 2579 2580 if (Dst.isObjCStrong() && !Dst.isNonGC()) { 2581 // load of a __strong object. 
    // GC'ed __strong store: compute the byte offset of the ivar from its
    // containing object so the runtime write-barrier can be told where the
    // slot lives relative to the base object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.emitRawPointer(*this);
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
                                                ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

/// Store the scalar in \p Src into the bit-field lvalue \p Dst using a
/// load/mask/or/store sequence on the bit-field's storage unit. If \p Result
/// is non-null, also return (through it) the value the bit-field holds after
/// the store, sign-extended if the field is signed.
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  // AAPCS volatile bit-fields use a separate (type-sized) storage description;
  // pick it when all the AAPCS conditions hold.
  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (StorageSize != Info.Size) {
    assert(StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val =
        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");

    // Mask the source value as needed.
    if (!Dst.getType()->hasBooleanRepresentation())
      SrcVal = Builder.CreateAnd(
          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
          "bf.value");
    MaskedVal = SrcVal;
    if (Offset)
      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(
        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
        "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Offset == 0);
    // According to the AAPCS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
  addInstToCurrentSourceAtom(I, SrcVal);

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      // Shift the field up to the top of the storage unit and arithmetic-
      // shift it back down so the sign bit is replicated into the high bits.
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}

/// Store \p Src through an ext-vector component lvalue (e.g. v.xy = ...),
/// performing a read/modify/write of the underlying vector when the
/// destination is a real vector.
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  llvm::Value *SrcVal = Src.getScalarVal();
  Address DstAddr = Dst.getExtVectorAddress();
  if (DstAddr.getElementType()->getScalarSizeInBits() >
      SrcVal->getType()->getScalarSizeInBits())
    SrcVal = Builder.CreateZExt(
        SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));

  // HLSL allows storing to scalar values through ExtVector component LValues.
  // To support this we need to handle the case where the destination address is
  // a scalar.
  if (!DstAddr.getElementType()->isVectorTy()) {
    assert(!Dst.getType()->isVectorType() &&
           "this should only occur for non-vector l-values");
    Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
    return;
  }

  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
  llvm::Type *VecTy = Vec->getType();
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will be
      // stored.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      // build identity
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), and the target is a vector it must
    // be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);

    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}

/// Stores to global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  // Pointers are written through the intrinsic as pointer-sized integers.
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}

// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  // For the expression wrappers below, classify based on the wrapped
  // subexpression.
  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

/// Form an lvalue for the per-thread private copy of an OpenMP threadprivate
/// variable \p VD, routing through either the OpenMPIRBuilder or the OpenMP
/// runtime depending on codegen options.
static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
    llvm::Type *RealVarTy, SourceLocation Loc) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
        CGF, VD, Addr, Loc);
  else
    Addr =
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

  Addr = Addr.withElementType(RealVarTy);
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}

/// Compute the device-side address of an OpenMP declare-target variable, or
/// an invalid Address when the variable should be emitted normally.
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                           const VarDecl *VD, QualType T) {
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // Return an invalid address if variable is MT_To (or MT_Enter starting with
  // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
  // and MT_To (or MT_Enter) with unified memory, return a valid address.
  if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
            *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  // The declare-target variable is reached through a pointer; load it.
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}

/// Load the referent address out of a reference lvalue, decorating the load
/// with TBAA and (when the pointee type allows it) !nonnull and !align
/// metadata. Pointee base/TBAA info are returned through the out-parameters.
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
  QualType PTy = RefLVal.getType()->getPointeeType();
  CharUnits Align = CGM.getNaturalTypeAlignment(
      PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
  if (!PTy->isIncompleteType()) {
    llvm::LLVMContext &Ctx = getLLVMContext();
    llvm::MDBuilder MDB(Ctx);
    // Emit !nonnull metadata
    if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
        !CGM.getCodeGenOpts().NullPointerIsValid)
      Load->setMetadata(llvm::LLVMContext::MD_nonnull,
                        llvm::MDNode::get(Ctx, {}));
    // Emit !align metadata
    if (PTy->isObjectType()) {
      auto AlignVal = Align.getQuantity();
      if (AlignVal > 1) {
        Load->setMetadata(
            llvm::LLVMContext::MD_align,
            llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
                                       Builder.getInt64Ty(), AlignVal))));
      }
    }
  }
  return makeNaturalAddressForPointer(Load, PTy, Align,
                                      /*ForPointeeType=*/true, PointeeBaseInfo,
                                      PointeeTBAAInfo);
}

/// Convenience wrapper: load a reference and wrap the referent in an lvalue.
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
                                            &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}

/// Load a pointer from \p Ptr and form the naturally-aligned address of its
/// pointee.
Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      BaseInfo, TBAAInfo);
}

/// Convenience wrapper: load a pointer and wrap its pointee in an lvalue.
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Form an lvalue for a reference to the global variable \p VD, handling
/// dynamic TLS wrappers, OpenMP declare-target and threadprivate variables,
/// and reference-typed globals.
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsTargetDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  // A reference-typed global yields the lvalue of its referent.
  LValue LV = VD->getType()->isReferenceType() ?
        CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                      AlignmentSource::Decl) :
        CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

/// Return the raw (unsigned, unadorned) function pointer for \p GD,
/// resolving weakref aliases first.
llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
                                                     llvm::Type *Ty) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  if (FD->hasAttr<WeakRefAttr>()) {
    ConstantAddress aliasee = GetWeakRefReference(FD);
    return aliasee.getPointer();
  }

  llvm::Constant *V = GetAddrOfFunction(GD, Ty);
  return V;
}

/// Form an lvalue referring to the function \p GD named by expression \p E.
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
                                     GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
  QualType ETy = E->getType();
  // For cfi_unchecked_callee function types, strip CFI jump-table
  // indirection from the referenced global.
  if (ETy->isCFIUncheckedCalleeFunctionType()) {
    if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
      V = llvm::NoCFIValue::get(GV);
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
}

/// Form an lvalue for a field captured by a lambda (or captured statement),
/// addressed off \p ThisValue.
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {

  return CGF.EmitLValueForLambdaField(FD, ThisValue);
}

/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64-Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M =
      CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    // First reference to this register: record its name as the node's only
    // operand.
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
                                              Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}

/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    return false;
  }
}

/// Emit an lvalue for the entity named by the DeclRefExpr \p E: a variable
/// (named register, captured, local, or global), a function, a structured
/// binding, an MS GUID, or a template parameter object.
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  QualType T = E->getType();

  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Global named registers are accessed via intrinsics only.
    if (VD->getStorageClass() == SC_Register &&
        VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
      return EmitGlobalNamedRegister(VD, CGM);

    // If this DeclRefExpr does not constitute an odr-use of the variable,
    // we're not permitted to emit a reference to it in general, and it might
    // not be captured if capture would be necessary for a use. Emit the
    // constant value directly instead.
    if (E->isNonOdrUse() == NOUR_Constant &&
        (VD->getType()->isReferenceType() ||
         !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
      VD->getAnyInitializer(VD);
      llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
          E->getLocation(), *VD->evaluateValue(), VD->getType());
      assert(Val && "failed to emit constant expression");

      Address Addr = Address::invalid();
      if (!VD->getType()->isReferenceType()) {
        // Spill the constant value to a global.
        Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
                                           getContext().getDeclAlign(VD));
        llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
        auto *PTy = llvm::PointerType::get(
            getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
        Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
      } else {
        // Should we be using the alignment of the constant pointer we emitted?
        CharUnits Alignment =
            CGM.getNaturalTypeAlignment(E->getType(),
                                        /* BaseInfo= */ nullptr,
                                        /* TBAAInfo= */ nullptr,
                                        /* forPointeeType= */ true);
        Addr = makeNaturalAddressForPointer(Val, T, Alignment);
      }
      return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
    }

    // FIXME: Handle other kinds of non-odr-use DeclRefExprs.

    // Check for captured variables.
    if (E->refersToEnclosingVariableOrCapture()) {
      VD = VD->getCanonicalDecl();
      if (auto *FD = LambdaCaptureFields.lookup(VD))
        return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
      if (CapturedStmtInfo) {
        auto I = LocalDeclMap.find(VD);
        if (I != LocalDeclMap.end()) {
          LValue CapLVal;
          if (VD->getType()->isReferenceType())
            CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
                                                AlignmentSource::Decl);
          else
            CapLVal = MakeAddrLValue(I->second, T);
          // Mark lvalue as nontemporal if the variable is marked as nontemporal
          // in simd context.
          if (getLangOpts().OpenMP &&
              CGM.getOpenMPRuntime().isNontemporalDecl(VD))
            CapLVal.setNontemporal(/*Value=*/true);
          return CapLVal;
        }
        // Not in the local decl map: go through the captured-statement record
        // field, but rebuild the lvalue with the declared alignment of VD.
        LValue CapLVal =
            EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
                                    CapturedStmtInfo->getContextValue());
        Address LValueAddress = CapLVal.getAddress();
        CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
                                         LValueAddress.getElementType(),
                                         getContext().getDeclAlign(VD)),
                                 CapLVal.getType(),
                                 LValueBaseInfo(AlignmentSource::Decl),
                                 CapLVal.getTBAAInfo());
        // Mark lvalue as nontemporal if the variable is marked as nontemporal
        // in simd context.
        if (getLangOpts().OpenMP &&
            CGM.getOpenMPRuntime().isNontemporalDecl(VD))
          CapLVal.setNontemporal(/*Value=*/true);
        return CapLVal;
      }

      assert(isa<BlockDecl>(CurCodeDecl));
      Address addr = GetAddrOfBlockDecl(VD);
      return MakeAddrLValue(addr, T, AlignmentSource::Decl);
    }
  }

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const auto *VD = cast<ValueDecl>(ND);
    ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
  }

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasLinkage() || VD->isStaticDataMember())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = LocalDeclMap.find(VD);
    if (iter != LocalDeclMap.end()) {
      addr = iter->second;

      // Otherwise, it might be static local we haven't emitted yet for
      // some reason; most likely, because it's in an outer function.
    } else if (VD->isStaticLocal()) {
      llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
          *VD, CGM.getLLVMLinkageVarDefinition(VD));
      addr = Address(
          var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));

      // No other cases for now.
    } else {
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
    }

    // Handle threadlocal function locals.
    if (VD->getTLSKind() != VarDecl::TLS_None)
      addr = addr.withPointer(
          Builder.CreateThreadLocalAddress(addr.getBasePointer()),
          NotKnownNonNull);

    // Check for OpenMP threadprivate variables.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
      return EmitThreadPrivateVarDeclLValue(
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
          E->getExprLoc());
    }

    // Drill into block byref variables.
    bool isBlockByref = VD->isEscapingByref();
    if (isBlockByref) {
      addr = emitBlockByrefAddress(addr, VD);
    }

    // Drill into reference types.
    LValue LV = VD->getType()->isReferenceType() ?
        EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
        MakeAddrLValue(addr, T, AlignmentSource::Decl);

    bool isLocalStorage = VD->hasLocalStorage();

    // Plain (non-reference, non-byref) locals never need GC write barriers.
    bool NonGCable = isLocalStorage &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockByref;
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }

    bool isImpreciseLifetime =
      (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
    if (isImpreciseLifetime)
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: While we're emitting a binding from an enclosing scope, all other
  // DeclRefExprs we see should be implicitly treated as if they also refer to
  // an enclosing scope.
  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
    if (E->refersToEnclosingVariableOrCapture()) {
      auto *FD = LambdaCaptureFields.lookup(BD);
      return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
    }
    return EmitLValue(BD->getBinding());
  }

  // We can form DeclRefExprs naming GUID declarations when reconstituting
  // non-type template parameters into expressions.
3322 if (const auto *GD = dyn_cast<MSGuidDecl>(ND)) 3323 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T, 3324 AlignmentSource::Decl); 3325 3326 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) { 3327 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO); 3328 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace()); 3329 3330 if (AS != T.getAddressSpace()) { 3331 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace()); 3332 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS); 3333 auto ASC = getTargetHooks().performAddrSpaceCast(CGM, ATPO.getPointer(), 3334 AS, PtrTy); 3335 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment()); 3336 } 3337 3338 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl); 3339 } 3340 3341 llvm_unreachable("Unhandled DeclRefExpr"); 3342 } 3343 3344 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 3345 // __extension__ doesn't affect lvalue-ness. 3346 if (E->getOpcode() == UO_Extension) 3347 return EmitLValue(E->getSubExpr()); 3348 3349 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 3350 switch (E->getOpcode()) { 3351 default: llvm_unreachable("Unknown unary operator lvalue!"); 3352 case UO_Deref: { 3353 QualType T = E->getSubExpr()->getType()->getPointeeType(); 3354 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 3355 3356 LValueBaseInfo BaseInfo; 3357 TBAAAccessInfo TBAAInfo; 3358 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo, 3359 &TBAAInfo); 3360 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); 3361 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 3362 3363 // We should not generate __weak write barrier on indirect reference 3364 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 3365 // But, we continue to generate __strong write barrier on indirect write 3366 // into a pointer to object. 
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress().getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    // Pre-inc/dec: perform the update, then yield the operand's lvalue.
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}

/// Emit an lvalue for a predefined identifier (__func__ etc.), materializing
/// the name as a constant string global. Inside a block, the name gets a
/// discriminator suffix when the block id is non-zero.
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  // Strip the '\01' literal-name marker if present.
  FnName.consume_front("\01");
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    } else {
      auto C =
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
  }
  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}

/// Emit a type description suitable for use by a runtime sanitizer library. The
/// format of a type descriptor is
///
/// \code
/// { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name with extra information
/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
/// anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
3461 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) 3462 return C; 3463 3464 uint16_t TypeKind = TK_Unknown; 3465 uint16_t TypeInfo = 0; 3466 bool IsBitInt = false; 3467 3468 if (T->isIntegerType()) { 3469 TypeKind = TK_Integer; 3470 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 3471 (T->isSignedIntegerType() ? 1 : 0); 3472 // Follow suggestion from discussion of issue 64100. 3473 // So we can write the exact amount of bits in TypeName after '\0' 3474 // making it <diagnostic-like type name>.'\0'.<32-bit width>. 3475 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) { 3476 // Do a sanity checks as we are using 32-bit type to store bit length. 3477 assert(getContext().getTypeSize(T) > 0 && 3478 " non positive amount of bits in __BitInt type"); 3479 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF && 3480 " too many bits in __BitInt type"); 3481 3482 // Redefine TypeKind with the actual __BitInt type if we have signed 3483 // BitInt. 3484 TypeKind = TK_BitInt; 3485 IsBitInt = true; 3486 } 3487 } else if (T->isFloatingType()) { 3488 TypeKind = TK_Float; 3489 TypeInfo = getContext().getTypeSize(T); 3490 } 3491 3492 // Format the type name as if for a diagnostic, including quotes and 3493 // optionally an 'aka'. 3494 SmallString<32> Buffer; 3495 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 3496 (intptr_t)T.getAsOpaquePtr(), StringRef(), 3497 StringRef(), {}, Buffer, {}); 3498 3499 if (IsBitInt) { 3500 // The Structure is: 0 to end the string, 32 bit unsigned integer in target 3501 // endianness, zero. 3502 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'}; 3503 const auto *EIT = T->castAs<BitIntType>(); 3504 uint32_t Bits = EIT->getNumBits(); 3505 llvm::support::endian::write32(S + 1, Bits, 3506 getTarget().isBigEndian() 3507 ? 
llvm::endianness::big 3508 : llvm::endianness::little); 3509 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0]))); 3510 Buffer.append(Str); 3511 } 3512 3513 llvm::Constant *Components[] = { 3514 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 3515 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 3516 }; 3517 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 3518 3519 auto *GV = new llvm::GlobalVariable( 3520 CGM.getModule(), Descriptor->getType(), 3521 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); 3522 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3523 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); 3524 3525 // Remember the descriptor for this type. 3526 CGM.setTypeDescriptorInMap(T, GV); 3527 3528 return GV; 3529 } 3530 3531 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 3532 llvm::Type *TargetTy = IntPtrTy; 3533 3534 if (V->getType() == TargetTy) 3535 return V; 3536 3537 // Floating-point types which fit into intptr_t are bitcast to integers 3538 // and then passed directly (after zero-extension, if necessary). 3539 if (V->getType()->isFloatingPointTy()) { 3540 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue(); 3541 if (Bits <= TargetTy->getIntegerBitWidth()) 3542 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), 3543 Bits)); 3544 } 3545 3546 // Integers which fit in intptr_t are zero-extended and passed directly. 3547 if (V->getType()->isIntegerTy() && 3548 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 3549 return Builder.CreateZExt(V, TargetTy); 3550 3551 // Pointers are passed directly, everything else is passed by address. 
  if (!V->getType()->isPointerTy()) {
    // Spill the value to a stack slot and pass its address.
    RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}

/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    // Honor -fsanitize-undefined-strip-path-components: a negative value
    // keeps only the last N components, a positive value strips the first N.
    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        // Asked to strip more components than exist; keep just the filename.
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    // Don't instrument the filename global itself.
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(
            FilenameGV.getPointer()->stripPointerCasts()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    // Invalid location: null filename, zero line/column.
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}

namespace {
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};
}

/// Map a sanitizer check ordinal to its recoverability class. Vptr checks
/// are always recoverable; return/unreachable checks never are; everything
/// else supports both fatal and non-fatal handling.
static CheckRecoverableKind
getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {
  if (Ordinal == SanitizerKind::SO_Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Ordinal == SanitizerKind::SO_Return ||
           Ordinal == SanitizerKind::SO_Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}

namespace {
/// Name and ABI version of a ubsan runtime handler entry point.
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

// Table of handler names/versions, indexed by SanitizerHandler enumerator.
const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};

/// Emit a call to the ubsan runtime handler for \p CheckHandler, deriving the
/// handler symbol name from the check name, ABI version, minimal-runtime mode
/// and fatality, then either branch to \p ContBB or emit unreachable.
static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB, bool NoMerge) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  std::optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  // Assemble the handler symbol name:
  // __ubsan_handle_<check>[_vN][_minimal][_abort].
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if (MinimalRuntime)
    FnName += "_minimal";
  if (NeedsAbortSuffix)
    FnName += "_abort";
  bool MayReturn =
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;

  llvm::AttrBuilder B(CGF.getLLVMContext());
  if (!MayReturn) {
    B.addAttribute(llvm::Attribute::NoReturn)
        .addAttribute(llvm::Attribute::NoUnwind);
  }
  B.addUWTableAttr(llvm::UWTableKind::Default);

  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
      FnType, FnName,
      llvm::AttributeList::get(CGF.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex, B),
      /*Local=*/true);
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
  // At -O0 (or optnone) keep each handler call distinct for debuggability.
  NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
            (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
  if (NoMerge)
    HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
  if (!MayReturn) {
    HandlerCall->setDoesNotReturn();
    CGF.Builder.CreateUnreachable();
  } else {
    CGF.Builder.CreateBr(ContBB);
  }
}

/// Emit one or more sanitizer checks: branch on the conjunction of the check
/// conditions and, on failure, trap and/or call the runtime handler depending
/// on -fsanitize-trap / -fsanitize-recover settings per check ordinal.
void CodeGenFunction::EmitCheck(
    ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
    ArrayRef<llvm::Value *> DynamicArgs) {
  assert(IsSanitizerScope);
  assert(Checked.size() > 0);
  assert(CheckHandler >= 0 &&
         size_t(CheckHandler) < std::size(SanitizerHandlers));
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;

  llvm::Value *FatalCond = nullptr;
  llvm::Value *RecoverableCond = nullptr;
  llvm::Value *TrapCond = nullptr;
  bool NoMerge = false;
  // Expand checks into:
  //   (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
  // We need separate allow_ubsan_check intrinsics because they have separately
  // specified cutoffs.
  // This expression looks expensive but will be simplified after
  // LowerAllowCheckPass.
  for (auto &[Check, Ord] : Checked) {
    llvm::Value *GuardedCheck = Check;
    if (ClSanitizeGuardChecks ||
        (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
      llvm::Value *Allow = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
          llvm::ConstantInt::get(CGM.Int8Ty, Ord));
      GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
    }

    // -fsanitize-trap= overrides -fsanitize-recover=.
    llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
                         : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
                             ? RecoverableCond
                             : FatalCond;
    Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;

    if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
      NoMerge = true;
  }

  if (TrapCond)
    EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
  // If everything was handled by the trap path, we're done.
  if (!FatalCond && !RecoverableCond)
    return;

  llvm::Value *JointCond;
  if (FatalCond && RecoverableCond)
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
  else
    JointCond = FatalCond ? FatalCond : RecoverableCond;
  assert(JointCond);

  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
  assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
  for (int i = 1, n = Checked.size(); i < n; ++i) {
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
           "All recoverable kinds in a single check must be same!");
    assert(SanOpts.has(Checked[i].second));
  }
#endif

  llvm::BasicBlock *Cont = createBasicBlock("cont");
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
  // Give hint that we very much don't expect to execute the handler
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
  EmitBlock(Handlers);

  // Handler functions take an i8* pointing to the (handler-specific) static
  // information block, followed by a sequence of intptr_t arguments
  // representing operand values.
  SmallVector<llvm::Value *, 4> Args;
  SmallVector<llvm::Type *, 4> ArgTypes;
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
    Args.reserve(DynamicArgs.size() + 1);
    ArgTypes.reserve(DynamicArgs.size() + 1);

    // Emit handler arguments and create handler function type.
    if (!StaticArgs.empty()) {
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
      auto *InfoPtr = new llvm::GlobalVariable(
          CGM.getModule(), Info->getType(), false,
          llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGM.getDataLayout().getDefaultGlobalsAddressSpace());
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
      Args.push_back(InfoPtr);
      ArgTypes.push_back(Args.back()->getType());
    }

    for (llvm::Value *DynamicArg : DynamicArgs) {
      Args.push_back(EmitCheckValue(DynamicArg));
      ArgTypes.push_back(IntPtrTy);
    }
  }

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);

  if (!FatalCond || !RecoverableCond) {
    // Simple case: we need to generate a single handler call, either
    // fatal, or non-fatal.
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
                         (FatalCond != nullptr), Cont, NoMerge);
  } else {
    // Emit two handler calls: first one for set of unrecoverable checks,
    // another one for recoverable.
    llvm::BasicBlock *NonFatalHandlerBB =
        createBasicBlock("non_fatal." + CheckName);
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
    EmitBlock(FatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
                         NonFatalHandlerBB, NoMerge);
    EmitBlock(NonFatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
                         Cont, NoMerge);
  }

  EmitBlock(Cont);
}

/// Emit a cross-DSO CFI slow-path check: if \p Cond fails, call
/// __cfi_slowpath (or __cfi_slowpath_diag with static diagnostic data when
/// trapping is not requested for \p Ordinal).
void CodeGenFunction::EmitCfiSlowPathCheck(
    SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
    llvm::ConstantInt *TypeId, llvm::Value *Ptr,
    ArrayRef<llvm::Constant *> StaticArgs) {
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  // Hint that the slow path is unlikely to execute.
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    // Package the static diagnostic arguments into a private global to pass
    // to the diagnosing slow-path entry point.
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
  } else {
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  CGM.setDSOLocal(
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}

// Emit a stub for __cfi_check function so that the linker knows about this
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
  llvm::Module *M = &CGM.getModule();
  ASTContext &C = getContext();
  QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);

  // __cfi_check(uint64_t CallsiteTypeId, void *Addr, void *CFICheckFailData).
  FunctionArgList FnArgs;
  ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
  ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
                                        ImplicitParamKind::Other);
  FnArgs.push_back(&ArgCallsiteTypeId);
  FnArgs.push_back(&ArgAddr);
  FnArgs.push_back(&ArgCFICheckFailData);
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setAlignment(llvm::Align(4096));
  CGM.setDSOLocal(F);

  llvm::LLVMContext &Ctx = M->getContext();
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // CrossDSOCFI pass is not executed if there is no executable code.
  // Forward (Data, Addr) — note the argument order swap — to
  // __cfi_check_fail, which takes (Data, Addr); see EmitCfiCheckFail.
  SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
  llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}

// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument).
// Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings.  Default case (invalid
// failure kind) traps, but this should really never happen.  CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
void CodeGenFunction::EmitCfiCheckFail() {
  auto CheckHandler = SanitizerHandler::CFICheckFail;
  // TODO: the SanitizerKind is not yet determined for this check (and might
  //  not even be available, if Data == nullptr). However, we still want to
  //  annotate the instrumentation. We approximate this by using all the CFI
  //  kinds.
  SanitizerDebugLocation SanScope(
      this,
      {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
       SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
       SanitizerKind::SO_CFIICall},
      CheckHandler);
  // __cfi_check_fail(void *Data, void *Addr).
  FunctionArgList Args;
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
                            ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
                            ImplicitParamKind::Other);
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList.  This function does
  // not have a source location, but "src:*" would still apply.  Revert any
  // changes to SanOpts made in StartFunction.
  SanOpts = CGM.getLangOpts().Sanitize;

  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  // TODO: since there is no data, we don't know the CheckKind, and therefore
  //  cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
  //  NoMerge = false. Users can disable merging by disabling optimization.
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
                /*NoMerge=*/false);

  // Layout of the CFI diagnostic data passed from the caller:
  //   { i8 CheckKind, SourceLocation Loc, void *TypeDescriptor }.
  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  // Recompute whether Addr passes the "all-vtables" type test so the vcall
  // handlers can distinguish invalid vtables from valid-but-wrong ones.
  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
      {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::SO_CFIICall}};

  for (auto CheckKindOrdinalPair : CheckKinds) {
    int Kind = CheckKindOrdinalPair.first;
    SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;

    // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
    //  relying on the SanitizerScope with all CFI ordinals

    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Ordinal))
      EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
                {}, {Data, Addr, ValidVtable});
    else
      // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
      //  Although the compiler allows SanitizeMergeHandlers to be set
      //  independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
      //  requires that SanitizeMergeHandlers is a subset of Sanitize.
      EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}

/// Emit `unreachable`, first emitting the SO_Unreachable ubsan check (which
/// always fails) when -fsanitize=unreachable is enabled.
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    auto CheckOrdinal = SanitizerKind::SO_Unreachable;
    auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             CheckOrdinal),
              CheckHandler, EmitCheckSourceLocation(Loc), {});
  }
  Builder.CreateUnreachable();
}

/// Branch on \p Checked and, on failure, execute llvm.ubsantrap tagged with
/// \p CheckHandlerID. When optimizing (and merging is allowed), reuses a
/// single trap block per check type to save code size.
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID,
                                    bool NoMerge) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if ((int)TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);

  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
            (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());

  llvm::MDBuilder MDHelper(getLLVMContext());
  if (TrapBB && !NoMerge) {
    // Reuse the existing trap block; merge the debug location of this check
    // into the shared trap call.
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB,
                         MDHelper.createLikelyBranchWeights());
  } else {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB,
                         MDHelper.createLikelyBranchWeights());
    EmitBlock(TrapBB);

    llvm::CallInst *TrapCall =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    if (NoMerge)
      TrapCall->addFnAttr(llvm::Attribute::NoMerge);
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  }

  EmitBlock(Cont);
}

/// Emit a call to the given trap intrinsic, honoring -ftrap-function and
/// any enclosing [[clang::nomerge]] statement.
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  if (InNoMergeAttributedStmt)
    TrapCall->addFnAttr(llvm::Attribute::NoMerge);
  return TrapCall;
}

/// Emit the address produced by array-to-pointer decay of \p E, i.e. the
/// address of the array's first element, with the element's memory type.
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Addr.withElementType(NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue.
// However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no base lvalue specified at all.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Addr.withElementType(ConvertTypeForMem(EltType));
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}

/// Emit a GEP for an array subscript, using a (potentially) bounds-checked
/// inbounds GEP when \p inbounds is set. Returns the raw element pointer.
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value*> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}

/// Address-returning variant of the raw-pointer GEP emitter above; carries
/// the computed element alignment through to the result.
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     llvm::Type *elementType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     CharUnits align,
                                     const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      align, name);
  } else {
    return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
  }
}

/// Compute the alignment of the element at index \p idx of an array whose
/// start is aligned to \p arrayAlign and whose elements are \p eltSize wide.
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}

/// Strip nested VLA layers off \p vla and return the innermost element type
/// with a fixed (non-variable) size.
static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}

/// Whether the record carries the BPF preserve_static_offset attribute.
static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
  return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
}

/// Whether the expression's pointee record carries preserve_static_offset.
static bool hasBPFPreserveStaticOffset(const Expr *E) {
  if (!E)
    return false;
  QualType PointeeType = E->getType()->getPointeeType();
  if (PointeeType.isNull())
    return false;
  if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
    return hasBPFPreserveStaticOffset(BaseDecl);
  return false;
}

// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
                                               Address &Addr) {
  // The intrinsic is only meaningful for BPF targets.
  if (!CGF.getTarget().getTriple().isBPF())
    return Addr;

  llvm::Function *Fn =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
  llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
  return Address(Call, Addr.getElementType(), Addr.getAlignment());
}

/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p;
  //    p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                               ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}

/// QualType-based array-subscript GEP emitter: computes the element alignment
/// from the static element size and, for BPF preserve-access-index bases,
/// routes constant indices through llvm.preserve.array.access.index.
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except that last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base.  This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  if (hasBPFPreserveStaticOffset(Base))
    addr = wrapWithBPFPreserveStaticOffset(CGF, addr);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    addr = emitArraySubscriptGEP(CGF, addr, indices,
                                 CGF.ConvertTypeForMem(eltType), inbounds,
                                 signedIndices, loc, eltAlign, name);
    return addr;
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
        addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}

namespace {

/// StructFieldAccess is a simple visitor class to grab the first l-value to
/// r-value cast Expr.
struct StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  // Stop at the first LValueToRValue cast; otherwise keep descending.
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace

/// The offset of a field from the beginning of the record.
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field, int64_t &Offset) {
  // Accumulates the bit offset of \p Field within \p RD into \p Offset,
  // recursing into nested record-typed fields. Returns true once found.
  ASTContext &Ctx = CGF.getContext();
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  unsigned FieldNo = 0;

  for (const FieldDecl *FD : RD->fields()) {
    if (FD == Field) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType())
      if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    // Union members all start at offset 0, so the layout field index
    // isn't advanced for them.
    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}

/// Returns the relative offset difference between \p FD1 and \p FD2.
/// \code
///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
/// \endcode
/// Both fields must be within the same struct.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
                                                        const FieldDecl *FD1,
                                                        const FieldDecl *FD2) {
  const RecordDecl *FD1OuterRec =
      FD1->getParent()->getOuterLexicalRecordContext();
  const RecordDecl *FD2OuterRec =
      FD2->getParent()->getOuterLexicalRecordContext();

  if (FD1OuterRec != FD2OuterRec)
    // Fields must be within the same RecordDecl.
    return std::optional<int64_t>();

  int64_t FD1Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
    return std::optional<int64_t>();

  int64_t FD2Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
    return std::optional<int64_t>();

  return std::make_optional<int64_t>(FD1Offset - FD2Offset);
}

/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
/// attribute, generate bounds checking code. The "count" field is at the top
/// level of the struct or in an anonymous struct, that's also at the top level.
/// Future expansions may allow the "count" to reside at any place in the
/// struct, but the value of "counted_by" will be a "simple" path to the count,
/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
/// similar to emit the correct GEP.
void CodeGenFunction::EmitCountedByBoundsChecking(
    const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy,
    QualType ArrayTy, bool Accessed, bool FlexibleArray) {
  const auto *ME = dyn_cast<MemberExpr>(E->IgnoreImpCasts());
  if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
    return;

  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  if (FlexibleArray &&
      !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
    return;

  const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
  const FieldDecl *CountFD = FD->findCountedByField();
  if (!CountFD)
    return;

  if (std::optional<int64_t> Diff =
          getOffsetDifferenceInBits(*this, CountFD, FD)) {
    if (!Addr.isValid()) {
      // An invalid Address indicates we're checking a pointer array access.
      // Emit the checked L-Value here.
      LValue LV = EmitCheckedLValue(E, TCK_MemberAccess);
      Addr = LV.getAddress();
    }

    // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
    // uint64_t, which messes things up if we have a negative offset difference.
    Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());

    // Create a GEP with the byte offset between the counted object and the
    // count and use that to load the count value.
    Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Int8PtrTy, Int8Ty);

    llvm::Type *CountTy = ConvertType(CountFD->getType());
    llvm::Value *Res =
        Builder.CreateInBoundsGEP(Int8Ty, Addr.emitRawPointer(*this),
                                  Builder.getInt32(*Diff), ".counted_by.gep");
    Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
                                    ".counted_by.load");

    // Now emit the bounds checking.
    EmitBoundsCheckImpl(E, Res, Idx, IdxTy, ArrayTy, Accessed);
  }
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate.  Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  // The lambda above captured IdxPre by value; clear it so the pre-emitted
  // index can't be reused accidentally below.
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isSubscriptableVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
                                 LHS.getBaseInfo(), TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().PointerOverflowDefined) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().PointerOverflowDefined,
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitCountedByBoundsChecking(Array, Idx, ArrayLV.getAddress(),
                                  E->getIdx()->getType(), Array->getType(),
                                  Accessed, /*FlexibleArray=*/true);

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
      // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
      // new struct path TBAA, we must a use a plain access.
      EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
    } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
      EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
    } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
      // The array element is complete, even if the array is not.
      EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
    } else {
      // The TBAA access info from the array (base) lvalue is ordinary. We will
      // adapt it to create access info for the element.
      EltTBAAInfo = ArrayLV.getTBAAInfo();

      // We retain the TBAA struct path (BaseType and Offset members) from the
      // array. In the TBAA representation, we map any array access to the
      // element at index 0, as the index is generally a runtime value. This
      // element has the same offset in the base type as the array itself.
      // If the array lvalue had no base type, there is no point trying to
      // generate one, since an array itself is not a valid base type.

      // We also retain the access type from the base lvalue, but the access
      // size must be updated to the size of an individual element.
      EltTBAAInfo.Size =
          getContext().getTypeSizeInChars(E->getType()).getQuantity();
    }
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Address BaseAddr =
        EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
                                 !getLangOpts().PointerOverflowDefined,
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());

    if (SanOpts.has(SanitizerKind::ArrayBounds)) {
      // Walk down to the underlying l-value-to-r-value cast so counted_by
      // checking can see the pointer's origin.
      StructFieldAccess Visitor;
      const Expr *Base = Visitor.Visit(E->getBase());

      if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
          CE && CE->getCastKind() == CK_LValueToRValue)
        EmitCountedByBoundsChecking(CE, Idx, Address::invalid(),
                                    E->getIdx()->getType(), ptrType, Accessed,
                                    /*FlexibleArray=*/false);
    }
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

/// Emit a matrix subscript index as an IntPtrTy value, inserting an integer
/// cast when the emitted index has a different width.
llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
  llvm::Value *Idx = EmitScalarExpr(E);
  if (Idx->getType() == IntPtrTy)
    return Idx;
  bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
  return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
}

LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits if needed.
  llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());

  // Matrices are laid out column-major: linear index = col * numRows + row.
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}

/// Compute the base address for an array-section expression, recursing when
/// the base is itself an ArraySectionExpr and loading through the pointer
/// otherwise.
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress();
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}

LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
                                             bool IsLowerBound) {

  assert(!E->isOpenACCArraySection() &&
         "OpenACC Array section codegen not implemented");

  QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      // Fold the "- 1" into whichever operand is a known constant.
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().PointerOverflowDefined);
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().PointerOverflowDefined)
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().PointerOverflowDefined,
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().PointerOverflowDefined,
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
                                ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().PointerOverflowDefined,
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    // need to zero extend an hlsl boolean vector to store it back to memory
    QualType Ty = E->getBase()->getType();
    llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
    if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
      Vec = Builder.CreateZExt(Vec, LTy);
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, Ty, AlignmentSource::Decl);
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // The base is itself a swizzle (e.g. v.xyzw.xy); compose the two element
  // lists into one constant vector of indices.
  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned Index : Indices)
    CElts.push_back(BaseElts->getAggregateElement(Index));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}

/// Whether the expression, after stripping parens and walking down member
/// bases, bottoms out at a sentinel null pointer constant.
bool CodeGenFunction::isUnderlyingBasePointerConstantNull(const Expr *E) {
  const Expr *UnderlyingBaseExpr = E->IgnoreParens();
  while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
    UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
  return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // Check whether the underlying base pointer is a constant null.
  // If so, we do not set inbounds flag for GEP to avoid breaking some
  // old-style offsetof idioms.
  bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
                    !isUnderlyingBasePointerConstantNull(BaseExpr);
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
                                                 llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
  if (MD) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }
  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    // For `operator()(this Self&& self, ...)` the lambda object is the first
    // (explicit object) parameter rather than an implicit 'this'.
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
                                           AlignmentSource::Decl);
    else
      LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
                                D->getType().getNonReferenceType());

    // Make sure we have an lvalue to the lambda itself and not a derived class.
    auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
    auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
    if (ThisTy != LambdaTy) {
      const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
      Address Base = GetAddressOfBaseClass(
          LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
          BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
      LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
    }
  } else {
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}

LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}

/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  // Count the unnamed bitfields preceding FieldIndex; the debug-info index
  // is the AST index minus that count.
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitField())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}

/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field,
                                       bool IsInBounds) {
  // A zero-sized field has no LLVM storage of its own; if the record layout
  // still assigns it a nonzero offset, advance to it with a byte-wise GEP.
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = Base.withElementType(CGF.Int8Ty);
  if (!IsInBounds)
    return CGF.Builder.CreateConstByteGEP(Base, Offset);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field, bool IsInBounds) {
  if (isEmptyFieldForLayout(CGF.getContext(), field))
    return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);

  const RecordDecl *rec = field->getParent();

  // Map the AST field to its slot in the LLVM struct type.
  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  if (!IsInBounds)
    return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}

/// Emit a field access that preserves the original (debug-info) field index,
/// for targets (BPF) that rewrite accesses via
/// llvm.preserve.struct.access.index.
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                                        Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}

/// Return true if the class, any of its bases, or any of its fields
/// (recursively) contains a vtable pointer.
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}

LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
                                           bool IsInBounds) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    // AAPCS targets may access volatile bit-fields through the container
    // type declared in the source rather than the packed storage unit.
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress();
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (hasBPFPreserveStaticOffset(rec))
      Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0) {
          // For structs, we GEP to the field that the record layout suggests.
          if (!IsInBounds)
            Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
          else
            Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
        }
      } else {
        // BPF preserve-access-index region: emit the access through the
        // intrinsic so the field index survives for later relocation.
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    Addr = Addr.withElementType(FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type been assigned for the base access, then try to generate
    // one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress();
  if (hasBPFPreserveStaticOffset(rec))
    addr = wrapWithBPFPreserveStaticOffset(*this, addr);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group. This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped =
          Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr =
          Address(Builder.CreatePreserveUnionAccessIndex(
                      addr.emitRawPointer(*this),
                      getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
                  addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}

/// Like EmitLValueForField, but for use when the l-value is the target of an
/// initialization: reference-typed fields yield the address of the reference
/// storage itself rather than loading through it.
LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(
      *this, Base.getAddress(), Field,
      /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
  if (E->isFileScope()) {
    // File-scope compound literals are emitted as constant globals.
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a glvalue
/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
/// LValue is returned and the current block has been terminated.
static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                         const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
    return std::nullopt;
  }

  return CGF.EmitLValue(Operand);
}

namespace {
// Handle the case where the condition is a constant evaluatable simple integer,
// which means we don't have to separately handle the true/false blocks.
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    // Only fold away the dead arm if it contains no labels that could be
    // jumped to from elsewhere.
    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      CGF.markStmtMaybeUsed(Dead);
      // If a throw expression we emit it and return an undefined lvalue
      // because it can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return std::nullopt;
}

// Result of emitting both arms of a conditional: the final block of each arm
// and the value (if any) each arm produced.
struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  // An empty optional means the arm was a throw-expression and the block has
  // already been terminated, so there is nothing to branch from.
  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
} // namespace

void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue{};
  });
}

LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (std::optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    // Both arms produced a value: merge the two addresses (e.g. with a phi)
    // in the continuation block.
    Address lhsAddr = Info.LHS->getAddress();
    Address rhsAddr = Info.RHS->getAddress();
    Address result = mergeAddressesInConditionalExpr(
        lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
        Builder.GetInsertBlock(), expr->getType());
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  } else {
    // Exactly one arm was a throw-expression; the other arm's value is the
    // result.
    assert((Info.LHS || Info.RHS) &&
           "both operands of glvalue conditional are throw-expressions?");
    return Info.LHS ? *Info.LHS : *Info.RHS;
  }
}

/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members.
/// This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  // None of these cast kinds can produce an lvalue.
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress();
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifier to LValue, if present in E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress();
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress();

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
                    E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress().getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");

  case CK_VectorSplat: {
    // LValue results of vector splats are only supported in HLSL.
    if (!getLangOpts().HLSL)
      return EmitUnsupportedLValue(E, "unexpected cast lvalue");
    return EmitLValue(E->getSubExpr());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

/// Emit an l-value for an opaque value expression that is bound as an l-value.
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

/// Emit the base l-value and the casted temporary used for an HLSL out/inout
/// argument, binding both opaque values for later use.
std::pair<LValue, LValue>
CodeGenFunction::EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty) {
  // Emitting the casted temporary through an opaque value.
  LValue BaseLV = EmitLValue(E->getArgLValue());
  OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);

  QualType ExprTy = E->getType();
  Address OutTemp = CreateIRTemp(ExprTy);
  LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);

  // For inout, the temporary is seeded with the argument's current value.
  if (E->isInOut())
    EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
                               TempLV);

  OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
  return std::make_pair(BaseLV, TempLV);
}

/// Emit an HLSL out/inout argument: pass the temporary to the call and
/// register a writeback of the temporary into the original l-value.
LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
                                           CallArgList &Args, QualType Ty) {

  auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);

  llvm::Value *Addr = TempLV.getAddress().getBasePointer();
  llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());

  llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);

  llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);

  Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
  Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
                    LifetimeSize);
  Args.add(RValue::get(TmpAddr, *this), Ty);
  return TempLV;
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  // Return the previously-bound l-value if there is one.
  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
    it = OpaqueLValues.find(e);

  if (it != OpaqueLValues.end())
    return it->second;

  // A unique OVE may be emitted on demand from its source expression.
  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

/// Return the r-value previously bound for an opaque value expression, or
/// emit it from the source expression if the OVE is unique.
RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
    it = OpaqueRValues.find(e);

  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

/// Return true if a value has already been bound for the given opaque value
/// expression.
bool CodeGenFunction::isOpaqueValueEmitted(const OpaqueValueExpr *E) {
  if (OpaqueValueMapping::shouldBindAsLValue(E))
    return OpaqueLValues.contains(E);
  return OpaqueRValues.contains(E);
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue,
                                     llvm::CallBase **CallOrInvoke) {
  // Ensure CallOrInvoke always points at valid storage so the scope-exit
  // below can inspect the emitted instruction.
  llvm::CallBase *CallOrInvokeStorage;
  if (!CallOrInvoke) {
    CallOrInvoke = &CallOrInvokeStorage;
  }

  // Tag coro-elide-safe calls with the CoroElideSafe attribute on whatever
  // call/invoke gets emitted on any of the paths below.
  auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
    if (E->isCoroElideSafe()) {
      auto *I = *CallOrInvoke;
      if (I)
        I->addFnAttr(llvm::Attribute::CoroElideSafe);
    }
  });

  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);

  // A CXXOperatorCallExpr is created even for explicit object methods, but
  // these should be treated like static function call.
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
        MD && MD->isImplicitObjectMemberFunction())
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
                  /*Chain=*/nullptr, CallOrInvoke);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue,
                                           llvm::CallBase **CallOrInvoke) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
                  /*Chain=*/nullptr, CallOrInvoke);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
/// Return true iff every declaration on \p FD's redeclaration chain is an
/// inline builtin declaration, i.e. no out-of-line definition shadows the
/// inline one.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}

/// Build a CGCallee for a direct call to \p GD, taking care of builtins,
/// inline builtin implementations, and (on the host side) CUDA kernel stubs.
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      // Lazily create the ".inline" clone; it carries AlwaysInline so the
      // inline body is substituted at call sites.
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If we
    // are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is
    // not a predefined library function, which means we must generate the
    // builtin no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
  // Host-side CUDA: a __global__ function is called through its kernel stub,
  // not the device entry point.
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}

/// For OpenCL-spelled device kernels, a direct call references the generated
/// stub rather than the kernel entry itself.
static GlobalDecl getGlobalDeclForDirectCall(const FunctionDecl *FD) {
  if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
    return GlobalDecl(FD, KernelReferenceKind::Stub);
  return GlobalDecl(FD);
}

/// Resolve a callee expression to a CGCallee, looking through parens,
/// decay casts and template substitutions; direct references to functions
/// become direct callees, everything else is an indirect callee.
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

    // Try to remember the original __ptrauth qualifier for loads of
    // function pointers.
    if (ICE->getCastKind() == CK_LValueToRValue) {
      const Expr *SubExpr = ICE->getSubExpr();
      if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
        std::pair<llvm::Value *, CGPointerAuthInfo> Result =
            EmitOrigPointerRValue(E);

        QualType FunctionType = PtrType->getPointeeType();
        assert(FunctionType->isFunctionType());

        GlobalDecl GD;
        if (const auto *VD =
                dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
          GD = GlobalDecl(VD);
        }
        CGCalleeInfo CalleeInfo(FunctionType->getAs<FunctionProtoType>(), GD);
        CGCallee Callee(CalleeInfo, Result.first, Result.second);
        return Callee;
      }
    }

    // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, getGlobalDeclForDirectCall(FD));
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      // The base is evaluated only for its side effects.
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

    // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

    // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
  CGCallee callee(calleeInfo, calleePtr, pointerAuth);
  return callee;
}

/// Emit a binary operator as an l-value: comma, pointer-to-member access,
/// or (the main case) assignment, dispatching on the evaluation kind of the
/// assigned type.
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Create a Key Instructions source location atom group that covers both
  // LHS and RHS expressions. Nested RHS expressions may get subsequently
  // separately grouped (1 below):
  //
  //   1. `a = b = c` -> Two atoms.
  //   2. `x = new(1)` -> One atom (for both addr store and value store).
  //   3. Complex and agg assignment -> One atom.
  ApplyAtomGroup Grp(getDebugInfo());

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    // Pointer-auth-qualified stores sign the RHS value for the destination
    // schema before storing through an unqualified copy of the LHS.
    if (PointerAuthQualifier PtrAuth =
            E->getLHS()->getType().getPointerAuth()) {
      LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
      LValue CopiedLV = LV;
      CopiedLV.getQuals().removePointerAuth();
      llvm::Value *RV =
          EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
      EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
      EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
      return LV;
    }

    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    // TODO: Can we de-duplicate this code with the corresponding code in
    // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
    RValue RV;
    llvm::Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField()) {
      llvm::Value *RHS =
          EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
      RV = RValue::get(RHS);
    } else
      RV = EmitAnyExpr(E->getRHS());

    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);

    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());

    if (LV.isBitField()) {
      llvm::Value *Result = nullptr;
      // If bitfield sanitizers are enabled we want to use the result
      // to check whether a truncation or sign change has occurred.
      if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
        EmitStoreThroughBitfieldLValue(RV, LV, &Result);
      else
        EmitStoreThroughBitfieldLValue(RV, LV);

      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
      QualType DstType = E->getLHS()->getType();
      EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                  LV.getBitFieldInfo(), E->getExprLoc());
    } else
      EmitStoreThroughLValue(RV, LV);

    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    // If the lang opt is HLSL and the LHS is a constant array
    // then we are performing a copy assignment and call a special
    // function because EmitAggExprToLValue emits to a temporary LValue
    if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
      return EmitHLSLArrayAssignLValue(E);

    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

// This function implements trivial copy assignment for HLSL's
// assignable constant arrays.
LValue CodeGenFunction::EmitHLSLArrayAssignLValue(const BinaryOperator *E) {
  // Don't emit an LValue for the RHS because it might not be an LValue
  LValue LHS = EmitLValue(E->getLHS());
  // In C the RHS of an assignment operator is an RValue.
  // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
  // EmitInitializationToLValue to emit an RValue into an LValue.
  EmitInitializationToLValue(E->getRHS(), LHS);
  return LHS;
}

/// Emit a call expression as an l-value; only valid when the call returns
/// an aggregate or a reference.
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E,
                                           llvm::CallBase **CallOrInvoke) {
  RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

/// Bind a CXXConstructExpr to an l-value by constructing into a temporary.
/// Only legal when the type has a trivial destructor (nothing registers a
/// cleanup for the temporary).
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

/// Return the address of the MS __uuidof() GUID object for \p E.
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
      .withElementType(ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

/// Materialize a C++ bind-temporary as an l-value; the temporary's
/// destructor is pushed via EmitCXXTemporary, so the slot is marked
/// externally destructed.
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

/// Emit an Objective-C message send as an l-value; only valid for aggregate
/// results or reference returns.
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
    CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

/// Emit the ivar offset converted (zext/trunc) to the target's pointer-diff
/// type, for use in pointer arithmetic.
llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

/// Emit a call through \p OrigCallee: performs the UBSan function-type check
/// and the CFI indirect-call check where enabled, evaluates the arguments in
/// the required order, and delegates to the CGCall-level EmitCall.
///
/// \param Chain optional chain argument prepended to the call (also flags
///        a chain call to arrangeFreeFunctionCall).
/// \param CallOrInvoke if non-null, receives the emitted instruction.
/// \param ResolvedFnInfo if non-null, receives the resolved CGFunctionInfo.
RValue CodeGenFunction::EmitCall(QualType CalleeType,
                                 const CGCallee &OrigCallee, const CallExpr *E,
                                 ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain,
                                 llvm::CallBase **CallOrInvoke,
                                 CGFunctionInfo const **ResolvedFnInfo) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
          !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
         "trying to emit a call to an immediate function");

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  // -fsanitize=function: for indirect calls through a prototyped type,
  // compare the signature/type-hash stored in the callee's prefix data
  // against the hash of the static callee type.
  if (SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
      !isa<FunctionNoProtoType>(PointeeType)) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      auto CheckOrdinal = SanitizerKind::SO_Function;
      auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
      SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
      auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);

      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();
      if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
        // Use raw pointer since we are using the callee pointer as data here.
        Address Addr =
            Address(CalleePtr, CalleePtr->getType(),
                    CharUnits::fromQuantity(
                        CalleePtr->getPointerAlignment(CGM.getDataLayout())),
                    Callee.getPointerAuthInfo(), nullptr);
        CalleePtr = Addr.emitRawPointer(*this);
      }

      // On 32-bit Arm, the low bit of a function pointer indicates whether
      // it's using the Arm or Thumb instruction set. The actual first
      // instruction lives at the same address either way, so we must clear
      // that low bit before using the function address to find the prefix
      // structure.
      //
      // This applies to both Arm and Thumb target triples, because
      // either one could be used in an interworking context where it
      // might be passed function pointers of both types.
      llvm::Value *AlignedCalleePtr;
      if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
        llvm::Value *CalleeAddress =
            Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
        llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
        llvm::Value *AlignedCalleeAddress =
            Builder.CreateAnd(CalleeAddress, Mask);
        AlignedCalleePtr =
            Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
      } else {
        AlignedCalleePtr = CalleePtr;
      }

      // The prefix struct sits immediately before the function, hence the
      // GEP with index -1.
      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      // Only check the type hash if the signature matched (otherwise the
      // callee has no prefix data and the load would be meaningless).
      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
                StaticData, {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
      FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
    CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);

  bool CFIUnchecked =
      CalleeType->hasPointeeToToCFIUncheckedCalleeFunctionType();

  // If we are checking indirect calls and this call is indirect, check that the
  // function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
    auto CheckOrdinal = SanitizerKind::SO_CFIICall;
    auto CheckHandler = SanitizerHandler::CFICheckFail;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
                           StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
                StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment syntax
  // right-to-left, and that we evaluate arguments to certain other operators
  // left-to-right. Note that we allow this to override the order dictated by
  // the calling convention on the MS ABI, which means that parameter
  // destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  bool StaticOperator = false;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }

    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
        MD && MD->isStatic())
      StaticOperator = true;
  }

  auto Arguments = E->arguments();
  if (StaticOperator) {
    // If we're calling a static operator, we need to emit the object argument
    // and ignore it.
    EmitIgnoredExpr(E->getArg(0));
    Arguments = drop_begin(Arguments, 1);
  }
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
               E->getDirectCallee(), /*ParamsToSkip=*/0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  if (ResolvedFnInfo)
    *ResolvedFnInfo = &FnInfo;

  // HIP function pointer contains kernel handle when it is used in triple
  // chevron. The kernel stub needs to be loaded from kernel handle and used
  // as callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Stub = Builder.CreateLoad(
        Address(Handle, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *LocalCallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate function declaration DISubprogram in order to be used
  // in debug info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }
  if (CallOrInvoke)
    *CallOrInvoke = LocalCallOrInvoke;

  return Call;
}

/// Emit `base .* ptr` / `base ->* ptr` as an l-value for a pointer to data
/// member.
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
                    !isUnderlyingBasePointerConstantNull(E->getLHS());
  Address MemberAddr = EmitCXXMemberDataPointerAddress(
      E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

/// Attach !fpmath metadata requesting \p Accuracy ULPs to the instruction
/// that produced \p Val; a 0.0 accuracy (or a non-instruction value) is a
/// no-op.
void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

/// Apply the relaxed OpenCL/HIP single-precision accuracy (3 ULP) to a sqrt
/// result unless correctly-rounded divide/sqrt was requested.
void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}

/// Apply the relaxed OpenCL/HIP single-precision accuracy (2.5 ULP) to a
/// divide result unless correctly-rounded divide/sqrt was requested.
void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}

namespace {
// Result of emitPseudoObjectExpr: exactly one of the two members is
// meaningful, selected by the forLValue flag.
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
}

/// Emit a PseudoObjectExpr by walking its semantic expressions in order,
/// binding non-unique OpaqueValueExprs to their sources and evaluating the
/// designated result expression either as an l-value or r-value.
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
    opaque.unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}

/// Flatten an aggregate at \p Addr into a depth-first list of scalar
/// accesses (GEP plus, for vector elements, an extract/insert index) and
/// their corresponding element types, for HLSL flat casts.
void CodeGenFunction::FlattenAccessAndType(
    Address Addr, QualType AddrType,
    SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
    SmallVectorImpl<QualType> &FlatTypes) {
  // WorkList is list of type we are processing + the Index List to access
  // the field of that type in Addr for use in a GEP
  llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>,
                    16>
      WorkList;
  llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
  // Addr should be a pointer so we need to 'dereference' it
  WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}});

  while (!WorkList.empty()) {
    auto [T, IdxList] = WorkList.pop_back_val();
    T = T.getCanonicalType().getUnqualifiedType();
    assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");
    if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
      uint64_t Size = CAT->getZExtSize();
      // Push elements in reverse so they pop in source order.
      for (int64_t I = Size - 1; I > -1; I--) {
        llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
        IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
        WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
      }
    } else if (const auto *RT = dyn_cast<RecordType>(T)) {
      const RecordDecl *Record = RT->getDecl();
      assert(!Record->isUnion() && "Union types not supported in flat cast.");

      const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);

      llvm::SmallVector<QualType, 16> FieldTypes;
      if (CXXD && CXXD->isStandardLayout())
        Record = CXXD->getStandardLayoutBaseWithFields();

      // deal with potential base classes
      if (CXXD && !CXXD->isStandardLayout()) {
        for (auto &Base : CXXD->bases())
          FieldTypes.push_back(Base.getType());
      }

      for (auto *FD : Record->fields())
        FieldTypes.push_back(FD->getType());

      // Push fields in reverse so they pop in declaration order.
      for (int64_t I = FieldTypes.size() - 1; I > -1; I--) {
        llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
        IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
        WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy});
      }
    } else if (const auto *VT = dyn_cast<VectorType>(T)) {
      llvm::Type *LLVMT = ConvertTypeForMem(T);
      CharUnits Align = getContext().getTypeAlignInChars(T);
      Address GEP =
          Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep");
      for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
        llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I);
        // gep on vector fields is not recommended so combine gep with
        // extract/insert
        AccessList.emplace_back(GEP, Idx);
        FlatTypes.push_back(VT->getElementType());
      }
    } else {
      // a scalar/builtin type
      llvm::Type *LLVMT = ConvertTypeForMem(T);
      CharUnits Align = getContext().getTypeAlignInChars(T);
      Address GEP =
          Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep");
      AccessList.emplace_back(GEP, nullptr);
      FlatTypes.push_back(T);
    }
  }
}