//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc.) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot.
  // We do it here, as opposed to the deletion time of the CodeGenModule,
  // because we have to ensure the IR has not yet been "emitted" to the
  // outside, so modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue = CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  Address Addr(V, ConvertTypeForMem(T), Align);
  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
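    // For example, a body that is nothing but 'return 0;' reaches this point
    // with straight-line code and no branches to the return block, so the
    // block can be folded away entirely.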
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 &&
                               NumSimpleReturnExprs == NumReturnExprs &&
                               ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
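    // (Otherwise, single-stepping in a debugger could appear to jump from the
    // closing '}' back into the body when the cleanups and the 'ret' are
    // reached.)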
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() && "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for
  // us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt, if created, was lazily created when it was required;
  // remove it now since it was just for our own convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
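  // This arises, e.g., for GNU C code like 'void *p = &&lbl; lbl:;' where a
  // label's address is taken but no 'goto *p;' is ever executed.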
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // as the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width
  // from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // Add vscale_range attribute if appropriate.
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
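  // (This happens, e.g., when every path through the function ends in a call
  // to a 'noreturn' function, so nothing ever branches to the return block.)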
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
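  // For example, 'void () noexcept' is hashed as plain 'void ()', so an
  // indirect call through a 'void (*)()' pointer still matches.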
  if (!isa<FunctionNoProtoType>(Ty))
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(
      CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl *F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
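/// This is the case when the return type is annotated _Nonnull under
/// -fsanitize=nullability-return, or when the declaration carries
/// returns_nonnull under -fsanitize=returns-nonnull-attribute.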
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
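    // Note that the kernel and userspace flavors of a sanitizer disable each
    // other here: no_sanitize("address") also turns off KernelAddress and
    // vice versa (likewise for HWAddress).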
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address |
                         SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() &&
           II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the
  // split coroutine boundaries.
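  // (The null checks introduce extra control flow that the coroutine
  // splitting passes do not know how to rewrite once the body is split into
  // ramp/resume/destroy pieces.)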
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply XRay attributes to the function (as a string, for now).
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to
  // ARM/ARM64 backends as they don't need it -- instructions on these
  // architectures are always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in
  // EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //   Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //   kernels cannot include RTTI information, exception classes,
  //   recursive code, virtual functions or make use of C++ libraries that
  //   are not compiled for the device.
  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit
    // parameters, such as 'this' and 'vtt', show up in the debug info.
    // Preserve the calling convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute "instrument-function-entry-inlined" is set to
  // the mcount function name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has the
    // no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue =
        Address(&*AI, ConvertType(RetTy),
                CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr =
        Builder.CreateStructGEP(CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(),
                                     "agg.result");
    ReturnValue = Address(Addr, ConvertType(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
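    // (Under ARC, e.g. 'id f(void)' returns an autoreleased value unless the
    // function is marked ns_returns_retained; the epilogue emits that
    // autorelease when AutoreleaseResult is set.)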
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  // Handle emitting HLSL entry functions.
  if (D && D->hasAttr<HLSLShaderAttr>())
    CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (isa_and_nonnull<CXXMethodDecl>(D) &&
      cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this', either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(
          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to emit
  // the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
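  // For example, 'void f(int n, int m, double a[n][m])' has a variably
  // modified parameter type; 'n' and 'm' are evaluated once here and cached
  // in VLASizeMap for later size and index arithmetic.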
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);

  // This is checked after emitting the function body so we know if there
  // are any permitted infinite loops.
  if (checkIfFunctionMustProgress())
    CurFn->addFnAttr(llvm::Attribute::MustProgress);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
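  // (Interposable functions, e.g. weak definitions, may be replaced at link
  // or load time by a different body that does throw.)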
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually
  // call the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(),
          ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
    std::string FDInlineName = (Fn->getName() + ".inline").str();
    llvm::Module *M = Fn->getParent();
    llvm::Function *Clone = M->getFunction(FDInlineName);
    if (!Clone) {
      Clone = llvm::Function::Create(Fn->getFunctionType(),
                                     llvm::GlobalValue::InternalLinkage,
                                     Fn->getAddressSpace(), FDInlineName, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
    }
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    Fn = Clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
    // to detect that situation before we reach codegen, so do some late
    // replacement.
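    // This can happen, e.g., when a 'gnu_inline' always_inline definition of
    // a builtin such as 'memcpy' in a header is followed by an out-of-line
    // definition elsewhere in the translation unit.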
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
         PD = PD->getPreviousDecl()) {
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
        std::string FDInlineName = (Fn->getName() + ".inline").str();
        llvm::Module *M = Fn->getParent();
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
          Clone->replaceAllUsesWith(Fn);
          Clone->eraseFromParent();
        }
        break;
      }
    }
  }

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that was possibly attached to the
    // function due to an earlier declaration without the nodebug attribute.
    Fn->setSubprogram(nullptr);
    // Disable debug info indefinitely for this function.
    DebugInfo = nullptr;
  }

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  if (Body) {
    // Coroutines always emit lifetime markers.
    if (isa<CoroutineBodyStmt>(Body))
      ShouldEmitLifetimeMarkers = true;

    // Initialize helper which will detect jumps which can cause invalid
    // lifetime markers.
    if (ShouldEmitLifetimeMarkers)
      Bypasses.Init(Body);
  }

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Save parameters for coroutine function.
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
    llvm::append_range(FnArgs, FD->parameters());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
1455 emitImplicitAssignmentOperatorBody(Args); 1456 } else if (Body) { 1457 EmitFunctionBody(Body); 1458 } else 1459 llvm_unreachable("no definition for emitted function"); 1460 1461 // C++11 [stmt.return]p2: 1462 // Flowing off the end of a function [...] results in undefined behavior in 1463 // a value-returning function. 1464 // C11 6.9.1p12: 1465 // If the '}' that terminates a function is reached, and the value of the 1466 // function call is used by the caller, the behavior is undefined. 1467 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock && 1468 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) { 1469 bool ShouldEmitUnreachable = 1470 CGM.getCodeGenOpts().StrictReturn || 1471 !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType()); 1472 if (SanOpts.has(SanitizerKind::Return)) { 1473 SanitizerScope SanScope(this); 1474 llvm::Value *IsFalse = Builder.getFalse(); 1475 EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return), 1476 SanitizerHandler::MissingReturn, 1477 EmitCheckSourceLocation(FD->getLocation()), std::nullopt); 1478 } else if (ShouldEmitUnreachable) { 1479 if (CGM.getCodeGenOpts().OptimizationLevel == 0) 1480 EmitTrapCall(llvm::Intrinsic::trap); 1481 } 1482 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) { 1483 Builder.CreateUnreachable(); 1484 Builder.ClearInsertionPoint(); 1485 } 1486 } 1487 1488 // Emit the standard function epilogue. 1489 FinishFunction(BodyRange.getEnd()); 1490 1491 // If we haven't marked the function nothrow through other means, do 1492 // a quick pass now to see if we can. 1493 if (!CurFn->doesNotThrow()) 1494 TryMarkNoThrow(CurFn); 1495 } 1496 1497 /// ContainsLabel - Return true if the statement contains a label in it. If 1498 /// this statement is not executed normally, it not containing a label means 1499 /// that we can just remove the code. 1500 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { 1501 // Null statement, not a label! 1502 if (!S) return false; 1503 1504 // If this is a label, we have to emit the code, consider something like: 1505 // if (0) { ... foo: bar(); } goto foo; 1506 // 1507 // TODO: If anyone cared, we could track __label__'s, since we know that you 1508 // can't jump to one from outside their declared region. 1509 if (isa<LabelStmt>(S)) 1510 return true; 1511 1512 // If this is a case/default statement, and we haven't seen a switch, we have 1513 // to emit the code. 1514 if (isa<SwitchCase>(S) && !IgnoreCaseStmts) 1515 return true; 1516 1517 // If this is a switch statement, we want to ignore cases below it. 1518 if (isa<SwitchStmt>(S)) 1519 IgnoreCaseStmts = true; 1520 1521 // Scan subexpressions for verboten labels. 1522 for (const Stmt *SubStmt : S->children()) 1523 if (ContainsLabel(SubStmt, IgnoreCaseStmts)) 1524 return true; 1525 1526 return false; 1527 } 1528 1529 /// containsBreak - Return true if the statement contains a break out of it. 1530 /// If the statement (recursively) contains a switch or loop with a break 1531 /// inside of it, this is fine. 1532 bool CodeGenFunction::containsBreak(const Stmt *S) { 1533 // Null statement, not a label! 1534 if (!S) return false; 1535 1536 // If this is a switch or loop that defines its own break scope, then we can 1537 // include it and anything inside of it. 1538 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) || 1539 isa<ForStmt>(S)) 1540 return false; 1541 1542 if (isa<BreakStmt>(S)) 1543 return true; 1544 1545 // Scan subexpressions for verboten breaks. 
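// For example (a sketch): in 'if (c) break;' the BreakStmt escapes S and is
// found by this scan, whereas in 'while (c) { break; }' the break binds to
// the while loop, which the checks above already treat as self-contained.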
1546 for (const Stmt *SubStmt : S->children()) 1547 if (containsBreak(SubStmt)) 1548 return true; 1549 1550 return false; 1551 } 1552 1553 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) { 1554 if (!S) return false; 1555 1556 // Some statement kinds add a scope and thus never add a decl to the current 1557 // scope. Note, this list is longer than the list of statements that might 1558 // have an unscoped decl nested within them, but this way is conservatively 1559 // correct even if more statement kinds are added. 1560 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) || 1561 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) || 1562 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) || 1563 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S)) 1564 return false; 1565 1566 if (isa<DeclStmt>(S)) 1567 return true; 1568 1569 for (const Stmt *SubStmt : S->children()) 1570 if (mightAddDeclToScope(SubStmt)) 1571 return true; 1572 1573 return false; 1574 } 1575 1576 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 1577 /// to a constant, or if it does but contains a label, return false. If it 1578 /// constant folds return true and set the boolean result in Result. 1579 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 1580 bool &ResultBool, 1581 bool AllowLabels) { 1582 llvm::APSInt ResultInt; 1583 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) 1584 return false; 1585 1586 ResultBool = ResultInt.getBoolValue(); 1587 return true; 1588 } 1589 1590 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 1591 /// to a constant, or if it does but contains a label, return false. If it 1592 /// constant folds return true and set the folded value. 1593 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 1594 llvm::APSInt &ResultInt, 1595 bool AllowLabels) { 1596 // FIXME: Rename and handle conversion of other evaluatable things 1597 // to bool. 1598 Expr::EvalResult Result; 1599 if (!Cond->EvaluateAsInt(Result, getContext())) 1600 return false; // Not foldable, not integer or not fully evaluatable. 1601 1602 llvm::APSInt Int = Result.Val.getInt(); 1603 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond)) 1604 return false; // Contains a label. 1605 1606 ResultInt = Int; 1607 return true; 1608 } 1609 1610 /// Determine whether the given condition is an instrumentable condition 1611 /// (i.e. no "&&" or "||"). 1612 bool CodeGenFunction::isInstrumentedCondition(const Expr *C) { 1613 // Bypass simplistic logical-NOT operator before determining whether the 1614 // condition contains any other logical operator. 1615 if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens())) 1616 if (UnOp->getOpcode() == UO_LNot) 1617 C = UnOp->getSubExpr(); 1618 1619 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens()); 1620 return (!BOp || !BOp->isLogicalOp()); 1621 } 1622 1623 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that 1624 /// increments a profile counter based on the semantics of the given logical 1625 /// operator opcode. This is used to instrument branch condition coverage for 1626 /// logical operators. 
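/// For example (a sketch, assuming Clang coverage instrumentation is on):
/// for 'a && b', the branch on 'b' is routed through a "lop.rhscnt" block on
/// its true edge, so a counter records how often 'b' evaluated to true; for
/// 'a || b' the counter block sits on the false edge instead.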
1627 void CodeGenFunction::EmitBranchToCounterBlock( 1628 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, 1629 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */, 1630 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) { 1631 // If not instrumenting, just emit a branch. 1632 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr(); 1633 if (!InstrumentRegions || !isInstrumentedCondition(Cond)) 1634 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH); 1635 1636 llvm::BasicBlock *ThenBlock = nullptr; 1637 llvm::BasicBlock *ElseBlock = nullptr; 1638 llvm::BasicBlock *NextBlock = nullptr; 1639 1640 // Create the block we'll use to increment the appropriate counter. 1641 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt"); 1642 1643 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This 1644 // means we need to evaluate the condition and increment the counter on TRUE: 1645 // 1646 // if (Cond) 1647 // goto CounterIncrBlock; 1648 // else 1649 // goto FalseBlock; 1650 // 1651 // CounterIncrBlock: 1652 // Counter++; 1653 // goto TrueBlock; 1654 1655 if (LOp == BO_LAnd) { 1656 ThenBlock = CounterIncrBlock; 1657 ElseBlock = FalseBlock; 1658 NextBlock = TrueBlock; 1659 } 1660 1661 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means 1662 // we need to evaluate the condition and increment the counter on FALSE: 1663 // 1664 // if (Cond) 1665 // goto TrueBlock; 1666 // else 1667 // goto CounterIncrBlock; 1668 // 1669 // CounterIncrBlock: 1670 // Counter++; 1671 // goto FalseBlock; 1672 1673 else if (LOp == BO_LOr) { 1674 ThenBlock = TrueBlock; 1675 ElseBlock = CounterIncrBlock; 1676 NextBlock = FalseBlock; 1677 } else { 1678 llvm_unreachable("Expected Opcode must be that of a Logical Operator"); 1679 } 1680 1681 // Emit Branch based on condition. 1682 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH); 1683 1684 // Emit the block containing the counter increment(s). 1685 EmitBlock(CounterIncrBlock); 1686 1687 // Increment corresponding counter; if index not provided, use Cond as index. 1688 incrementProfileCounter(CntrIdx ? CntrIdx : Cond); 1689 1690 // Go to the next block. 1691 EmitBranch(NextBlock); 1692 } 1693 1694 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if 1695 /// statement) to the specified blocks. Based on the condition, this might try 1696 /// to simplify the codegen of the conditional based on the branch. 1697 /// \param LH The value of the likelihood attribute on the True branch. 1698 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, 1699 llvm::BasicBlock *TrueBlock, 1700 llvm::BasicBlock *FalseBlock, 1701 uint64_t TrueCount, 1702 Stmt::Likelihood LH) { 1703 Cond = Cond->IgnoreParens(); 1704 1705 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) { 1706 1707 // Handle X && Y in a condition. 1708 if (CondBOp->getOpcode() == BO_LAnd) { 1709 // If we have "1 && X", simplify the code. "0 && X" would have constant 1710 // folded if the case was simple enough. 1711 bool ConstantBool = false; 1712 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && 1713 ConstantBool) { 1714 // br(1 && X) -> br(X). 1715 incrementProfileCounter(CondBOp); 1716 return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock, 1717 FalseBlock, TrueCount, LH); 1718 } 1719 1720 // If we have "X && 1", simplify the code to use an uncond branch. 
1721     // "X && 0" would have been constant folded to 0.
1722     if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1723         ConstantBool) {
1724       // br(X && 1) -> br(X).
1725       return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1726                                       FalseBlock, TrueCount, LH, CondBOp);
1727     }
1728
1729     // Emit the LHS as a conditional. If the LHS conditional is false, we
1730     // want to jump to the FalseBlock.
1731     llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1732     // The counter tells us how often we evaluate RHS, and all of TrueCount
1733     // can be propagated to that branch.
1734     uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1735
1736     ConditionalEvaluation eval(*this);
1737     {
1738       ApplyDebugLocation DL(*this, Cond);
1739       // Propagate the likelihood attribute like __builtin_expect
1740       // __builtin_expect(X && Y, 1) -> X and Y are likely
1741       // __builtin_expect(X && Y, 0) -> only Y is unlikely
1742       EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1743                            LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1744       EmitBlock(LHSTrue);
1745     }
1746
1747     incrementProfileCounter(CondBOp);
1748     setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1749
1750     // Any temporaries created here are conditional.
1751     eval.begin(*this);
1752     EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1753                              FalseBlock, TrueCount, LH);
1754     eval.end(*this);
1755
1756     return;
1757   }
1758
1759   if (CondBOp->getOpcode() == BO_LOr) {
1760     // If we have "0 || X", simplify the code. "1 || X" would have constant
1761     // folded if the case was simple enough.
1762     bool ConstantBool = false;
1763     if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1764         !ConstantBool) {
1765       // br(0 || X) -> br(X).
1766       incrementProfileCounter(CondBOp);
1767       return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1768                                       FalseBlock, TrueCount, LH);
1769     }
1770
1771     // If we have "X || 0", simplify the code to use an uncond branch.
1772     // "X || 1" would have been constant folded to 1.
1773     if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1774         !ConstantBool) {
1775       // br(X || 0) -> br(X).
1776       return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1777                                       FalseBlock, TrueCount, LH, CondBOp);
1778     }
1779
1780     // Emit the LHS as a conditional. If the LHS conditional is true, we
1781     // want to jump to the TrueBlock.
1782     llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1783     // We have the count for entry to the RHS and for the whole expression
1784     // being true, so we can divvy up the True count between the short
1785     // circuit and the RHS.
1786     uint64_t LHSCount =
1787         getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1788     uint64_t RHSCount = TrueCount - LHSCount;
1789
1790     ConditionalEvaluation eval(*this);
1791     {
1792       // Propagate the likelihood attribute like __builtin_expect
1793       // __builtin_expect(X || Y, 1) -> only Y is likely
1794       // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1795       ApplyDebugLocation DL(*this, Cond);
1796       EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1797                            LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1798       EmitBlock(LHSFalse);
1799     }
1800
1801     incrementProfileCounter(CondBOp);
1802     setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1803
1804     // Any temporaries created here are conditional.
1805     eval.begin(*this);
1806     EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1807                              RHSCount, LH);
1808
1809     eval.end(*this);
1810
1811     return;
1812   }
1813 }
1814
1815 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1816   // br(!x, t, f) -> br(x, f, t)
1817   if (CondUOp->getOpcode() == UO_LNot) {
1818     // Negate the count.
1819     uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1820     // The values of the enum are chosen to make this negation possible.
1821     LH = static_cast<Stmt::Likelihood>(-LH);
1822     // Negate the condition and swap the destination blocks.
1823     return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1824                                 FalseCount, LH);
1825   }
1826 }
1827
1828 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1829   // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1830   llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1831   llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1832
1833   // The ConditionalOperator itself has no likelihood information for its
1834   // true and false branches. This matches the behavior of __builtin_expect.
1835   ConditionalEvaluation cond(*this);
1836   EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1837                        getProfileCount(CondOp), Stmt::LH_None);
1838
1839   // When computing PGO branch weights, we only know the overall count for
1840   // the true block. This code is essentially doing tail duplication of the
1841   // naive code-gen, introducing new edges for which counts are not
1842   // available. Divide the counts proportionally between the LHS and RHS of
1843   // the conditional operator.
1844   uint64_t LHSScaledTrueCount = 0;
1845   if (TrueCount) {
1846     double LHSRatio =
1847         getProfileCount(CondOp) / (double)getCurrentProfileCount();
1848     LHSScaledTrueCount = TrueCount * LHSRatio;
1849   }
1850
1851   cond.begin(*this);
1852   EmitBlock(LHSBlock);
1853   incrementProfileCounter(CondOp);
1854   {
1855     ApplyDebugLocation DL(*this, Cond);
1856     EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1857                          LHSScaledTrueCount, LH);
1858   }
1859   cond.end(*this);
1860
1861   cond.begin(*this);
1862   EmitBlock(RHSBlock);
1863   EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1864                        TrueCount - LHSScaledTrueCount, LH);
1865   cond.end(*this);
1866
1867   return;
1868 }
1869
1870 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1871   // Conditional operator handling can give us a throw expression as a
1872   // condition for a case like:
1873   //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1874   // Fold this to:
1875   //   br(c, throw x, br(y, t, f))
1876   EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1877   return;
1878 }
1879
1880 // Emit the code with the fully general case.
1881 llvm::Value *CondV;
1882 {
1883   ApplyDebugLocation DL(*this, Cond);
1884   CondV = EvaluateExprAsBool(Cond);
1885 }
1886
1887 llvm::MDNode *Weights = nullptr;
1888 llvm::MDNode *Unpredictable = nullptr;
1889
1890 // If the branch has a condition wrapped by __builtin_unpredictable,
1891 // create metadata that specifies that the branch is unpredictable.
1892 // Don't bother if not optimizing because that metadata would not be used.
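// For example (hypothetical usage):
//
//   if (__builtin_unpredictable(x > limit)) { ... }
//
// attaches !unpredictable metadata to the resulting conditional branch,
// telling the backend not to assume the branch is biased either way.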
1893   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1894   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1895     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1896     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1897       llvm::MDBuilder MDHelper(getLLVMContext());
1898       Unpredictable = MDHelper.createUnpredictable();
1899     }
1900   }
1901
1902   // If there is likelihood knowledge for the condition, lower it.
1903   // Note that if not optimizing this won't emit anything.
1904   llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1905   if (CondV != NewCondV)
1906     CondV = NewCondV;
1907   else {
1908     // Otherwise, lower profile counts. Note that we do this even at -O0.
1909     uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1910     Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1911   }
1912
1913   Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1914 }
1915
1916 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1917 /// specified stmt yet.
1918 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1919   CGM.ErrorUnsupported(S, Type);
1920 }
1921
1922 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1923 /// variable-length array whose elements have a non-zero bit-pattern.
1924 ///
1925 /// \param baseType the inner-most element type of the array
1926 /// \param src - a char* pointing to the bit-pattern for a single
1927 ///   base element of the array
1928 /// \param sizeInChars - the total size of the VLA, in chars
1929 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1930                                Address dest, Address src,
1931                                llvm::Value *sizeInChars) {
1932   CGBuilderTy &Builder = CGF.Builder;
1933
1934   CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1935   llvm::Value *baseSizeInChars
1936     = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1937
1938   Address begin = dest.withElementType(CGF.Int8Ty);
1939   llvm::Value *end = Builder.CreateInBoundsGEP(
1940       begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
1941
1942   llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1943   llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1944   llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1945
1946   // Make a loop over the VLA. C99 guarantees that the VLA element
1947   // count must be nonzero.
1948   CGF.EmitBlock(loopBB);
1949
1950   llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1951   cur->addIncoming(begin.getPointer(), originBB);
1952
1953   CharUnits curAlign =
1954     dest.getAlignment().alignmentOfArrayElement(baseSize);
1955
1956   // memcpy the individual element bit-pattern.
1957   Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
1958                        /*volatile*/ false);
1959
1960   // Go to the next element.
1961   llvm::Value *next =
1962     Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1963
1964   // Leave if that's the end of the VLA.
1965   llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1966   Builder.CreateCondBr(done, contBB, loopBB);
1967   cur->addIncoming(next, loopBB);
1968
1969   CGF.EmitBlock(contBB);
1970 }
1971
1972 void
1973 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1974   // Ignore empty classes in C++.
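// E.g. (a sketch): for 'struct Empty {};', null-initializing an Empty
// object has no storage to touch, so we can return immediately.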
1975 if (getLangOpts().CPlusPlus) { 1976 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1977 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty()) 1978 return; 1979 } 1980 } 1981 1982 if (DestPtr.getElementType() != Int8Ty) 1983 DestPtr = DestPtr.withElementType(Int8Ty); 1984 1985 // Get size and alignment info for this aggregate. 1986 CharUnits size = getContext().getTypeSizeInChars(Ty); 1987 1988 llvm::Value *SizeVal; 1989 const VariableArrayType *vla; 1990 1991 // Don't bother emitting a zero-byte memset. 1992 if (size.isZero()) { 1993 // But note that getTypeInfo returns 0 for a VLA. 1994 if (const VariableArrayType *vlaType = 1995 dyn_cast_or_null<VariableArrayType>( 1996 getContext().getAsArrayType(Ty))) { 1997 auto VlaSize = getVLASize(vlaType); 1998 SizeVal = VlaSize.NumElts; 1999 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type); 2000 if (!eltSize.isOne()) 2001 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize)); 2002 vla = vlaType; 2003 } else { 2004 return; 2005 } 2006 } else { 2007 SizeVal = CGM.getSize(size); 2008 vla = nullptr; 2009 } 2010 2011 // If the type contains a pointer to data member we can't memset it to zero. 2012 // Instead, create a null constant and copy it to the destination. 2013 // TODO: there are other patterns besides zero that we can usefully memset, 2014 // like -1, which happens to be the pattern used by member-pointers. 2015 if (!CGM.getTypes().isZeroInitializable(Ty)) { 2016 // For a VLA, emit a single element, then splat that over the VLA. 2017 if (vla) Ty = getContext().getBaseElementType(vla); 2018 2019 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty); 2020 2021 llvm::GlobalVariable *NullVariable = 2022 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(), 2023 /*isConstant=*/true, 2024 llvm::GlobalVariable::PrivateLinkage, 2025 NullConstant, Twine()); 2026 CharUnits NullAlign = DestPtr.getAlignment(); 2027 NullVariable->setAlignment(NullAlign.getAsAlign()); 2028 Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()), 2029 Builder.getInt8Ty(), NullAlign); 2030 2031 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal); 2032 2033 // Get and call the appropriate llvm.memcpy overload. 2034 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false); 2035 return; 2036 } 2037 2038 // Otherwise, just memset the whole thing to zero. This is legal 2039 // because in LLVM, all default initializers (other than the ones we just 2040 // handled above) are guaranteed to have a bit pattern of all zeros. 2041 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); 2042 } 2043 2044 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) { 2045 // Make sure that there is a block for the indirect goto. 2046 if (!IndirectBranch) 2047 GetIndirectGotoBlock(); 2048 2049 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock(); 2050 2051 // Make sure the indirect branch includes all of the address-taken blocks. 2052 IndirectBranch->addDestination(BB); 2053 return llvm::BlockAddress::get(CurFn, BB); 2054 } 2055 2056 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() { 2057 // If we already made the indirect branch for indirect goto, return its block. 2058 if (IndirectBranch) return IndirectBranch->getParent(); 2059 2060 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto")); 2061 2062 // Create the PHI node that indirect gotos will add entries to. 
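// For illustration (hypothetical source): every '&&label' address and every
// 'goto *p;' in the function funnels through this one block. Each indirect
// goto adds its computed address as an incoming value of the PHI, and each
// address-taken label becomes a destination of the indirectbr.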
2063 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0, 2064 "indirect.goto.dest"); 2065 2066 // Create the indirect branch instruction. 2067 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal); 2068 return IndirectBranch->getParent(); 2069 } 2070 2071 /// Computes the length of an array in elements, as well as the base 2072 /// element type and a properly-typed first element pointer. 2073 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, 2074 QualType &baseType, 2075 Address &addr) { 2076 const ArrayType *arrayType = origArrayType; 2077 2078 // If it's a VLA, we have to load the stored size. Note that 2079 // this is the size of the VLA in bytes, not its size in elements. 2080 llvm::Value *numVLAElements = nullptr; 2081 if (isa<VariableArrayType>(arrayType)) { 2082 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts; 2083 2084 // Walk into all VLAs. This doesn't require changes to addr, 2085 // which has type T* where T is the first non-VLA element type. 2086 do { 2087 QualType elementType = arrayType->getElementType(); 2088 arrayType = getContext().getAsArrayType(elementType); 2089 2090 // If we only have VLA components, 'addr' requires no adjustment. 2091 if (!arrayType) { 2092 baseType = elementType; 2093 return numVLAElements; 2094 } 2095 } while (isa<VariableArrayType>(arrayType)); 2096 2097 // We get out here only if we find a constant array type 2098 // inside the VLA. 2099 } 2100 2101 // We have some number of constant-length arrays, so addr should 2102 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks 2103 // down to the first element of addr. 2104 SmallVector<llvm::Value*, 8> gepIndices; 2105 2106 // GEP down to the array type. 2107 llvm::ConstantInt *zero = Builder.getInt32(0); 2108 gepIndices.push_back(zero); 2109 2110 uint64_t countFromCLAs = 1; 2111 QualType eltType; 2112 2113 llvm::ArrayType *llvmArrayType = 2114 dyn_cast<llvm::ArrayType>(addr.getElementType()); 2115 while (llvmArrayType) { 2116 assert(isa<ConstantArrayType>(arrayType)); 2117 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue() 2118 == llvmArrayType->getNumElements()); 2119 2120 gepIndices.push_back(zero); 2121 countFromCLAs *= llvmArrayType->getNumElements(); 2122 eltType = arrayType->getElementType(); 2123 2124 llvmArrayType = 2125 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType()); 2126 arrayType = getContext().getAsArrayType(arrayType->getElementType()); 2127 assert((!llvmArrayType || arrayType) && 2128 "LLVM and Clang types are out-of-synch"); 2129 } 2130 2131 if (arrayType) { 2132 // From this point onwards, the Clang array type has been emitted 2133 // as some other type (probably a packed struct). Compute the array 2134 // size, and just emit the 'begin' expression as a bitcast. 2135 while (arrayType) { 2136 countFromCLAs *= 2137 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue(); 2138 eltType = arrayType->getElementType(); 2139 arrayType = getContext().getAsArrayType(eltType); 2140 } 2141 2142 llvm::Type *baseType = ConvertType(eltType); 2143 addr = addr.withElementType(baseType); 2144 } else { 2145 // Create the actual GEP. 2146 addr = Address(Builder.CreateInBoundsGEP( 2147 addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"), 2148 ConvertTypeForMem(eltType), 2149 addr.getAlignment()); 2150 } 2151 2152 baseType = eltType; 2153 2154 llvm::Value *numElements 2155 = llvm::ConstantInt::get(SizeTy, countFromCLAs); 2156 2157 // If we had any VLA dimensions, factor them in. 
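// E.g. (a sketch): for 'int a[n][m][4]', numVLAElements is n*m from the VLA
// walk above and countFromCLAs is 4, giving n*m*4 total elements.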
2158 if (numVLAElements) 2159 numElements = Builder.CreateNUWMul(numVLAElements, numElements); 2160 2161 return numElements; 2162 } 2163 2164 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) { 2165 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 2166 assert(vla && "type was not a variable array type!"); 2167 return getVLASize(vla); 2168 } 2169 2170 CodeGenFunction::VlaSizePair 2171 CodeGenFunction::getVLASize(const VariableArrayType *type) { 2172 // The number of elements so far; always size_t. 2173 llvm::Value *numElements = nullptr; 2174 2175 QualType elementType; 2176 do { 2177 elementType = type->getElementType(); 2178 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()]; 2179 assert(vlaSize && "no size for VLA!"); 2180 assert(vlaSize->getType() == SizeTy); 2181 2182 if (!numElements) { 2183 numElements = vlaSize; 2184 } else { 2185 // It's undefined behavior if this wraps around, so mark it that way. 2186 // FIXME: Teach -fsanitize=undefined to trap this. 2187 numElements = Builder.CreateNUWMul(numElements, vlaSize); 2188 } 2189 } while ((type = getContext().getAsVariableArrayType(elementType))); 2190 2191 return { numElements, elementType }; 2192 } 2193 2194 CodeGenFunction::VlaSizePair 2195 CodeGenFunction::getVLAElements1D(QualType type) { 2196 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 2197 assert(vla && "type was not a variable array type!"); 2198 return getVLAElements1D(vla); 2199 } 2200 2201 CodeGenFunction::VlaSizePair 2202 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) { 2203 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()]; 2204 assert(VlaSize && "no size for VLA!"); 2205 assert(VlaSize->getType() == SizeTy); 2206 return { VlaSize, Vla->getElementType() }; 2207 } 2208 2209 void CodeGenFunction::EmitVariablyModifiedType(QualType type) { 2210 assert(type->isVariablyModifiedType() && 2211 "Must pass variably modified type to EmitVLASizes!"); 2212 2213 EnsureInsertPoint(); 2214 2215 // We're going to walk down into the type and look for VLA 2216 // expressions. 2217 do { 2218 assert(type->isVariablyModifiedType()); 2219 2220 const Type *ty = type.getTypePtr(); 2221 switch (ty->getTypeClass()) { 2222 2223 #define TYPE(Class, Base) 2224 #define ABSTRACT_TYPE(Class, Base) 2225 #define NON_CANONICAL_TYPE(Class, Base) 2226 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 2227 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) 2228 #include "clang/AST/TypeNodes.inc" 2229 llvm_unreachable("unexpected dependent type!"); 2230 2231 // These types are never variably-modified. 
2232 case Type::Builtin: 2233 case Type::Complex: 2234 case Type::Vector: 2235 case Type::ExtVector: 2236 case Type::ConstantMatrix: 2237 case Type::Record: 2238 case Type::Enum: 2239 case Type::Using: 2240 case Type::TemplateSpecialization: 2241 case Type::ObjCTypeParam: 2242 case Type::ObjCObject: 2243 case Type::ObjCInterface: 2244 case Type::ObjCObjectPointer: 2245 case Type::BitInt: 2246 llvm_unreachable("type class is never variably-modified!"); 2247 2248 case Type::Elaborated: 2249 type = cast<ElaboratedType>(ty)->getNamedType(); 2250 break; 2251 2252 case Type::Adjusted: 2253 type = cast<AdjustedType>(ty)->getAdjustedType(); 2254 break; 2255 2256 case Type::Decayed: 2257 type = cast<DecayedType>(ty)->getPointeeType(); 2258 break; 2259 2260 case Type::Pointer: 2261 type = cast<PointerType>(ty)->getPointeeType(); 2262 break; 2263 2264 case Type::BlockPointer: 2265 type = cast<BlockPointerType>(ty)->getPointeeType(); 2266 break; 2267 2268 case Type::LValueReference: 2269 case Type::RValueReference: 2270 type = cast<ReferenceType>(ty)->getPointeeType(); 2271 break; 2272 2273 case Type::MemberPointer: 2274 type = cast<MemberPointerType>(ty)->getPointeeType(); 2275 break; 2276 2277 case Type::ConstantArray: 2278 case Type::IncompleteArray: 2279 // Losing element qualification here is fine. 2280 type = cast<ArrayType>(ty)->getElementType(); 2281 break; 2282 2283 case Type::VariableArray: { 2284 // Losing element qualification here is fine. 2285 const VariableArrayType *vat = cast<VariableArrayType>(ty); 2286 2287 // Unknown size indication requires no size computation. 2288 // Otherwise, evaluate and record it. 2289 if (const Expr *sizeExpr = vat->getSizeExpr()) { 2290 // It's possible that we might have emitted this already, 2291 // e.g. with a typedef and a pointer to it. 2292 llvm::Value *&entry = VLASizeMap[sizeExpr]; 2293 if (!entry) { 2294 llvm::Value *size = EmitScalarExpr(sizeExpr); 2295 2296 // C11 6.7.6.2p5: 2297 // If the size is an expression that is not an integer constant 2298 // expression [...] each time it is evaluated it shall have a value 2299 // greater than zero. 2300 if (SanOpts.has(SanitizerKind::VLABound)) { 2301 SanitizerScope SanScope(this); 2302 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType()); 2303 clang::QualType SEType = sizeExpr->getType(); 2304 llvm::Value *CheckCondition = 2305 SEType->isSignedIntegerType() 2306 ? Builder.CreateICmpSGT(size, Zero) 2307 : Builder.CreateICmpUGT(size, Zero); 2308 llvm::Constant *StaticArgs[] = { 2309 EmitCheckSourceLocation(sizeExpr->getBeginLoc()), 2310 EmitCheckTypeDescriptor(SEType)}; 2311 EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound), 2312 SanitizerHandler::VLABoundNotPositive, StaticArgs, size); 2313 } 2314 2315 // Always zexting here would be wrong if it weren't 2316 // undefined behavior to have a negative bound. 2317 // FIXME: What about when size's type is larger than size_t? 2318 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false); 2319 } 2320 } 2321 type = vat->getElementType(); 2322 break; 2323 } 2324 2325 case Type::FunctionProto: 2326 case Type::FunctionNoProto: 2327 type = cast<FunctionType>(ty)->getReturnType(); 2328 break; 2329 2330 case Type::Paren: 2331 case Type::TypeOf: 2332 case Type::UnaryTransform: 2333 case Type::Attributed: 2334 case Type::BTFTagAttributed: 2335 case Type::SubstTemplateTypeParm: 2336 case Type::MacroQualified: 2337 // Keep walking after single level desugaring. 
2338 type = type.getSingleStepDesugaredType(getContext()); 2339 break; 2340 2341 case Type::Typedef: 2342 case Type::Decltype: 2343 case Type::Auto: 2344 case Type::DeducedTemplateSpecialization: 2345 // Stop walking: nothing to do. 2346 return; 2347 2348 case Type::TypeOfExpr: 2349 // Stop walking: emit typeof expression. 2350 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr()); 2351 return; 2352 2353 case Type::Atomic: 2354 type = cast<AtomicType>(ty)->getValueType(); 2355 break; 2356 2357 case Type::Pipe: 2358 type = cast<PipeType>(ty)->getElementType(); 2359 break; 2360 } 2361 } while (type->isVariablyModifiedType()); 2362 } 2363 2364 Address CodeGenFunction::EmitVAListRef(const Expr* E) { 2365 if (getContext().getBuiltinVaListType()->isArrayType()) 2366 return EmitPointerWithAlignment(E); 2367 return EmitLValue(E).getAddress(*this); 2368 } 2369 2370 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) { 2371 return EmitLValue(E).getAddress(*this); 2372 } 2373 2374 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E, 2375 const APValue &Init) { 2376 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!"); 2377 if (CGDebugInfo *Dbg = getDebugInfo()) 2378 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) 2379 Dbg->EmitGlobalVariable(E->getDecl(), Init); 2380 } 2381 2382 CodeGenFunction::PeepholeProtection 2383 CodeGenFunction::protectFromPeepholes(RValue rvalue) { 2384 // At the moment, the only aggressive peephole we do in IR gen 2385 // is trunc(zext) folding, but if we add more, we can easily 2386 // extend this protection. 2387 2388 if (!rvalue.isScalar()) return PeepholeProtection(); 2389 llvm::Value *value = rvalue.getScalarVal(); 2390 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection(); 2391 2392 // Just make an extra bitcast. 2393 assert(HaveInsertPoint()); 2394 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "", 2395 Builder.GetInsertBlock()); 2396 2397 PeepholeProtection protection; 2398 protection.Inst = inst; 2399 return protection; 2400 } 2401 2402 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) { 2403 if (!protection.Inst) return; 2404 2405 // In theory, we could try to duplicate the peepholes now, but whatever. 
2406 protection.Inst->eraseFromParent(); 2407 } 2408 2409 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, 2410 QualType Ty, SourceLocation Loc, 2411 SourceLocation AssumptionLoc, 2412 llvm::Value *Alignment, 2413 llvm::Value *OffsetValue) { 2414 if (Alignment->getType() != IntPtrTy) 2415 Alignment = 2416 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align"); 2417 if (OffsetValue && OffsetValue->getType() != IntPtrTy) 2418 OffsetValue = 2419 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset"); 2420 llvm::Value *TheCheck = nullptr; 2421 if (SanOpts.has(SanitizerKind::Alignment)) { 2422 llvm::Value *PtrIntValue = 2423 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint"); 2424 2425 if (OffsetValue) { 2426 bool IsOffsetZero = false; 2427 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue)) 2428 IsOffsetZero = CI->isZero(); 2429 2430 if (!IsOffsetZero) 2431 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr"); 2432 } 2433 2434 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0); 2435 llvm::Value *Mask = 2436 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1)); 2437 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr"); 2438 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond"); 2439 } 2440 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption( 2441 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue); 2442 2443 if (!SanOpts.has(SanitizerKind::Alignment)) 2444 return; 2445 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment, 2446 OffsetValue, TheCheck, Assumption); 2447 } 2448 2449 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, 2450 const Expr *E, 2451 SourceLocation AssumptionLoc, 2452 llvm::Value *Alignment, 2453 llvm::Value *OffsetValue) { 2454 QualType Ty = E->getType(); 2455 SourceLocation Loc = E->getExprLoc(); 2456 2457 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment, 2458 OffsetValue); 2459 } 2460 2461 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn, 2462 llvm::Value *AnnotatedVal, 2463 StringRef AnnotationStr, 2464 SourceLocation Location, 2465 const AnnotateAttr *Attr) { 2466 SmallVector<llvm::Value *, 5> Args = { 2467 AnnotatedVal, 2468 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), 2469 ConstGlobalsPtrTy), 2470 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), 2471 ConstGlobalsPtrTy), 2472 CGM.EmitAnnotationLineNo(Location), 2473 }; 2474 if (Attr) 2475 Args.push_back(CGM.EmitAnnotationArgs(Attr)); 2476 return Builder.CreateCall(AnnotationFn, Args); 2477 } 2478 2479 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) { 2480 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 2481 // FIXME We create a new bitcast for every annotation because that's what 2482 // llvm-gcc was doing. 
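// For example (hypothetical usage):
//
//   int tmp __attribute__((annotate("uncached")));
//
// each annotate attribute below is lowered to a call of the
// llvm.var_annotation intrinsic on the variable's address.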
2483 unsigned AS = V->getType()->getPointerAddressSpace(); 2484 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(AS); 2485 for (const auto *I : D->specific_attrs<AnnotateAttr>()) 2486 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation, 2487 {I8PtrTy, CGM.ConstGlobalsPtrTy}), 2488 Builder.CreateBitCast(V, I8PtrTy, V->getName()), 2489 I->getAnnotation(), D->getLocation(), I); 2490 } 2491 2492 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D, 2493 Address Addr) { 2494 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 2495 llvm::Value *V = Addr.getPointer(); 2496 llvm::Type *VTy = V->getType(); 2497 auto *PTy = dyn_cast<llvm::PointerType>(VTy); 2498 unsigned AS = PTy ? PTy->getAddressSpace() : 0; 2499 llvm::PointerType *IntrinTy = 2500 llvm::PointerType::get(CGM.getLLVMContext(), AS); 2501 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation, 2502 {IntrinTy, CGM.ConstGlobalsPtrTy}); 2503 2504 for (const auto *I : D->specific_attrs<AnnotateAttr>()) { 2505 // FIXME Always emit the cast inst so we can differentiate between 2506 // annotation on the first field of a struct and annotation on the struct 2507 // itself. 2508 if (VTy != IntrinTy) 2509 V = Builder.CreateBitCast(V, IntrinTy); 2510 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I); 2511 V = Builder.CreateBitCast(V, VTy); 2512 } 2513 2514 return Address(V, Addr.getElementType(), Addr.getAlignment()); 2515 } 2516 2517 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { } 2518 2519 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF) 2520 : CGF(CGF) { 2521 assert(!CGF->IsSanitizerScope); 2522 CGF->IsSanitizerScope = true; 2523 } 2524 2525 CodeGenFunction::SanitizerScope::~SanitizerScope() { 2526 CGF->IsSanitizerScope = false; 2527 } 2528 2529 void CodeGenFunction::InsertHelper(llvm::Instruction *I, 2530 const llvm::Twine &Name, 2531 llvm::BasicBlock *BB, 2532 llvm::BasicBlock::iterator InsertPt) const { 2533 LoopStack.InsertHelper(I); 2534 if (IsSanitizerScope) 2535 I->setNoSanitizeMetadata(); 2536 } 2537 2538 void CGBuilderInserter::InsertHelper( 2539 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, 2540 llvm::BasicBlock::iterator InsertPt) const { 2541 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt); 2542 if (CGF) 2543 CGF->InsertHelper(I, Name, BB, InsertPt); 2544 } 2545 2546 // Emits an error if we don't have a valid set of target features for the 2547 // called function. 2548 void CodeGenFunction::checkTargetFeatures(const CallExpr *E, 2549 const FunctionDecl *TargetDecl) { 2550 return checkTargetFeatures(E->getBeginLoc(), TargetDecl); 2551 } 2552 2553 // Emits an error if we don't have a valid set of target features for the 2554 // called function. 2555 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc, 2556 const FunctionDecl *TargetDecl) { 2557 // Early exit if this is an indirect call. 2558 if (!TargetDecl) 2559 return; 2560 2561 // Get the current enclosing function if it exists. If it doesn't 2562 // we can't check the target features anyhow. 2563 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl); 2564 if (!FD) 2565 return; 2566 2567 // Grab the required features for the call. For a builtin this is listed in 2568 // the td file with the default cpu, for an always_inline function this is any 2569 // listed cpu and any listed features. 
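// For example (a sketch): calling an always_inline callee declared with
// __attribute__((target("avx2"))) from a caller built without AVX2 is
// rejected below with err_function_needs_feature, and using a builtin that
// requires AVX2 from such a caller is rejected with err_builtin_needs_feature.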
2570 unsigned BuiltinID = TargetDecl->getBuiltinID(); 2571 std::string MissingFeature; 2572 llvm::StringMap<bool> CallerFeatureMap; 2573 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD); 2574 if (BuiltinID) { 2575 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID)); 2576 if (!Builtin::evaluateRequiredTargetFeatures( 2577 FeatureList, CallerFeatureMap)) { 2578 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature) 2579 << TargetDecl->getDeclName() 2580 << FeatureList; 2581 } 2582 } else if (!TargetDecl->isMultiVersion() && 2583 TargetDecl->hasAttr<TargetAttr>()) { 2584 // Get the required features for the callee. 2585 2586 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>(); 2587 ParsedTargetAttr ParsedAttr = 2588 CGM.getContext().filterFunctionTargetAttrs(TD); 2589 2590 SmallVector<StringRef, 1> ReqFeatures; 2591 llvm::StringMap<bool> CalleeFeatureMap; 2592 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); 2593 2594 for (const auto &F : ParsedAttr.Features) { 2595 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1))) 2596 ReqFeatures.push_back(StringRef(F).substr(1)); 2597 } 2598 2599 for (const auto &F : CalleeFeatureMap) { 2600 // Only positive features are "required". 2601 if (F.getValue()) 2602 ReqFeatures.push_back(F.getKey()); 2603 } 2604 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) { 2605 if (!CallerFeatureMap.lookup(Feature)) { 2606 MissingFeature = Feature.str(); 2607 return false; 2608 } 2609 return true; 2610 })) 2611 CGM.getDiags().Report(Loc, diag::err_function_needs_feature) 2612 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature; 2613 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) { 2614 llvm::StringMap<bool> CalleeFeatureMap; 2615 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); 2616 2617 for (const auto &F : CalleeFeatureMap) { 2618 if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) || 2619 !CallerFeatureMap.find(F.getKey())->getValue())) 2620 CGM.getDiags().Report(Loc, diag::err_function_needs_feature) 2621 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey(); 2622 } 2623 } 2624 } 2625 2626 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) { 2627 if (!CGM.getCodeGenOpts().SanitizeStats) 2628 return; 2629 2630 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint()); 2631 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation()); 2632 CGM.getSanStats().create(IRB, SSK); 2633 } 2634 2635 void CodeGenFunction::EmitKCFIOperandBundle( 2636 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) { 2637 const FunctionProtoType *FP = 2638 Callee.getAbstractInfo().getCalleeFunctionProtoType(); 2639 if (FP) 2640 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar())); 2641 } 2642 2643 llvm::Value *CodeGenFunction::FormAArch64ResolverCondition( 2644 const MultiVersionResolverOption &RO) { 2645 llvm::SmallVector<StringRef, 8> CondFeatures; 2646 for (const StringRef &Feature : RO.Conditions.Features) { 2647 // Form condition for features which are not yet enabled in target 2648 if (!getContext().getTargetInfo().hasFeature(Feature)) 2649 CondFeatures.push_back(Feature); 2650 } 2651 if (!CondFeatures.empty()) { 2652 return EmitAArch64CpuSupports(CondFeatures); 2653 } 2654 return nullptr; 2655 } 2656 2657 llvm::Value *CodeGenFunction::FormX86ResolverCondition( 2658 const MultiVersionResolverOption &RO) { 2659 llvm::Value *Condition = nullptr; 2660 2661 if 
(!RO.Conditions.Architecture.empty()) 2662 Condition = EmitX86CpuIs(RO.Conditions.Architecture); 2663 2664 if (!RO.Conditions.Features.empty()) { 2665 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features); 2666 Condition = 2667 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond; 2668 } 2669 return Condition; 2670 } 2671 2672 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, 2673 llvm::Function *Resolver, 2674 CGBuilderTy &Builder, 2675 llvm::Function *FuncToReturn, 2676 bool SupportsIFunc) { 2677 if (SupportsIFunc) { 2678 Builder.CreateRet(FuncToReturn); 2679 return; 2680 } 2681 2682 llvm::SmallVector<llvm::Value *, 10> Args( 2683 llvm::make_pointer_range(Resolver->args())); 2684 2685 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args); 2686 Result->setTailCallKind(llvm::CallInst::TCK_MustTail); 2687 2688 if (Resolver->getReturnType()->isVoidTy()) 2689 Builder.CreateRetVoid(); 2690 else 2691 Builder.CreateRet(Result); 2692 } 2693 2694 void CodeGenFunction::EmitMultiVersionResolver( 2695 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) { 2696 2697 llvm::Triple::ArchType ArchType = 2698 getContext().getTargetInfo().getTriple().getArch(); 2699 2700 switch (ArchType) { 2701 case llvm::Triple::x86: 2702 case llvm::Triple::x86_64: 2703 EmitX86MultiVersionResolver(Resolver, Options); 2704 return; 2705 case llvm::Triple::aarch64: 2706 EmitAArch64MultiVersionResolver(Resolver, Options); 2707 return; 2708 2709 default: 2710 assert(false && "Only implemented for x86 and AArch64 targets"); 2711 } 2712 } 2713 2714 void CodeGenFunction::EmitAArch64MultiVersionResolver( 2715 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) { 2716 assert(!Options.empty() && "No multiversion resolver options found"); 2717 assert(Options.back().Conditions.Features.size() == 0 && 2718 "Default case must be last"); 2719 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc(); 2720 assert(SupportsIFunc && 2721 "Multiversion resolver requires target IFUNC support"); 2722 bool AArch64CpuInitialized = false; 2723 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver); 2724 2725 for (const MultiVersionResolverOption &RO : Options) { 2726 Builder.SetInsertPoint(CurBlock); 2727 llvm::Value *Condition = FormAArch64ResolverCondition(RO); 2728 2729 // The 'default' or 'all features enabled' case. 2730 if (!Condition) { 2731 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function, 2732 SupportsIFunc); 2733 return; 2734 } 2735 2736 if (!AArch64CpuInitialized) { 2737 Builder.SetInsertPoint(CurBlock, CurBlock->begin()); 2738 EmitAArch64CpuInit(); 2739 AArch64CpuInitialized = true; 2740 Builder.SetInsertPoint(CurBlock); 2741 } 2742 2743 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver); 2744 CGBuilderTy RetBuilder(*this, RetBlock); 2745 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function, 2746 SupportsIFunc); 2747 CurBlock = createBasicBlock("resolver_else", Resolver); 2748 Builder.CreateCondBr(Condition, RetBlock, CurBlock); 2749 } 2750 2751 // If no default, emit an unreachable. 
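// For example (hypothetical AArch64 function multiversioning):
//
//   __attribute__((target_version("sve"))) int f(void) { return 1; }
//   __attribute__((target_version("default"))) int f(void) { return 0; }
//
// The "default" variant has no feature conditions, so the loop above emits
// it as the unconditional fall-through return; without such a variant,
// control falls through to the trap emitted below.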
2752   Builder.SetInsertPoint(CurBlock);
2753   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2754   TrapCall->setDoesNotReturn();
2755   TrapCall->setDoesNotThrow();
2756   Builder.CreateUnreachable();
2757   Builder.ClearInsertionPoint();
2758 }
2759
2760 void CodeGenFunction::EmitX86MultiVersionResolver(
2761     llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2762
2763   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2764
2765   // Main function's basic block.
2766   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2767   Builder.SetInsertPoint(CurBlock);
2768   EmitX86CpuInit();
2769
2770   for (const MultiVersionResolverOption &RO : Options) {
2771     Builder.SetInsertPoint(CurBlock);
2772     llvm::Value *Condition = FormX86ResolverCondition(RO);
2773
2774     // The 'default' or 'generic' case.
2775     if (!Condition) {
2776       assert(&RO == Options.end() - 1 &&
2777              "Default or Generic case must be last");
2778       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2779                                        SupportsIFunc);
2780       return;
2781     }
2782
2783     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2784     CGBuilderTy RetBuilder(*this, RetBlock);
2785     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2786                                      SupportsIFunc);
2787     CurBlock = createBasicBlock("resolver_else", Resolver);
2788     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2789   }
2790
2791   // If no generic/default, emit an unreachable.
2792   Builder.SetInsertPoint(CurBlock);
2793   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2794   TrapCall->setDoesNotReturn();
2795   TrapCall->setDoesNotThrow();
2796   Builder.CreateUnreachable();
2797   Builder.ClearInsertionPoint();
2798 }
2799
2800 // Loc - where the diagnostic will point, where in the source code this
2801 // alignment has failed.
2802 // SecondaryLoc - if present (will be present if sufficiently different from
2803 // Loc), the diagnostic will additionally point a "Note:" to this location.
2804 // It should be the location where the __attribute__((assume_aligned))
2805 // was written, for example.
2806 void CodeGenFunction::emitAlignmentAssumptionCheck(
2807     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2808     SourceLocation SecondaryLoc, llvm::Value *Alignment,
2809     llvm::Value *OffsetValue, llvm::Value *TheCheck,
2810     llvm::Instruction *Assumption) {
2811   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2812          cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2813              llvm::Intrinsic::getDeclaration(
2814                  Builder.GetInsertBlock()->getParent()->getParent(),
2815                  llvm::Intrinsic::assume) &&
2816          "Assumption should be a call to llvm.assume().");
2817   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2818          "Assumption should be the last instruction of the basic block, "
2819          "since the basic block is still being generated.");
2820
2821   if (!SanOpts.has(SanitizerKind::Alignment))
2822     return;
2823
2824   // Don't check pointers to volatile data. The behavior here is
2825   // implementation-defined.
2826   if (Ty->getPointeeType().isVolatileQualified())
2827     return;
2828
2829   // We need to temporarily remove the assumption so we can insert the
2830   // sanitizer check before it, else the check will be dropped by optimizations.
2831   Assumption->removeFromParent();
2832
2833   {
2834     SanitizerScope SanScope(this);
2835
2836     if (!OffsetValue)
2837       OffsetValue = Builder.getInt1(false); // no offset.
2838
2839     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2840                                     EmitCheckSourceLocation(SecondaryLoc),
2841                                     EmitCheckTypeDescriptor(Ty)};
2842     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2843                                   EmitCheckValue(Alignment),
2844                                   EmitCheckValue(OffsetValue)};
2845     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2846               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2847   }
2848
2849   // We are now in the (new, empty) "cont" basic block.
2850   // Reintroduce the assumption.
2851   Builder.Insert(Assumption);
2852   // FIXME: Assumption still has its original basic block as its Parent.
2853 }
2854
2855 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2856   if (CGDebugInfo *DI = getDebugInfo())
2857     return DI->SourceLocToDebugLoc(Location);
2858
2859   return llvm::DebugLoc();
2860 }
2861
2862 llvm::Value *
2863 CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2864                                                       Stmt::Likelihood LH) {
2865   switch (LH) {
2866   case Stmt::LH_None:
2867     return Cond;
2868   case Stmt::LH_Likely:
2869   case Stmt::LH_Unlikely:
2870     // Don't generate llvm.expect on -O0 as the backend won't use it for
2871     // anything.
2872     if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2873       return Cond;
2874     llvm::Type *CondTy = Cond->getType();
2875     assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2876     llvm::Function *FnExpect =
2877         CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2878     llvm::Value *ExpectedValueOfCond =
2879         llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2880     return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2881                               Cond->getName() + ".expval");
2882   }
2883   llvm_unreachable("Unknown Likelihood");
2884 }
2885
2886 llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2887                                                     unsigned NumElementsDst,
2888                                                     const llvm::Twine &Name) {
2889   auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2890   unsigned NumElementsSrc = SrcTy->getNumElements();
2891   if (NumElementsSrc == NumElementsDst)
2892     return SrcVec;
2893
2894   std::vector<int> ShuffleMask(NumElementsDst, -1);
2895   for (unsigned MaskIdx = 0;
2896        MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2897     ShuffleMask[MaskIdx] = MaskIdx;
2898
2899   return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
2900 }
2901
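// For illustration (a sketch): widening a <2 x i1> to 4 elements emits a
// shufflevector with mask <0, 1, -1, -1>, leaving the new trailing lanes
// poison; narrowing simply keeps the leading source lanes.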