//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}
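// Added illustrative note (not from the original source): when the predicate
// above returns true, locals are bracketed with lifetime intrinsics, e.g. for
//   { char Buf[256]; use(Buf); }
// the alloca is wrapped in llvm.lifetime.start/llvm.lifetime.end markers so
// later passes can reuse the stack slot once Buf is dead.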
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
                                         FPFeatures.getAllowReciprocal() &&
                                         FPFeatures.getAllowApproxFunc() &&
                                         FPFeatures.getNoSignedZero());
}
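// Added illustrative note: callers are expected to scope floating-point state
// changes with the RAII type, for example
//   { CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); /* emit FP ops */ }
// so that the destructor below restores the builder's constrained-FP defaults
// and fast-math flags on scope exit.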
CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  Address Addr(V, ConvertTypeForMem(T), Align);
  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
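// Added reference note: e.g. 'int' and 'T *' are TEK_Scalar, '_Complex double'
// is TEK_Complex, and structs and arrays are TEK_Aggregate; '_Atomic(T)' is
// classified according to its underlying type T.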
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}
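// Added illustrative note: for 'int f() { return 0; }' the body ends with a
// single unconditional 'br' to the return block, so the hasOneUse() case above
// folds the branch away and emits 'ret' directly in the predecessor, reusing
// the return statement's debug location.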
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
                               && NumSimpleReturnExprs == NumReturnExprs
                               && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    Optional<ApplyDebugLocation> AL;
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt is lazily created when it is first required; remove it
  // now, since it exists only for our convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }
  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // as the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the required-vector-width attribute. This contains the max width
  // from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
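  // Added example note: a function whose widest vector use is an <8 x float>
  // argument ends up with "min-legal-vector-width"="256" here.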
  // Add vscale_range attribute if appropriate.
  Optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}
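// Added example note (flag names are assumptions from clang's documentation):
// -fxray-instrumentation-bundle=custom sets the bundle mask to
// XRayInstrKind::Custom, so AlwaysEmitXRayCustomEvents() returns true even
// without -fxray-always-emit-customevents.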
llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, Int8PtrTy, getPointerAlign()),
                            "decoded_addr");
}

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}
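// Added example note: an OpenCL kernel declared as
//   __kernel __attribute__((reqd_work_group_size(8, 8, 1))) void k(...);
// gets '!reqd_work_group_size !{i32 8, i32 8, i32 1}' attached above.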
/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}
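// Added note: this matches, e.g., 'T *allocate(size_t)' and
// 'T *allocate(size_t, const void *)' as declared by std::allocator;
// StartFunction below uses it to relax the CFI unrelated-cast check for such
// functions.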
/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    bool NoSanitizeCoverage = false;

    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      // Apply the no_sanitize* attributes to SanOpts.
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);

      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() &&
           II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutine passes
  // are not aware of how to move the extra UBSan instructions across the
  // split coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;
  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");

    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::makeArrayRef<uint8_t>(
          CurFn->getName().bytes_begin(), CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone)
    if (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc))
      Fn->addFnAttr(llvm::Attribute::NoProfile);

  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }
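  // Added example note: '__attribute__((patchable_function_entry(5, 2)))'
  // yields "patchable-function-prefix"="2" and "patchable-function-entry"="3",
  // i.e. two NOPs before the entry point and three after it.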
  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to
  // ARM/ARM64 backends as they don't need it -- instructions on these
  // architectures are always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86())
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      // Remove any (C++17) exception specifications, to allow calling e.g. a
      // noexcept function through a non-noexcept pointer.
      auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
          FD->getType(), EST_None);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
      llvm::GlobalVariable *FTRTTIProxy =
          CGM.GetOrCreateRTTIProxyGlobalVariable(FTRTTIConst);
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(llvm::LLVMContext::MD_func_sanitize,
                      MDB.createRTTIPointerPrologue(PrologueSig, FTRTTIProxy));
      CGM.addCompilerUsedGlobal(FTRTTIProxy);
    }
  }
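  // Added illustrative note: with -fsanitize=function, an indirect call site
  // compares the callee-side RTTI recorded above against the static type of
  // the call expression and traps on mismatch, catching calls through a
  // wrongly typed function pointer.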
  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the
    // calling convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute "counting-function" is set to the mcount
  // function name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, ConvertType(RetTy),
                          CurFnInfo->getReturnInfo().getIndirectAlign());
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(
        CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
    ReturnValue =
        Address(Addr, ConvertType(RetTy), CGM.getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }
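  // Added illustrative note: a C++ function returning a large struct by value
  // typically takes the ABIArgInfo::Indirect path above; the caller passes a
  // hidden sret pointer and the body stores the result through it rather than
  // into a local "retval" temporary.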
  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (isa_and_nonnull<CXXMethodDecl>(D) &&
      cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either
        // by value or by reference, make sure CXXThisValue points to the
        // correct object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(
          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);

  // This is checked after emitting the function body so we know if there
  // are any permitted infinite loops.
  if (checkIfFunctionMustProgress())
    CurFn->addFnAttr(llvm::Attribute::MustProgress);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}
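// Added illustrative note: a non-interposable definition containing only
// arithmetic and 'ret' is marked 'nounwind' here even at -O0, while a single
// call to a function not known to be nounwind keeps the conservative default.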
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  assert(Fn && "generating code for null Function");
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  if (FD->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // definition in case the function pointer is referenced somewhere.
    std::string FDInlineName = (Fn->getName() + ".inline").str();
    llvm::Module *M = Fn->getParent();
    llvm::Function *Clone = M->getFunction(FDInlineName);
    if (!Clone) {
      Clone = llvm::Function::Create(Fn->getFunctionType(),
                                     llvm::GlobalValue::InternalLinkage,
                                     Fn->getAddressSpace(), FDInlineName, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
    }
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    Fn = Clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
    // to detect that situation before we reach codegen, so do some late
    // replacement.
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
         PD = PD->getPreviousDecl()) {
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
        std::string FDInlineName = (Fn->getName() + ".inline").str();
        llvm::Module *M = Fn->getParent();
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
          Clone->replaceAllUsesWith(Fn);
          Clone->eraseFromParent();
        }
        break;
      }
    }
  }

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>()) {
    // Clear non-distinct debug info that was possibly attached to the
    // function due to an earlier declaration without the nodebug attribute.
    Fn->setSubprogram(nullptr);
    // Disable debug info indefinitely for this function.
    DebugInfo = nullptr;
  }

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();
  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  if (Body) {
    // Coroutines always emit lifetime markers.
    if (isa<CoroutineBodyStmt>(Body))
      ShouldEmitLifetimeMarkers = true;

    // Initialize helper which will detect jumps which can cause invalid
    // lifetime markers.
    if (ShouldEmitLifetimeMarkers)
      Bypasses.Init(Body);
  }

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Save parameters for coroutine function.
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
    llvm::append_range(FnArgs, FD->parameters());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
1443   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1444       !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1445     bool ShouldEmitUnreachable =
1446         CGM.getCodeGenOpts().StrictReturn ||
1447         !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1448     if (SanOpts.has(SanitizerKind::Return)) {
1449       SanitizerScope SanScope(this);
1450       llvm::Value *IsFalse = Builder.getFalse();
1451       EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1452                 SanitizerHandler::MissingReturn,
1453                 EmitCheckSourceLocation(FD->getLocation()), None);
1454     } else if (ShouldEmitUnreachable) {
1455       if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1456         EmitTrapCall(llvm::Intrinsic::trap);
1457     }
1458     if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1459       Builder.CreateUnreachable();
1460       Builder.ClearInsertionPoint();
1461     }
1462   }
1463 
1464   // Emit the standard function epilogue.
1465   FinishFunction(BodyRange.getEnd());
1466 
1467   // If we haven't marked the function nothrow through other means, do
1468   // a quick pass now to see if we can.
1469   if (!CurFn->doesNotThrow())
1470     TryMarkNoThrow(CurFn);
1471 }
1472 
1473 /// ContainsLabel - Return true if the statement contains a label in it. If
1474 /// this statement is not executed normally, then the absence of a label
1475 /// means that we can simply remove the (unreachable) code.
1476 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1477   // Null statement, not a label!
1478   if (!S) return false;
1479 
1480   // If this is a label, we have to emit the code, consider something like:
1481   // if (0) { ... foo: bar(); } goto foo;
1482   //
1483   // TODO: If anyone cared, we could track __label__'s, since we know that you
1484   // can't jump to one from outside their declared region.
1485   if (isa<LabelStmt>(S))
1486     return true;
1487 
1488   // If this is a case/default statement, and we haven't seen a switch, we have
1489   // to emit the code.
1490   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1491     return true;
1492 
1493   // If this is a switch statement, we want to ignore cases below it.
1494   if (isa<SwitchStmt>(S))
1495     IgnoreCaseStmts = true;
1496 
1497   // Scan subexpressions for verboten labels.
1498   for (const Stmt *SubStmt : S->children())
1499     if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1500       return true;
1501 
1502   return false;
1503 }
1504 
1505 /// containsBreak - Return true if the statement contains a break out of it.
1506 /// If the statement (recursively) contains a switch or loop with a break
1507 /// inside of it, this is fine.
1508 bool CodeGenFunction::containsBreak(const Stmt *S) {
1509   // Null statement, not a break!
1510   if (!S) return false;
1511 
1512   // If this is a switch or loop that defines its own break scope, then we can
1513   // include it and anything inside of it.
1514   if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1515       isa<ForStmt>(S))
1516     return false;
1517 
1518   if (isa<BreakStmt>(S))
1519     return true;
1520 
1521   // Scan subexpressions for verboten breaks.
1522   for (const Stmt *SubStmt : S->children())
1523     if (containsBreak(SubStmt))
1524       return true;
1525 
1526   return false;
1527 }
1528 
1529 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1530   if (!S) return false;
1531 
1532   // Some statement kinds add a scope and thus never add a decl to the current
1533   // scope.
Note, this list is longer than the list of statements that might 1534 // have an unscoped decl nested within them, but this way is conservatively 1535 // correct even if more statement kinds are added. 1536 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) || 1537 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) || 1538 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) || 1539 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S)) 1540 return false; 1541 1542 if (isa<DeclStmt>(S)) 1543 return true; 1544 1545 for (const Stmt *SubStmt : S->children()) 1546 if (mightAddDeclToScope(SubStmt)) 1547 return true; 1548 1549 return false; 1550 } 1551 1552 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 1553 /// to a constant, or if it does but contains a label, return false. If it 1554 /// constant folds return true and set the boolean result in Result. 1555 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 1556 bool &ResultBool, 1557 bool AllowLabels) { 1558 llvm::APSInt ResultInt; 1559 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) 1560 return false; 1561 1562 ResultBool = ResultInt.getBoolValue(); 1563 return true; 1564 } 1565 1566 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 1567 /// to a constant, or if it does but contains a label, return false. If it 1568 /// constant folds return true and set the folded value. 1569 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 1570 llvm::APSInt &ResultInt, 1571 bool AllowLabels) { 1572 // FIXME: Rename and handle conversion of other evaluatable things 1573 // to bool. 1574 Expr::EvalResult Result; 1575 if (!Cond->EvaluateAsInt(Result, getContext())) 1576 return false; // Not foldable, not integer or not fully evaluatable. 1577 1578 llvm::APSInt Int = Result.Val.getInt(); 1579 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond)) 1580 return false; // Contains a label. 1581 1582 ResultInt = Int; 1583 return true; 1584 } 1585 1586 /// Determine whether the given condition is an instrumentable condition 1587 /// (i.e. no "&&" or "||"). 1588 bool CodeGenFunction::isInstrumentedCondition(const Expr *C) { 1589 // Bypass simplistic logical-NOT operator before determining whether the 1590 // condition contains any other logical operator. 1591 if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens())) 1592 if (UnOp->getOpcode() == UO_LNot) 1593 C = UnOp->getSubExpr(); 1594 1595 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens()); 1596 return (!BOp || !BOp->isLogicalOp()); 1597 } 1598 1599 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that 1600 /// increments a profile counter based on the semantics of the given logical 1601 /// operator opcode. This is used to instrument branch condition coverage for 1602 /// logical operators. 1603 void CodeGenFunction::EmitBranchToCounterBlock( 1604 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock, 1605 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */, 1606 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) { 1607 // If not instrumenting, just emit a branch. 
1608 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr(); 1609 if (!InstrumentRegions || !isInstrumentedCondition(Cond)) 1610 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH); 1611 1612 llvm::BasicBlock *ThenBlock = nullptr; 1613 llvm::BasicBlock *ElseBlock = nullptr; 1614 llvm::BasicBlock *NextBlock = nullptr; 1615 1616 // Create the block we'll use to increment the appropriate counter. 1617 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt"); 1618 1619 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This 1620 // means we need to evaluate the condition and increment the counter on TRUE: 1621 // 1622 // if (Cond) 1623 // goto CounterIncrBlock; 1624 // else 1625 // goto FalseBlock; 1626 // 1627 // CounterIncrBlock: 1628 // Counter++; 1629 // goto TrueBlock; 1630 1631 if (LOp == BO_LAnd) { 1632 ThenBlock = CounterIncrBlock; 1633 ElseBlock = FalseBlock; 1634 NextBlock = TrueBlock; 1635 } 1636 1637 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means 1638 // we need to evaluate the condition and increment the counter on FALSE: 1639 // 1640 // if (Cond) 1641 // goto TrueBlock; 1642 // else 1643 // goto CounterIncrBlock; 1644 // 1645 // CounterIncrBlock: 1646 // Counter++; 1647 // goto FalseBlock; 1648 1649 else if (LOp == BO_LOr) { 1650 ThenBlock = TrueBlock; 1651 ElseBlock = CounterIncrBlock; 1652 NextBlock = FalseBlock; 1653 } else { 1654 llvm_unreachable("Expected Opcode must be that of a Logical Operator"); 1655 } 1656 1657 // Emit Branch based on condition. 1658 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH); 1659 1660 // Emit the block containing the counter increment(s). 1661 EmitBlock(CounterIncrBlock); 1662 1663 // Increment corresponding counter; if index not provided, use Cond as index. 1664 incrementProfileCounter(CntrIdx ? CntrIdx : Cond); 1665 1666 // Go to the next block. 1667 EmitBranch(NextBlock); 1668 } 1669 1670 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if 1671 /// statement) to the specified blocks. Based on the condition, this might try 1672 /// to simplify the codegen of the conditional based on the branch. 1673 /// \param LH The value of the likelihood attribute on the True branch. 1674 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, 1675 llvm::BasicBlock *TrueBlock, 1676 llvm::BasicBlock *FalseBlock, 1677 uint64_t TrueCount, 1678 Stmt::Likelihood LH) { 1679 Cond = Cond->IgnoreParens(); 1680 1681 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) { 1682 1683 // Handle X && Y in a condition. 1684 if (CondBOp->getOpcode() == BO_LAnd) { 1685 // If we have "1 && X", simplify the code. "0 && X" would have constant 1686 // folded if the case was simple enough. 1687 bool ConstantBool = false; 1688 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && 1689 ConstantBool) { 1690 // br(1 && X) -> br(X). 1691 incrementProfileCounter(CondBOp); 1692 return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock, 1693 FalseBlock, TrueCount, LH); 1694 } 1695 1696 // If we have "X && 1", simplify the code to use an uncond branch. 1697 // "X && 0" would have been constant folded to 0. 1698 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) && 1699 ConstantBool) { 1700 // br(X && 1) -> br(X). 1701 return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock, 1702 FalseBlock, TrueCount, LH, CondBOp); 1703 } 1704 1705 // Emit the LHS as a conditional. 
If the LHS conditional is false, we
1706       // want to jump to the FalseBlock.
1707       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1708       // The counter tells us how often we evaluate RHS, and all of TrueCount
1709       // can be propagated to that branch.
1710       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1711 
1712       ConditionalEvaluation eval(*this);
1713       {
1714         ApplyDebugLocation DL(*this, Cond);
1715         // Propagate the likelihood attribute like __builtin_expect
1716         // __builtin_expect(X && Y, 1) -> X and Y are likely
1717         // __builtin_expect(X && Y, 0) -> only Y is unlikely
1718         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1719                              LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1720         EmitBlock(LHSTrue);
1721       }
1722 
1723       incrementProfileCounter(CondBOp);
1724       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1725 
1726       // Any temporaries created here are conditional.
1727       eval.begin(*this);
1728       EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1729                                FalseBlock, TrueCount, LH);
1730       eval.end(*this);
1731 
1732       return;
1733     }
1734 
1735     if (CondBOp->getOpcode() == BO_LOr) {
1736       // If we have "0 || X", simplify the code. "1 || X" would have constant
1737       // folded if the case was simple enough.
1738       bool ConstantBool = false;
1739       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1740           !ConstantBool) {
1741         // br(0 || X) -> br(X).
1742         incrementProfileCounter(CondBOp);
1743         return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1744                                         FalseBlock, TrueCount, LH);
1745       }
1746 
1747       // If we have "X || 0", simplify the code to use an uncond branch.
1748       // "X || 1" would have been constant folded to 1.
1749       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1750           !ConstantBool) {
1751         // br(X || 0) -> br(X).
1752         return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1753                                         FalseBlock, TrueCount, LH, CondBOp);
1754       }
1755 
1756       // Emit the LHS as a conditional. If the LHS conditional is true, we
1757       // want to jump to the TrueBlock.
1758       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1759       // We have the count for entry to the RHS and for the whole expression
1760       // being true, so we can divvy up the true count between the short
1761       // circuit and the RHS.
1762       uint64_t LHSCount =
1763           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1764       uint64_t RHSCount = TrueCount - LHSCount;
1765 
1766       ConditionalEvaluation eval(*this);
1767       {
1768         // Propagate the likelihood attribute like __builtin_expect
1769         // __builtin_expect(X || Y, 1) -> only Y is likely
1770         // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1771         ApplyDebugLocation DL(*this, Cond);
1772         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1773                              LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1774         EmitBlock(LHSFalse);
1775       }
1776 
1777       incrementProfileCounter(CondBOp);
1778       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1779 
1780       // Any temporaries created here are conditional.
1781       eval.begin(*this);
1782       EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1783                                RHSCount, LH);
1784 
1785       eval.end(*this);
1786 
1787       return;
1788     }
1789   }
1790 
1791   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1792     // br(!x, t, f) -> br(x, f, t)
1793     if (CondUOp->getOpcode() == UO_LNot) {
1794       // Negate the count.
1795       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1796       // The values of the enum are chosen to make this negation possible.
1797       LH = static_cast<Stmt::Likelihood>(-LH);
1798       // Negate the condition and swap the destination blocks.
1799       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1800                                   FalseCount, LH);
1801     }
1802   }
1803 
1804   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1805     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1806     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1807     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1808 
1809     // The ConditionalOperator itself has no likelihood information for its
1810     // true and false branches. This matches the behavior of __builtin_expect.
1811     ConditionalEvaluation cond(*this);
1812     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1813                          getProfileCount(CondOp), Stmt::LH_None);
1814 
1815     // When computing PGO branch weights, we only know the overall count for
1816     // the true block. This code is essentially doing tail duplication of the
1817     // naive code-gen, introducing new edges for which counts are not
1818     // available. Divide the counts proportionally between the LHS and RHS of
1819     // the conditional operator.
1820     uint64_t LHSScaledTrueCount = 0;
1821     if (TrueCount) {
1822       double LHSRatio =
1823           getProfileCount(CondOp) / (double)getCurrentProfileCount();
1824       LHSScaledTrueCount = TrueCount * LHSRatio;
1825     }
1826 
1827     cond.begin(*this);
1828     EmitBlock(LHSBlock);
1829     incrementProfileCounter(CondOp);
1830     {
1831       ApplyDebugLocation DL(*this, Cond);
1832       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1833                            LHSScaledTrueCount, LH);
1834     }
1835     cond.end(*this);
1836 
1837     cond.begin(*this);
1838     EmitBlock(RHSBlock);
1839     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1840                          TrueCount - LHSScaledTrueCount, LH);
1841     cond.end(*this);
1842 
1843     return;
1844   }
1845 
1846   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1847     // Conditional operator handling can give us a throw expression as a
1848     // condition for a case like:
1849     //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1850     // Fold this to:
1851     //   br(c, throw x, br(y, t, f))
1852     EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1853     return;
1854   }
1855 
1856   // Emit the code with the fully general case.
1857   llvm::Value *CondV;
1858   {
1859     ApplyDebugLocation DL(*this, Cond);
1860     CondV = EvaluateExprAsBool(Cond);
1861   }
1862 
1863   llvm::MDNode *Weights = nullptr;
1864   llvm::MDNode *Unpredictable = nullptr;
1865 
1866   // If the branch has a condition wrapped by __builtin_unpredictable,
1867   // create metadata that specifies that the branch is unpredictable.
1868   // Don't bother if not optimizing because that metadata would not be used.
1869   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1870   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1871     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1872     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1873       llvm::MDBuilder MDHelper(getLLVMContext());
1874       Unpredictable = MDHelper.createUnpredictable();
1875     }
1876   }
1877 
1878   // If we have likelihood knowledge for the condition, lower it.
1879   // Note that if not optimizing this won't emit anything.
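  // As a sketch of that lowering (illustrative IR, assuming the condition is
  // an i1 value %x and the branch is marked likely):
  //
  //   %x.expval = call i1 @llvm.expect.i1(i1 %x, i1 true)
  //
  // The conditional branch emitted below then tests %x.expval instead of %x.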
1880 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH); 1881 if (CondV != NewCondV) 1882 CondV = NewCondV; 1883 else { 1884 // Otherwise, lower profile counts. Note that we do this even at -O0. 1885 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount); 1886 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount); 1887 } 1888 1889 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable); 1890 } 1891 1892 /// ErrorUnsupported - Print out an error that codegen doesn't support the 1893 /// specified stmt yet. 1894 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) { 1895 CGM.ErrorUnsupported(S, Type); 1896 } 1897 1898 /// emitNonZeroVLAInit - Emit the "zero" initialization of a 1899 /// variable-length array whose elements have a non-zero bit-pattern. 1900 /// 1901 /// \param baseType the inner-most element type of the array 1902 /// \param src - a char* pointing to the bit-pattern for a single 1903 /// base element of the array 1904 /// \param sizeInChars - the total size of the VLA, in chars 1905 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, 1906 Address dest, Address src, 1907 llvm::Value *sizeInChars) { 1908 CGBuilderTy &Builder = CGF.Builder; 1909 1910 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType); 1911 llvm::Value *baseSizeInChars 1912 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity()); 1913 1914 Address begin = 1915 Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin"); 1916 llvm::Value *end = Builder.CreateInBoundsGEP( 1917 begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end"); 1918 1919 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock(); 1920 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop"); 1921 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont"); 1922 1923 // Make a loop over the VLA. C99 guarantees that the VLA element 1924 // count must be nonzero. 1925 CGF.EmitBlock(loopBB); 1926 1927 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur"); 1928 cur->addIncoming(begin.getPointer(), originBB); 1929 1930 CharUnits curAlign = 1931 dest.getAlignment().alignmentOfArrayElement(baseSize); 1932 1933 // memcpy the individual element bit-pattern. 1934 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars, 1935 /*volatile*/ false); 1936 1937 // Go to the next element. 1938 llvm::Value *next = 1939 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next"); 1940 1941 // Leave if that's the end of the VLA. 1942 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone"); 1943 Builder.CreateCondBr(done, contBB, loopBB); 1944 cur->addIncoming(next, loopBB); 1945 1946 CGF.EmitBlock(contBB); 1947 } 1948 1949 void 1950 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) { 1951 // Ignore empty classes in C++. 1952 if (getLangOpts().CPlusPlus) { 1953 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1954 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty()) 1955 return; 1956 } 1957 } 1958 1959 // Cast the dest ptr to the appropriate i8 pointer type. 1960 if (DestPtr.getElementType() != Int8Ty) 1961 DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty); 1962 1963 // Get size and alignment info for this aggregate. 1964 CharUnits size = getContext().getTypeSizeInChars(Ty); 1965 1966 llvm::Value *SizeVal; 1967 const VariableArrayType *vla; 1968 1969 // Don't bother emitting a zero-byte memset. 
1970 if (size.isZero()) { 1971 // But note that getTypeInfo returns 0 for a VLA. 1972 if (const VariableArrayType *vlaType = 1973 dyn_cast_or_null<VariableArrayType>( 1974 getContext().getAsArrayType(Ty))) { 1975 auto VlaSize = getVLASize(vlaType); 1976 SizeVal = VlaSize.NumElts; 1977 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type); 1978 if (!eltSize.isOne()) 1979 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize)); 1980 vla = vlaType; 1981 } else { 1982 return; 1983 } 1984 } else { 1985 SizeVal = CGM.getSize(size); 1986 vla = nullptr; 1987 } 1988 1989 // If the type contains a pointer to data member we can't memset it to zero. 1990 // Instead, create a null constant and copy it to the destination. 1991 // TODO: there are other patterns besides zero that we can usefully memset, 1992 // like -1, which happens to be the pattern used by member-pointers. 1993 if (!CGM.getTypes().isZeroInitializable(Ty)) { 1994 // For a VLA, emit a single element, then splat that over the VLA. 1995 if (vla) Ty = getContext().getBaseElementType(vla); 1996 1997 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty); 1998 1999 llvm::GlobalVariable *NullVariable = 2000 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(), 2001 /*isConstant=*/true, 2002 llvm::GlobalVariable::PrivateLinkage, 2003 NullConstant, Twine()); 2004 CharUnits NullAlign = DestPtr.getAlignment(); 2005 NullVariable->setAlignment(NullAlign.getAsAlign()); 2006 Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()), 2007 Builder.getInt8Ty(), NullAlign); 2008 2009 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal); 2010 2011 // Get and call the appropriate llvm.memcpy overload. 2012 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false); 2013 return; 2014 } 2015 2016 // Otherwise, just memset the whole thing to zero. This is legal 2017 // because in LLVM, all default initializers (other than the ones we just 2018 // handled above) are guaranteed to have a bit pattern of all zeros. 2019 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); 2020 } 2021 2022 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) { 2023 // Make sure that there is a block for the indirect goto. 2024 if (!IndirectBranch) 2025 GetIndirectGotoBlock(); 2026 2027 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock(); 2028 2029 // Make sure the indirect branch includes all of the address-taken blocks. 2030 IndirectBranch->addDestination(BB); 2031 return llvm::BlockAddress::get(CurFn, BB); 2032 } 2033 2034 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() { 2035 // If we already made the indirect branch for indirect goto, return its block. 2036 if (IndirectBranch) return IndirectBranch->getParent(); 2037 2038 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto")); 2039 2040 // Create the PHI node that indirect gotos will add entries to. 2041 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0, 2042 "indirect.goto.dest"); 2043 2044 // Create the indirect branch instruction. 2045 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal); 2046 return IndirectBranch->getParent(); 2047 } 2048 2049 /// Computes the length of an array in elements, as well as the base 2050 /// element type and a properly-typed first element pointer. 
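///
/// For example (illustrative): given 'int a[n][2][3]', the VLA dimension 'n'
/// is loaded from VLASizeMap, the constant dimensions contribute a factor of
/// 6, and the function returns 'n * 6' elements with baseType 'int'.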
2051 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, 2052 QualType &baseType, 2053 Address &addr) { 2054 const ArrayType *arrayType = origArrayType; 2055 2056 // If it's a VLA, we have to load the stored size. Note that 2057 // this is the size of the VLA in bytes, not its size in elements. 2058 llvm::Value *numVLAElements = nullptr; 2059 if (isa<VariableArrayType>(arrayType)) { 2060 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts; 2061 2062 // Walk into all VLAs. This doesn't require changes to addr, 2063 // which has type T* where T is the first non-VLA element type. 2064 do { 2065 QualType elementType = arrayType->getElementType(); 2066 arrayType = getContext().getAsArrayType(elementType); 2067 2068 // If we only have VLA components, 'addr' requires no adjustment. 2069 if (!arrayType) { 2070 baseType = elementType; 2071 return numVLAElements; 2072 } 2073 } while (isa<VariableArrayType>(arrayType)); 2074 2075 // We get out here only if we find a constant array type 2076 // inside the VLA. 2077 } 2078 2079 // We have some number of constant-length arrays, so addr should 2080 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks 2081 // down to the first element of addr. 2082 SmallVector<llvm::Value*, 8> gepIndices; 2083 2084 // GEP down to the array type. 2085 llvm::ConstantInt *zero = Builder.getInt32(0); 2086 gepIndices.push_back(zero); 2087 2088 uint64_t countFromCLAs = 1; 2089 QualType eltType; 2090 2091 llvm::ArrayType *llvmArrayType = 2092 dyn_cast<llvm::ArrayType>(addr.getElementType()); 2093 while (llvmArrayType) { 2094 assert(isa<ConstantArrayType>(arrayType)); 2095 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue() 2096 == llvmArrayType->getNumElements()); 2097 2098 gepIndices.push_back(zero); 2099 countFromCLAs *= llvmArrayType->getNumElements(); 2100 eltType = arrayType->getElementType(); 2101 2102 llvmArrayType = 2103 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType()); 2104 arrayType = getContext().getAsArrayType(arrayType->getElementType()); 2105 assert((!llvmArrayType || arrayType) && 2106 "LLVM and Clang types are out-of-synch"); 2107 } 2108 2109 if (arrayType) { 2110 // From this point onwards, the Clang array type has been emitted 2111 // as some other type (probably a packed struct). Compute the array 2112 // size, and just emit the 'begin' expression as a bitcast. 2113 while (arrayType) { 2114 countFromCLAs *= 2115 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue(); 2116 eltType = arrayType->getElementType(); 2117 arrayType = getContext().getAsArrayType(eltType); 2118 } 2119 2120 llvm::Type *baseType = ConvertType(eltType); 2121 addr = Builder.CreateElementBitCast(addr, baseType, "array.begin"); 2122 } else { 2123 // Create the actual GEP. 2124 addr = Address(Builder.CreateInBoundsGEP( 2125 addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"), 2126 ConvertTypeForMem(eltType), 2127 addr.getAlignment()); 2128 } 2129 2130 baseType = eltType; 2131 2132 llvm::Value *numElements 2133 = llvm::ConstantInt::get(SizeTy, countFromCLAs); 2134 2135 // If we had any VLA dimensions, factor them in. 
2136 if (numVLAElements) 2137 numElements = Builder.CreateNUWMul(numVLAElements, numElements); 2138 2139 return numElements; 2140 } 2141 2142 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) { 2143 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 2144 assert(vla && "type was not a variable array type!"); 2145 return getVLASize(vla); 2146 } 2147 2148 CodeGenFunction::VlaSizePair 2149 CodeGenFunction::getVLASize(const VariableArrayType *type) { 2150 // The number of elements so far; always size_t. 2151 llvm::Value *numElements = nullptr; 2152 2153 QualType elementType; 2154 do { 2155 elementType = type->getElementType(); 2156 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()]; 2157 assert(vlaSize && "no size for VLA!"); 2158 assert(vlaSize->getType() == SizeTy); 2159 2160 if (!numElements) { 2161 numElements = vlaSize; 2162 } else { 2163 // It's undefined behavior if this wraps around, so mark it that way. 2164 // FIXME: Teach -fsanitize=undefined to trap this. 2165 numElements = Builder.CreateNUWMul(numElements, vlaSize); 2166 } 2167 } while ((type = getContext().getAsVariableArrayType(elementType))); 2168 2169 return { numElements, elementType }; 2170 } 2171 2172 CodeGenFunction::VlaSizePair 2173 CodeGenFunction::getVLAElements1D(QualType type) { 2174 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 2175 assert(vla && "type was not a variable array type!"); 2176 return getVLAElements1D(vla); 2177 } 2178 2179 CodeGenFunction::VlaSizePair 2180 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) { 2181 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()]; 2182 assert(VlaSize && "no size for VLA!"); 2183 assert(VlaSize->getType() == SizeTy); 2184 return { VlaSize, Vla->getElementType() }; 2185 } 2186 2187 void CodeGenFunction::EmitVariablyModifiedType(QualType type) { 2188 assert(type->isVariablyModifiedType() && 2189 "Must pass variably modified type to EmitVLASizes!"); 2190 2191 EnsureInsertPoint(); 2192 2193 // We're going to walk down into the type and look for VLA 2194 // expressions. 2195 do { 2196 assert(type->isVariablyModifiedType()); 2197 2198 const Type *ty = type.getTypePtr(); 2199 switch (ty->getTypeClass()) { 2200 2201 #define TYPE(Class, Base) 2202 #define ABSTRACT_TYPE(Class, Base) 2203 #define NON_CANONICAL_TYPE(Class, Base) 2204 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 2205 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) 2206 #include "clang/AST/TypeNodes.inc" 2207 llvm_unreachable("unexpected dependent type!"); 2208 2209 // These types are never variably-modified. 
2210 case Type::Builtin: 2211 case Type::Complex: 2212 case Type::Vector: 2213 case Type::ExtVector: 2214 case Type::ConstantMatrix: 2215 case Type::Record: 2216 case Type::Enum: 2217 case Type::Elaborated: 2218 case Type::Using: 2219 case Type::TemplateSpecialization: 2220 case Type::ObjCTypeParam: 2221 case Type::ObjCObject: 2222 case Type::ObjCInterface: 2223 case Type::ObjCObjectPointer: 2224 case Type::BitInt: 2225 llvm_unreachable("type class is never variably-modified!"); 2226 2227 case Type::Adjusted: 2228 type = cast<AdjustedType>(ty)->getAdjustedType(); 2229 break; 2230 2231 case Type::Decayed: 2232 type = cast<DecayedType>(ty)->getPointeeType(); 2233 break; 2234 2235 case Type::Pointer: 2236 type = cast<PointerType>(ty)->getPointeeType(); 2237 break; 2238 2239 case Type::BlockPointer: 2240 type = cast<BlockPointerType>(ty)->getPointeeType(); 2241 break; 2242 2243 case Type::LValueReference: 2244 case Type::RValueReference: 2245 type = cast<ReferenceType>(ty)->getPointeeType(); 2246 break; 2247 2248 case Type::MemberPointer: 2249 type = cast<MemberPointerType>(ty)->getPointeeType(); 2250 break; 2251 2252 case Type::ConstantArray: 2253 case Type::IncompleteArray: 2254 // Losing element qualification here is fine. 2255 type = cast<ArrayType>(ty)->getElementType(); 2256 break; 2257 2258 case Type::VariableArray: { 2259 // Losing element qualification here is fine. 2260 const VariableArrayType *vat = cast<VariableArrayType>(ty); 2261 2262 // Unknown size indication requires no size computation. 2263 // Otherwise, evaluate and record it. 2264 if (const Expr *sizeExpr = vat->getSizeExpr()) { 2265 // It's possible that we might have emitted this already, 2266 // e.g. with a typedef and a pointer to it. 2267 llvm::Value *&entry = VLASizeMap[sizeExpr]; 2268 if (!entry) { 2269 llvm::Value *size = EmitScalarExpr(sizeExpr); 2270 2271 // C11 6.7.6.2p5: 2272 // If the size is an expression that is not an integer constant 2273 // expression [...] each time it is evaluated it shall have a value 2274 // greater than zero. 2275 if (SanOpts.has(SanitizerKind::VLABound)) { 2276 SanitizerScope SanScope(this); 2277 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType()); 2278 clang::QualType SEType = sizeExpr->getType(); 2279 llvm::Value *CheckCondition = 2280 SEType->isSignedIntegerType() 2281 ? Builder.CreateICmpSGT(size, Zero) 2282 : Builder.CreateICmpUGT(size, Zero); 2283 llvm::Constant *StaticArgs[] = { 2284 EmitCheckSourceLocation(sizeExpr->getBeginLoc()), 2285 EmitCheckTypeDescriptor(SEType)}; 2286 EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound), 2287 SanitizerHandler::VLABoundNotPositive, StaticArgs, size); 2288 } 2289 2290 // Always zexting here would be wrong if it weren't 2291 // undefined behavior to have a negative bound. 2292 // FIXME: What about when size's type is larger than size_t? 2293 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false); 2294 } 2295 } 2296 type = vat->getElementType(); 2297 break; 2298 } 2299 2300 case Type::FunctionProto: 2301 case Type::FunctionNoProto: 2302 type = cast<FunctionType>(ty)->getReturnType(); 2303 break; 2304 2305 case Type::Paren: 2306 case Type::TypeOf: 2307 case Type::UnaryTransform: 2308 case Type::Attributed: 2309 case Type::BTFTagAttributed: 2310 case Type::SubstTemplateTypeParm: 2311 case Type::MacroQualified: 2312 // Keep walking after single level desugaring. 
2313 type = type.getSingleStepDesugaredType(getContext()); 2314 break; 2315 2316 case Type::Typedef: 2317 case Type::Decltype: 2318 case Type::Auto: 2319 case Type::DeducedTemplateSpecialization: 2320 // Stop walking: nothing to do. 2321 return; 2322 2323 case Type::TypeOfExpr: 2324 // Stop walking: emit typeof expression. 2325 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr()); 2326 return; 2327 2328 case Type::Atomic: 2329 type = cast<AtomicType>(ty)->getValueType(); 2330 break; 2331 2332 case Type::Pipe: 2333 type = cast<PipeType>(ty)->getElementType(); 2334 break; 2335 } 2336 } while (type->isVariablyModifiedType()); 2337 } 2338 2339 Address CodeGenFunction::EmitVAListRef(const Expr* E) { 2340 if (getContext().getBuiltinVaListType()->isArrayType()) 2341 return EmitPointerWithAlignment(E); 2342 return EmitLValue(E).getAddress(*this); 2343 } 2344 2345 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) { 2346 return EmitLValue(E).getAddress(*this); 2347 } 2348 2349 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E, 2350 const APValue &Init) { 2351 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!"); 2352 if (CGDebugInfo *Dbg = getDebugInfo()) 2353 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) 2354 Dbg->EmitGlobalVariable(E->getDecl(), Init); 2355 } 2356 2357 CodeGenFunction::PeepholeProtection 2358 CodeGenFunction::protectFromPeepholes(RValue rvalue) { 2359 // At the moment, the only aggressive peephole we do in IR gen 2360 // is trunc(zext) folding, but if we add more, we can easily 2361 // extend this protection. 2362 2363 if (!rvalue.isScalar()) return PeepholeProtection(); 2364 llvm::Value *value = rvalue.getScalarVal(); 2365 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection(); 2366 2367 // Just make an extra bitcast. 2368 assert(HaveInsertPoint()); 2369 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "", 2370 Builder.GetInsertBlock()); 2371 2372 PeepholeProtection protection; 2373 protection.Inst = inst; 2374 return protection; 2375 } 2376 2377 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) { 2378 if (!protection.Inst) return; 2379 2380 // In theory, we could try to duplicate the peepholes now, but whatever. 
2381 protection.Inst->eraseFromParent(); 2382 } 2383 2384 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, 2385 QualType Ty, SourceLocation Loc, 2386 SourceLocation AssumptionLoc, 2387 llvm::Value *Alignment, 2388 llvm::Value *OffsetValue) { 2389 if (Alignment->getType() != IntPtrTy) 2390 Alignment = 2391 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align"); 2392 if (OffsetValue && OffsetValue->getType() != IntPtrTy) 2393 OffsetValue = 2394 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset"); 2395 llvm::Value *TheCheck = nullptr; 2396 if (SanOpts.has(SanitizerKind::Alignment)) { 2397 llvm::Value *PtrIntValue = 2398 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint"); 2399 2400 if (OffsetValue) { 2401 bool IsOffsetZero = false; 2402 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue)) 2403 IsOffsetZero = CI->isZero(); 2404 2405 if (!IsOffsetZero) 2406 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr"); 2407 } 2408 2409 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0); 2410 llvm::Value *Mask = 2411 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1)); 2412 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr"); 2413 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond"); 2414 } 2415 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption( 2416 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue); 2417 2418 if (!SanOpts.has(SanitizerKind::Alignment)) 2419 return; 2420 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment, 2421 OffsetValue, TheCheck, Assumption); 2422 } 2423 2424 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, 2425 const Expr *E, 2426 SourceLocation AssumptionLoc, 2427 llvm::Value *Alignment, 2428 llvm::Value *OffsetValue) { 2429 if (auto *CE = dyn_cast<CastExpr>(E)) 2430 E = CE->getSubExprAsWritten(); 2431 QualType Ty = E->getType(); 2432 SourceLocation Loc = E->getExprLoc(); 2433 2434 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment, 2435 OffsetValue); 2436 } 2437 2438 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn, 2439 llvm::Value *AnnotatedVal, 2440 StringRef AnnotationStr, 2441 SourceLocation Location, 2442 const AnnotateAttr *Attr) { 2443 SmallVector<llvm::Value *, 5> Args = { 2444 AnnotatedVal, 2445 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy), 2446 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy), 2447 CGM.EmitAnnotationLineNo(Location), 2448 }; 2449 if (Attr) 2450 Args.push_back(CGM.EmitAnnotationArgs(Attr)); 2451 return Builder.CreateCall(AnnotationFn, Args); 2452 } 2453 2454 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) { 2455 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 2456 // FIXME We create a new bitcast for every annotation because that's what 2457 // llvm-gcc was doing. 2458 for (const auto *I : D->specific_attrs<AnnotateAttr>()) 2459 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation), 2460 Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()), 2461 I->getAnnotation(), D->getLocation(), I); 2462 } 2463 2464 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D, 2465 Address Addr) { 2466 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 2467 llvm::Value *V = Addr.getPointer(); 2468 llvm::Type *VTy = V->getType(); 2469 auto *PTy = dyn_cast<llvm::PointerType>(VTy); 2470 unsigned AS = PTy ? 
PTy->getAddressSpace() : 0; 2471 llvm::PointerType *IntrinTy = 2472 llvm::PointerType::getWithSamePointeeType(CGM.Int8PtrTy, AS); 2473 llvm::Function *F = 2474 CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation, IntrinTy); 2475 2476 for (const auto *I : D->specific_attrs<AnnotateAttr>()) { 2477 // FIXME Always emit the cast inst so we can differentiate between 2478 // annotation on the first field of a struct and annotation on the struct 2479 // itself. 2480 if (VTy != IntrinTy) 2481 V = Builder.CreateBitCast(V, IntrinTy); 2482 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I); 2483 V = Builder.CreateBitCast(V, VTy); 2484 } 2485 2486 return Address(V, Addr.getElementType(), Addr.getAlignment()); 2487 } 2488 2489 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { } 2490 2491 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF) 2492 : CGF(CGF) { 2493 assert(!CGF->IsSanitizerScope); 2494 CGF->IsSanitizerScope = true; 2495 } 2496 2497 CodeGenFunction::SanitizerScope::~SanitizerScope() { 2498 CGF->IsSanitizerScope = false; 2499 } 2500 2501 void CodeGenFunction::InsertHelper(llvm::Instruction *I, 2502 const llvm::Twine &Name, 2503 llvm::BasicBlock *BB, 2504 llvm::BasicBlock::iterator InsertPt) const { 2505 LoopStack.InsertHelper(I); 2506 if (IsSanitizerScope) 2507 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I); 2508 } 2509 2510 void CGBuilderInserter::InsertHelper( 2511 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, 2512 llvm::BasicBlock::iterator InsertPt) const { 2513 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt); 2514 if (CGF) 2515 CGF->InsertHelper(I, Name, BB, InsertPt); 2516 } 2517 2518 // Emits an error if we don't have a valid set of target features for the 2519 // called function. 2520 void CodeGenFunction::checkTargetFeatures(const CallExpr *E, 2521 const FunctionDecl *TargetDecl) { 2522 return checkTargetFeatures(E->getBeginLoc(), TargetDecl); 2523 } 2524 2525 // Emits an error if we don't have a valid set of target features for the 2526 // called function. 2527 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc, 2528 const FunctionDecl *TargetDecl) { 2529 // Early exit if this is an indirect call. 2530 if (!TargetDecl) 2531 return; 2532 2533 // Get the current enclosing function if it exists. If it doesn't 2534 // we can't check the target features anyhow. 2535 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl); 2536 if (!FD) 2537 return; 2538 2539 // Grab the required features for the call. For a builtin this is listed in 2540 // the td file with the default cpu, for an always_inline function this is any 2541 // listed cpu and any listed features. 2542 unsigned BuiltinID = TargetDecl->getBuiltinID(); 2543 std::string MissingFeature; 2544 llvm::StringMap<bool> CallerFeatureMap; 2545 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD); 2546 if (BuiltinID) { 2547 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID)); 2548 if (!Builtin::evaluateRequiredTargetFeatures( 2549 FeatureList, CallerFeatureMap)) { 2550 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature) 2551 << TargetDecl->getDeclName() 2552 << FeatureList; 2553 } 2554 } else if (!TargetDecl->isMultiVersion() && 2555 TargetDecl->hasAttr<TargetAttr>()) { 2556 // Get the required features for the callee. 
2557 2558 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>(); 2559 ParsedTargetAttr ParsedAttr = 2560 CGM.getContext().filterFunctionTargetAttrs(TD); 2561 2562 SmallVector<StringRef, 1> ReqFeatures; 2563 llvm::StringMap<bool> CalleeFeatureMap; 2564 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl); 2565 2566 for (const auto &F : ParsedAttr.Features) { 2567 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1))) 2568 ReqFeatures.push_back(StringRef(F).substr(1)); 2569 } 2570 2571 for (const auto &F : CalleeFeatureMap) { 2572 // Only positive features are "required". 2573 if (F.getValue()) 2574 ReqFeatures.push_back(F.getKey()); 2575 } 2576 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) { 2577 if (!CallerFeatureMap.lookup(Feature)) { 2578 MissingFeature = Feature.str(); 2579 return false; 2580 } 2581 return true; 2582 })) 2583 CGM.getDiags().Report(Loc, diag::err_function_needs_feature) 2584 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature; 2585 } 2586 } 2587 2588 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) { 2589 if (!CGM.getCodeGenOpts().SanitizeStats) 2590 return; 2591 2592 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint()); 2593 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation()); 2594 CGM.getSanStats().create(IRB, SSK); 2595 } 2596 2597 llvm::Value * 2598 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) { 2599 llvm::Value *Condition = nullptr; 2600 2601 if (!RO.Conditions.Architecture.empty()) 2602 Condition = EmitX86CpuIs(RO.Conditions.Architecture); 2603 2604 if (!RO.Conditions.Features.empty()) { 2605 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features); 2606 Condition = 2607 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond; 2608 } 2609 return Condition; 2610 } 2611 2612 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM, 2613 llvm::Function *Resolver, 2614 CGBuilderTy &Builder, 2615 llvm::Function *FuncToReturn, 2616 bool SupportsIFunc) { 2617 if (SupportsIFunc) { 2618 Builder.CreateRet(FuncToReturn); 2619 return; 2620 } 2621 2622 llvm::SmallVector<llvm::Value *, 10> Args( 2623 llvm::make_pointer_range(Resolver->args())); 2624 2625 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args); 2626 Result->setTailCallKind(llvm::CallInst::TCK_MustTail); 2627 2628 if (Resolver->getReturnType()->isVoidTy()) 2629 Builder.CreateRetVoid(); 2630 else 2631 Builder.CreateRet(Result); 2632 } 2633 2634 void CodeGenFunction::EmitMultiVersionResolver( 2635 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) { 2636 assert(getContext().getTargetInfo().getTriple().isX86() && 2637 "Only implemented for x86 targets"); 2638 2639 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc(); 2640 2641 // Main function's basic block. 2642 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver); 2643 Builder.SetInsertPoint(CurBlock); 2644 EmitX86CpuInit(); 2645 2646 for (const MultiVersionResolverOption &RO : Options) { 2647 Builder.SetInsertPoint(CurBlock); 2648 llvm::Value *Condition = FormResolverCondition(RO); 2649 2650 // The 'default' or 'generic' case. 
2651     if (!Condition) {
2652       assert(&RO == Options.end() - 1 &&
2653              "Default or Generic case must be last");
2654       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2655                                        SupportsIFunc);
2656       return;
2657     }
2658 
2659     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2660     CGBuilderTy RetBuilder(*this, RetBlock);
2661     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2662                                      SupportsIFunc);
2663     CurBlock = createBasicBlock("resolver_else", Resolver);
2664     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2665   }
2666 
2667   // If no generic/default, emit an unreachable.
2668   Builder.SetInsertPoint(CurBlock);
2669   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2670   TrapCall->setDoesNotReturn();
2671   TrapCall->setDoesNotThrow();
2672   Builder.CreateUnreachable();
2673   Builder.ClearInsertionPoint();
2674 }
2675 
2676 // Loc - where the diagnostic will point, i.e. where in the source code the
2677 // alignment requirement failed to hold.
2678 // SecondaryLoc - if present (it will be, when sufficiently different from
2679 // Loc), the diagnostic will additionally point a "Note:" to this location.
2680 // It should be the location where the __attribute__((assume_aligned))
2681 // was written.
2682 void CodeGenFunction::emitAlignmentAssumptionCheck(
2683     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2684     SourceLocation SecondaryLoc, llvm::Value *Alignment,
2685     llvm::Value *OffsetValue, llvm::Value *TheCheck,
2686     llvm::Instruction *Assumption) {
2687   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2688          cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2689              llvm::Intrinsic::getDeclaration(
2690                  Builder.GetInsertBlock()->getParent()->getParent(),
2691                  llvm::Intrinsic::assume) &&
2692          "Assumption should be a call to llvm.assume().");
2693   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2694          "Assumption should be the last instruction of the basic block, "
2695          "since the basic block is still being generated.");
2696 
2697   if (!SanOpts.has(SanitizerKind::Alignment))
2698     return;
2699 
2700   // Don't check pointers to volatile data. The behavior here is
2701   // implementation-defined.
2702   if (Ty->getPointeeType().isVolatileQualified())
2703     return;
2704 
2705   // We need to temporarily remove the assumption so we can insert the
2706   // sanitizer check before it, else the check will be dropped by optimizations.
2707   Assumption->removeFromParent();
2708 
2709   {
2710     SanitizerScope SanScope(this);
2711 
2712     if (!OffsetValue)
2713       OffsetValue = Builder.getInt1(false); // no offset.
2714 
2715     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2716                                     EmitCheckSourceLocation(SecondaryLoc),
2717                                     EmitCheckTypeDescriptor(Ty)};
2718     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2719                                   EmitCheckValue(Alignment),
2720                                   EmitCheckValue(OffsetValue)};
2721     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2722               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2723   }
2724 
2725   // We are now in the (new, empty) "cont" basic block.
2726   // Reintroduce the assumption.
2727   Builder.Insert(Assumption);
2728   // FIXME: Assumption still has its original basic block as its parent.
2729 } 2730 2731 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) { 2732 if (CGDebugInfo *DI = getDebugInfo()) 2733 return DI->SourceLocToDebugLoc(Location); 2734 2735 return llvm::DebugLoc(); 2736 } 2737 2738 llvm::Value * 2739 CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond, 2740 Stmt::Likelihood LH) { 2741 switch (LH) { 2742 case Stmt::LH_None: 2743 return Cond; 2744 case Stmt::LH_Likely: 2745 case Stmt::LH_Unlikely: 2746 // Don't generate llvm.expect on -O0 as the backend won't use it for 2747 // anything. 2748 if (CGM.getCodeGenOpts().OptimizationLevel == 0) 2749 return Cond; 2750 llvm::Type *CondTy = Cond->getType(); 2751 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean"); 2752 llvm::Function *FnExpect = 2753 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy); 2754 llvm::Value *ExpectedValueOfCond = 2755 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely); 2756 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond}, 2757 Cond->getName() + ".expval"); 2758 } 2759 llvm_unreachable("Unknown Likelihood"); 2760 } 2761 2762 llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec, 2763 unsigned NumElementsDst, 2764 const llvm::Twine &Name) { 2765 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType()); 2766 unsigned NumElementsSrc = SrcTy->getNumElements(); 2767 if (NumElementsSrc == NumElementsDst) 2768 return SrcVec; 2769 2770 std::vector<int> ShuffleMask(NumElementsDst, -1); 2771 for (unsigned MaskIdx = 0; 2772 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx) 2773 ShuffleMask[MaskIdx] = MaskIdx; 2774 2775 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name); 2776 } 2777
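
// An illustrative sketch of emitBoolVecConversion (names are made up for the
// example): widening a <2 x i1> vector %src to <4 x i1> produces roughly
//   %r = shufflevector <2 x i1> %src, <2 x i1> poison,
//                      <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
// i.e. the low lanes are copied and the two extra lanes are left undefined.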