//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

/// Emit the (non-reference) initializer for \p D into \p DeclPtr, dispatching
/// on the evaluation kind of the variable's type. \p D must have static
/// storage duration (or local storage under OpenCL C++).
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // ObjC GC strong/weak globals must be stored through the runtime's
    // write-barrier entry points rather than with a plain store.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  // The function to register (the destructor itself, or a helper) and the
  // pointer argument it will be invoked with at exit time.
  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      // In OpenCL the variable's address space may differ from the address
      // space the __cxa_atexit pointer parameter expects.
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = llvm::PointerType::get(
          CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = Addr.getPointer();
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = Addr.getPointer();
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(Type));
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
/// Mark the whole object at \p Addr invariant, using the variable's type size
/// as the extent. Forwards to CodeGenFunction::EmitInvariantStart.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

/// Emit a call to llvm.invariant.start for \p Size bytes starting at \p Addr.
/// No-op at -O0, since the intrinsic only aids optimization.
void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  assert(Addr->getType()->isPointerTy() && "Address must be a pointer");
  llvm::Type *ObjectPtr[1] = {Addr->getType()};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Int64Ty, Width), Addr};
  Builder.CreateCall(InvariantStart, Args);
}

/// Emit the dynamic initialization (and, when required, register the
/// destruction) of the global variable \p D whose storage is \p GV.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getTypes().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = GV->getAddressSpace();
  llvm::Constant *DeclPtr = GV;
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::PointerType *PTy =
        llvm::PointerType::get(getLLVMContext(), ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(
      DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    // OpenMP threadprivate variables are initialized through the OpenMP
    // runtime rather than a plain store.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    bool NeedsDtor =
        D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    // Constant-storage globals become invariant after initialization;
    // everything else may need a registered destructor.
    if (D.getType().isConstantStorage(getContext(), true, !NeedsDtor))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  // References are initialized by binding to the initializer expression and
  // storing the resulting pointer.
  assert(PerformInit && "cannot have constant initializer which needs "
                        "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  // Get a proper function pointer.
  FunctionProtoType::ExtProtoInfo EPI(getContext().getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType = getContext().getFunctionType(getContext().VoidTy,
                                                 {getContext().VoidPtrTy}, EPI);
  return CGM.getFunctionPointer(fn, fnType);
}

/// Create a stub function, suitable for being passed to __pt_atexit_np,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createTLSAtExitStub(
    const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
    llvm::FunctionCallee &AtExit) {
  // NOTE(review): AtExit is not written to in this body — confirm callers
  // expect this out-parameter to be left untouched here.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&D, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
      getContext().IntTy, FnInfoOpts::None, {getContext().IntTy},
      FunctionType::ExtInfo(), {}, RequiredArgs::All);

  // Get the stub function type, int(*)(int,...).
  llvm::FunctionType *StubTy =
      llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy}, true);

  llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
      StubTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  // The stub takes a single (ignored) int parameter.
  FunctionArgList Args;
  ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
                        ImplicitParamKind::Other);
  Args.push_back(&IPD);
  QualType ResTy = CGM.getContext().IntTy;

  CGF.StartFunction(GlobalDecl(&D, DynamicInitKind::AtExit), ResTy, DtorStub,
                    FI, Args, D.getLocation(), D.getInit()->getExprLoc());

  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(Dtor, Addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *DtorFn = dyn_cast<llvm::Function>(
          Dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(DtorFn->getCallingConv());

  // Return 0 from function
  CGF.Builder.CreateStore(llvm::Constant::getNullValue(CGM.IntTy),
                          CGF.ReturnValue);

  CGF.FinishFunction();

  return DtorStub;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

/// Register a global destructor using the LLVM 'llvm.global_dtors' global.
void CodeGenFunction::registerGlobalDtorWithLLVM(const VarDecl &VD,
                                                 llvm::FunctionCallee Dtor,
                                                 llvm::Constant *Addr) {
  // Create a function which calls the destructor.
  llvm::Function *dtorStub =
      cast<llvm::Function>(createAtExitStub(VD, Dtor, Addr));
  CGM.AddGlobalDtor(dtorStub);
}

/// Emit a call to atexit() passing the given no-argument stub function.
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(dtorStub->getType()->isPointerTy() &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination and the unatexit returns a value of 0, otherwise a non-zero
  // value is returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getType()->isPointerTy() &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  // Returns the runtime's int result so the caller can branch on it.
  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

/// Emit guarded (run-once) dynamic initialization for \p D via the C++ ABI,
/// diagnosing if guard variables have been forbidden.
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

/// Emit the conditional branch of a guarded initialization, attaching branch
/// weights that reflect how unlikely the init path is to be taken.
void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

/// Create an internal-helper function (global init/cleanup) with the given
/// type and name, applying target section, calling convention, nounwind, and
/// sanitizer attributes as configured.
llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
  llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  if (Linkage == llvm::GlobalVariable::InternalLinkage)
    SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
  else {
    SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, false);
    SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
    getTargetCodeGenInfo().setTargetAttributes(nullptr, Fn, *this);
  }

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  // Propagate each enabled sanitizer's function attribute unless this
  // location is on that sanitizer's ignore list.
  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
      !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Type) &&
      !isInNoSanitizeList(SanitizerKind::Type, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeType);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::NumericalStability) &&
      !isInNoSanitizeList(SanitizerKind::NumericalStability, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

/// Emit the per-variable dynamic initialization function for \p D and route
/// it to the appropriate registration mechanism (thread-local init list,
/// init_seg section, prioritized ctors, comdat-keyed ctors, or the ordinary
/// deferred-init list).
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    // Contract with backend that "init_seg(compiler)" corresponds to priority
    // 200 and "init_seg(lib)" corresponds to priority 400.
    int Priority = -1;
    if (ISA->getSection() == ".CRT$XCC")
      Priority = 200;
    else if (ISA->getSection() == ".CRT$XCL")
      Priority = 400;

    if (Priority != -1)
      AddGlobalCtor(Fn, Priority, ~0U, COMDATKey);
    else
      EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             !isUniqueGVALinkage(getContext().GetGVALinkageForVariable(D)) ||
             D->hasAttr<SelectAnyAttr>()) {
    // For vague linkage globals, put the initializer into its own global_ctors
    // entry with the global as a comdat key. This ensures at most one
    // initializer per DSO runs during DSO dynamic initialization.
    //
    // For ELF platforms, this is an important code size and startup time
    // optimization. For dynamic, non-hidden symbols, the weak guard variable
    // remains to ensure that other DSOs do not re-initialize the global.
    //
    // For PE-COFF platforms, there is no guard variable, and COMDAT
    // associativity is the only way to ensure vague linkage globals are
    // initialized exactly once.
    //
    // MachO is the only remaining platform with no comdats that doesn't
    // benefit from this optimization. The rest are mainly modeled on ELF
    // behavior.
    //
    // C++ requires that inline global variables are initialized in source
    // order, but this requirement does not exist for templated entities.
    // llvm.global_ctors does not guarantee initialization order, so in
    // general, Clang does not fully conform to the ordering requirement.
    // However, in practice, LLVM emits global_ctors in the provided order, and
    // users typically don't rely on ordering between inline globals in
    // different headers which are then transitively included in varying order.
    // Clang's current behavior is a practical tradeoff, since dropping the
    // comdat would lead to unacceptable impact on code size and startup time.
    //
    // FIXME: Find a solution to guarantee source-order initialization of
    // inline variables.
    //
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // CXXGlobalInits.size() is the lex order number for the next deferred
    // VarDecl. Use it when the current VarDecl is non-deferred. Although this
    // lex order number is shared between current VarDecl and some following
    // VarDecls, their order of insertion into `llvm.global_ctors` is the same
    // as the lexing order and the following stable sort would preserve such
    // order.
    I = DelayedCXXInitPosition.find(D);
    unsigned LexOrder =
        I == DelayedCXXInitPosition.end() ? CXXGlobalInits.size() : I->second;
    AddGlobalCtor(Fn, 65535, LexOrder, COMDATKey);
    if (COMDATKey && (getTriple().isOSBinFormatELF() ||
                      getTarget().getCXXABI().isMicrosoft())) {
      // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
      // llvm.used to prevent linker GC.
      addUsedGlobal(COMDATKey);
    }

    // If we used a COMDAT key for the global ctor, the init function can be
    // discarded if the global ctor entry is discarded.
    // FIXME: Do we need to restrict this to ELF and Wasm?
    llvm::Comdat *C = Addr->getComdat();
    if (COMDATKey && C &&
        (getTarget().getTriple().isOSBinFormatELF() ||
         getTarget().getTriple().isOSBinFormatWasm())) {
      Fn->setComdat(C);
    }
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

/// Hand the collected thread_local initializers to the C++ ABI and reset the
/// per-module collection state.
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/* Build the initializer for a C++20 module:
   This is arranged to be run only once regardless of how many times the module
   might be included transitively. This is arranged by using a guard variable.

   If there are no initializers at all (and also no imported modules) we reduce
   this to an empty function (since the Itanium ABI requires that this function
   be available to a caller, which might be produced by a different
   implementation).

   First we call any initializers for imported modules.
   We then call initializers for the Global Module Fragment (if present)
   We then call initializers for the current module.
   We then call initializers for the Private Module Fragment (if present)
*/

void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
  assert(Primary->isInterfaceOrPartition() &&
         "The function should only be called for C++20 named module interface"
         " or partition.");

  // Drop trailing empty (already-emitted) slots from the deferred-init list.
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // As noted above, we create the function, even if it is empty.
  // Module initializers for imported modules are emitted first.

  // Collect all the modules that we import
  llvm::SmallSetVector<Module *, 8> AllImports;
  // Ones that we export
  for (auto I : Primary->Exports)
    AllImports.insert(I.getPointer());
  // Ones that we only import.
  AllImports.insert_range(Primary->Imports);
  // Ones that we import in the global module fragment or the private module
  // fragment.
  for (Module *SubM : Primary->submodules()) {
    assert((SubM->isGlobalModule() || SubM->isPrivateModule()) &&
           "The sub modules of C++20 module unit should only be global module "
           "fragments or private module framents.");
    assert(SubM->Exports.empty() &&
           "The global mdoule fragments and the private module fragments are "
           "not allowed to export import modules.");
    AllImports.insert_range(SubM->Imports);
  }

  // Declare (but do not define) each imported module's initializer so we can
  // call it from ours.
  SmallVector<llvm::Function *, 8> ModuleInits;
  for (Module *M : AllImports) {
    // No Itanium initializer in header like modules.
    if (M->isHeaderLikeModule())
      continue; // TODO: warn of mixed use of module map modules and C++20?
    // We're allowed to skip the initialization if we are sure it doesn't
    // do any thing.
    if (!M->isNamedModuleInterfaceHasInit())
      continue;
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
    SmallString<256> FnName;
    {
      llvm::raw_svector_ostream Out(FnName);
      cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
          .mangleModuleInitializer(M, Out);
    }
    assert(!GetGlobalValue(FnName.str()) &&
           "We should only have one use of the initializer call");
    llvm::Function *Fn = llvm::Function::Create(
        FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
    ModuleInits.push_back(Fn);
  }

  // Add any initializers with specified priority; this uses the same approach
  // as EmitCXXGlobalInitFunc().
  if (!PrioritizedCXXGlobalInits.empty()) {
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      for (; I < PrioE; ++I)
        ModuleInits.push_back(I->second);
    }
  }

  // Now append the ones without specified priority.
  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(F);

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // We now build the initializer for this module, which has a mangled name
  // as per the Itanium ABI . The action of the initializer is guarded so that
  // each init is run just once (even though a module might be imported
  // multiple times via nested use).
  llvm::Function *Fn;
  {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(Primary, Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);

    // If we have a completely empty initializer then we do not want to create
    // the guard variable.
    ConstantAddress GuardAddr = ConstantAddress::invalid();
    if (!ModuleInits.empty()) {
      // Create the guard var.
      llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
          getModule(), Int8Ty, /*isConstant=*/false,
          llvm::GlobalVariable::InternalLinkage,
          llvm::ConstantInt::get(Int8Ty, 0), InitFnName.str() + "__in_chrg");
      CharUnits GuardAlign = CharUnits::One();
      Guard->setAlignment(GuardAlign.getAsAlign());
      GuardAddr = ConstantAddress(Guard, Int8Ty, GuardAlign);
    }
    CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits,
                                                     GuardAddr);
  }

  // We allow for the case that a module object is added to a linked binary
  // without a specific call to the initializer. This also ensures that
  // implementation partition initializers are called when the partition
  // is not imported as an interface.
  AddGlobalCtor(Fn);

  // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
  // functions.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    if (getTriple().isSPIRV())
      Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
    else
      Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  // We are done with the inits.
827 AllImports.clear(); 828 PrioritizedCXXGlobalInits.clear(); 829 CXXGlobalInits.clear(); 830 ModuleInits.clear(); 831 } 832 833 static SmallString<128> getTransformedFileName(llvm::Module &M) { 834 SmallString<128> FileName = llvm::sys::path::filename(M.getName()); 835 836 if (FileName.empty()) 837 FileName = "<null>"; 838 839 for (size_t i = 0; i < FileName.size(); ++i) { 840 // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens 841 // to be the set of C preprocessing numbers. 842 if (!isPreprocessingNumberBody(FileName[i])) 843 FileName[i] = '_'; 844 } 845 846 return FileName; 847 } 848 849 static std::string getPrioritySuffix(unsigned int Priority) { 850 assert(Priority <= 65535 && "Priority should always be <= 65535."); 851 852 // Compute the function suffix from priority. Prepend with zeroes to make 853 // sure the function names are also ordered as priorities. 854 std::string PrioritySuffix = llvm::utostr(Priority); 855 PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix; 856 857 return PrioritySuffix; 858 } 859 860 void 861 CodeGenModule::EmitCXXGlobalInitFunc() { 862 while (!CXXGlobalInits.empty() && !CXXGlobalInits.back()) 863 CXXGlobalInits.pop_back(); 864 865 // When we import C++20 modules, we must run their initializers first. 866 SmallVector<llvm::Function *, 8> ModuleInits; 867 if (CXX20ModuleInits) 868 for (Module *M : ImportedModules) { 869 // No Itanium initializer in header like modules. 870 if (M->isHeaderLikeModule()) 871 continue; 872 // We're allowed to skip the initialization if we are sure it doesn't 873 // do any thing. 
      if (!M->isNamedModuleInterfaceHasInit())
        continue;
      // Declare the imported module's initializer (mangled per the Itanium
      // ABI) as an external function so we can call it first below.
      llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
      SmallString<256> FnName;
      {
        llvm::raw_svector_ostream Out(FnName);
        cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
            .mangleModuleInitializer(M, Out);
      }
      assert(!GetGlobalValue(FnName.str()) &&
             "We should only have one use of the initializer call");
      llvm::Function *Fn = llvm::Function::Create(
          FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
      ModuleInits.push_back(Fn);
    }

  // Nothing to emit if there are no initializers of any kind.
  if (ModuleInits.empty() && CXXGlobalInits.empty() &&
      PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      // [I, PrioE) is the run of initializers sharing I's priority.
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      // Prepend the module inits to the highest priority set.
      if (!ModuleInits.empty()) {
        for (auto *F : ModuleInits)
          LocalCXXGlobalInits.push_back(F);
        ModuleInits.clear();
      }

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // NOTE(review): on sinit/sterm ABIs there is nothing left to emit once the
  // prioritized sets are out; bail before creating an empty function.
  if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
      CXXGlobalInits.empty())
    return;

  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(F);
  CXXGlobalInits.clear();

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above. Module implementation units behave the same
  // way as a non-modular TU with imports.
  llvm::Function *Fn;
  if (CXX20ModuleInits && getContext().getCurrentNamedModule() &&
      !getContext().getCurrentNamedModule()->isModuleImplementation()) {
    // Module interface unit: the TU initializer carries the Itanium-mangled
    // module initializer name and external linkage so importers can call it.
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(getContext().getCurrentNamedModule(), Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);
  } else
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy,
        llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
        FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits);
  AddGlobalCtor(Fn);

  // In OpenCL global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // Current initialization function makes use of function pointers callbacks.
  // We can't support function pointers especially between host and device.
  // However it seems global destruction has little meaning without any
  // dynamic resource allocation on the device and program scope variables are
  // destroyed by the runtime when program is released.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    if (getTriple().isSPIRV())
      Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
    else
      Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  ModuleInits.clear();
}

/// Emit this translation unit's global cleanup function: prioritized sterm
/// finalizers first (grouped per priority), then the unprioritized
/// destructors/finalizers in a single "_GLOBAL__D_a" function.
void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
                         PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit dtor functions in proper order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      // [I, PrioE) is the run of finalizers sharing I's priority.
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        // Null third member: sterm finalizers take no 'this' argument.
        LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
                                             DtorFn.getCallee(), nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, LocalCXXStermFinalizers);
      AddGlobalDtor(Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                       llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

/// Emit the body of \p Fn: calls to each non-null function in \p Decls, in
/// order. If \p Guard is valid, the calls are wrapped in a once-only check
/// on the guard byte (used for TLS and C++20 module initializers).
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    // Null entries were already compacted away by the callers; skip any
    // that remain.
    for (llvm::Function *Decl : Decls)
      if (Decl)
        EmitRuntimeCall(Decl);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

/// Emit the body of \p Fn: calls each destructor or sterm finalizer in
/// \p DtorsOrStermFinalizers in reverse order of registration. Each entry is
/// (callee type, callee, argument); a null argument means a no-argument
/// sterm finalizer call.
void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallBase *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else {
        // If the object lives in a different address space, the `this` pointer
        // address space won't match the dtor `this` param. An addrspacecast is
        // required.
        assert(Arg->getType()->isPointerTy());
        assert(CalleeTy->getParamType(0)->isPointerTy());
        unsigned ActualAddrSpace = Arg->getType()->getPointerAddressSpace();
        unsigned ExpectedAddrSpace =
            CalleeTy->getParamType(0)->getPointerAddressSpace();
        if (ActualAddrSpace != ExpectedAddrSpace) {
          llvm::PointerType *PTy =
              llvm::PointerType::get(getLLVMContext(), ExpectedAddrSpace);
          Arg = llvm::ConstantExpr::getAddrSpaceCast(Arg, PTy);
        }
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);
      }

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());

      if (CGM.shouldEmitConvergenceTokens() && CI->isConvergent())
        CI = addConvergenceControlToken(CI);
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
1196 llvm::Function *CodeGenFunction::generateDestroyHelper( 1197 Address addr, QualType type, Destroyer *destroyer, 1198 bool useEHCleanupForArray, const VarDecl *VD) { 1199 FunctionArgList args; 1200 ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy, 1201 ImplicitParamKind::Other); 1202 args.push_back(&Dst); 1203 1204 const CGFunctionInfo &FI = 1205 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args); 1206 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); 1207 llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction( 1208 FTy, "__cxx_global_array_dtor", FI, VD->getLocation()); 1209 1210 CurEHLocation = VD->getBeginLoc(); 1211 1212 StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor), 1213 getContext().VoidTy, fn, FI, args); 1214 // Emit an artificial location for this function. 1215 auto AL = ApplyDebugLocation::CreateArtificial(*this); 1216 1217 emitDestroy(addr, type, destroyer, useEHCleanupForArray); 1218 1219 FinishFunction(); 1220 1221 return fn; 1222 } 1223