//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;

template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
  if (!D)
    return false;
  if (auto *A = D->getAttr<AttrT>())
    return !A->isImplicit();
  return false;
}

void Sema::PushForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  ForceCUDAHostDeviceDepth++;
}

bool Sema::PopForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (ForceCUDAHostDeviceDepth == 0)
    return false;
  ForceCUDAHostDeviceDepth--;
  return true;
}

ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << getCudaConfigureFuncName());
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}

Sema::CUDAFunctionTarget
Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
  bool HasHostAttr = false;
  bool HasDeviceAttr = false;
  bool HasGlobalAttr = false;
  bool HasInvalidTargetAttr = false;
  for (const ParsedAttr &AL : Attrs) {
    switch (AL.getKind()) {
    case ParsedAttr::AT_CUDAGlobal:
      HasGlobalAttr = true;
      break;
    case ParsedAttr::AT_CUDAHost:
      HasHostAttr = true;
      break;
    case ParsedAttr::AT_CUDADevice:
      HasDeviceAttr = true;
      break;
    case ParsedAttr::AT_CUDAInvalidTarget:
      HasInvalidTargetAttr = true;
      break;
    default:
      break;
    }
  }

  if (HasInvalidTargetAttr)
    return CFT_InvalidTarget;

  if (HasGlobalAttr)
    return CFT_Global;

  if (HasHostAttr && HasDeviceAttr)
    return CFT_HostDevice;

  if (HasDeviceAttr)
    return CFT_Device;

  return CFT_Host;
}

template <typename A>
static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
  return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
           return isa<A>(Attribute) &&
                  !(IgnoreImplicitAttr && Attribute->isImplicit());
         });
}
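
// For orientation, a rough sketch of how source-level attributes map to the
// targets computed by IdentifyCUDATarget below. The declarations are
// hypothetical CUDA snippets, not part of this file's compilation:
//
//   __global__ void kernel();          // CFT_Global
//   __device__ void devFn();           // CFT_Device
//   __host__ void hostFn();            // CFT_Host
//   __host__ __device__ void hdFn();   // CFT_HostDevice
//   void plainFn();                    // CFT_Host (no attributes)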

/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// function.
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
                                                  bool IgnoreImplicitHDAttr) {
  // Code that lives outside a function is run on the host.
  if (D == nullptr)
    return CFT_Host;

  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
    if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
      return CFT_HostDevice;
    return CFT_Device;
  } else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
    return CFT_Host;
  } else if ((D->isImplicit() || !D->isUserProvided()) &&
             !IgnoreImplicitHDAttr) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}

/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// variable.
Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
  if (Var->hasAttr<HIPManagedAttr>())
    return CVT_Unified;
  // Only constexpr and const variables with an implicit constant attribute
  // are emitted on both sides. Such variables are promoted to the device side
  // only if they have static constant initializers on the device side.
  if ((Var->isConstexpr() || Var->getType().isConstQualified()) &&
      Var->hasAttr<CUDAConstantAttr>() &&
      !hasExplicitAttr<CUDAConstantAttr>(Var))
    return CVT_Both;
  if (Var->hasAttr<CUDADeviceAttr>() || Var->hasAttr<CUDAConstantAttr>() ||
      Var->hasAttr<CUDASharedAttr>() ||
      Var->getType()->isCUDADeviceBuiltinSurfaceType() ||
      Var->getType()->isCUDADeviceBuiltinTextureType())
    return CVT_Device;
  // Function-scope static variables without an explicit device or constant
  // attribute are emitted
  // - on both sides in host device functions
  // - on the device side in device or global functions
  if (auto *FD = dyn_cast<FunctionDecl>(Var->getDeclContext())) {
    switch (IdentifyCUDATarget(FD)) {
    case CFT_HostDevice:
      return CVT_Both;
    case CFT_Device:
    case CFT_Global:
      return CVT_Device;
    default:
      return CVT_Host;
    }
  }
  return CVT_Host;
}
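
// Illustrative variable classifications (hypothetical CUDA/HIP declarations,
// not part of this file's compilation):
//
//   __managed__ int M;     // CVT_Unified
//   __device__ int D;      // CVT_Device
//   __constant__ int C;    // CVT_Device
//   __shared__ int S;      // CVT_Device (function scope, implicitly static)
//   constexpr int K = 0;   // CVT_Both, once an implicit __constant__
//                          // attribute has been added during device compilation
//   int H;                 // CVT_Host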

// * CUDA Call preference table
//
// F - from,
// T - to
// Ph - preference in host mode
// Pd - preference in device mode
// H - handled in (x)
// Preferences: N:native, SS:same side, HD:host-device, WS:wrong side, --:never.
//
// | F  | T  | Ph  | Pd  |  H  |
// |----+----+-----+-----+-----+
// | d  | d  | N   | N   | (c) |
// | d  | g  | --  | --  | (a) |
// | d  | h  | --  | --  | (e) |
// | d  | hd | HD  | HD  | (b) |
// | g  | d  | N   | N   | (c) |
// | g  | g  | --  | --  | (a) |
// | g  | h  | --  | --  | (e) |
// | g  | hd | HD  | HD  | (b) |
// | h  | d  | --  | --  | (e) |
// | h  | g  | N   | N   | (c) |
// | h  | h  | N   | N   | (c) |
// | h  | hd | HD  | HD  | (b) |
// | hd | d  | WS  | SS  | (d) |
// | hd | g  | SS  | --  |(d/a)|
// | hd | h  | SS  | WS  | (d) |
// | hd | hd | HD  | HD  | (b) |

Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
                             const FunctionDecl *Callee) {
  assert(Callee && "Callee must be valid.");
  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
  CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return CFP_Never;

  // (a) Can't call global from some contexts until we support CUDA's
  // dynamic parallelism.
  if (CalleeTarget == CFT_Global &&
      (CallerTarget == CFT_Global || CallerTarget == CFT_Device))
    return CFP_Never;

  // (b) Calling HostDevice is OK for everyone.
  if (CalleeTarget == CFT_HostDevice)
    return CFP_HostDevice;

  // (c) Best case scenarios
  if (CalleeTarget == CallerTarget ||
      (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
    return CFP_Native;

  // (d) HostDevice behavior depends on compilation mode.
  if (CallerTarget == CFT_HostDevice) {
    // It's OK to call a compilation-mode matching function from an HD one.
    if ((getLangOpts().CUDAIsDevice && CalleeTarget == CFT_Device) ||
        (!getLangOpts().CUDAIsDevice &&
         (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)))
      return CFP_SameSide;

    // Calls from HD to non-mode-matching functions (i.e., to host functions
    // when compiling in device mode or to device functions when compiling in
    // host mode) are allowed at the sema level, but eventually rejected if
    // they're ever codegened. TODO: Reject said calls earlier.
    return CFP_WrongSide;
  }

  // (e) Calling across device/host boundary is not something you should do.
  if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
      (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
    return CFP_Never;

  llvm_unreachable("All cases should've been handled by now.");
}
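
// Row (d) of the table above in action (hypothetical CUDA snippet, not part
// of this file's compilation):
//
//   __device__ void devFn();
//   __host__ void hostFn();
//   __host__ __device__ void hd() {
//     devFn();   // CFP_SameSide in device mode, CFP_WrongSide in host mode
//     hostFn();  // CFP_SameSide in host mode, CFP_WrongSide in device mode
//   }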

template <typename AttrT> static bool hasImplicitAttr(const FunctionDecl *D) {
  if (!D)
    return false;
  if (auto *A = D->getAttr<AttrT>())
    return A->isImplicit();
  return D->isImplicit();
}

bool Sema::isCUDAImplicitHostDeviceFunction(const FunctionDecl *D) {
  bool IsImplicitDevAttr = hasImplicitAttr<CUDADeviceAttr>(D);
  bool IsImplicitHostAttr = hasImplicitAttr<CUDAHostAttr>(D);
  return IsImplicitDevAttr && IsImplicitHostAttr;
}

void Sema::EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
  if (Matches.size() <= 1)
    return;

  using Pair = std::pair<DeclAccessPair, FunctionDecl *>;

  // Gets the CUDA function preference for a call from Caller to Match.
  auto GetCFP = [&](const Pair &Match) {
    return IdentifyCUDAPreference(Caller, Match.second);
  };

  // Find the best call preference among the functions in Matches.
  CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
      Matches.begin(), Matches.end(),
      [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));

  // Erase all functions with lower priority.
  llvm::erase_if(Matches,
                 [&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
}

/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  // Only free functions and static member functions may be global.
  assert(Target1 != Sema::CFT_Global);
  assert(Target2 != Sema::CFT_Global);

  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }

  return false;
}
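
// Concretely, the resolution above behaves as follows (HD = host-device;
// spelled out here for reference):
//
//   (HD, device)     -> device       (HD, host)       -> host
//   (device, HD)     -> device       (host, HD)       -> host
//   (device, device) -> device       (host, host)     -> host
//   (host, device)   -> conflict     (device, host)   -> conflict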

bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  // If the defaulted special member is defined lexically outside of its
  // owning class, or the special member already has explicit device or host
  // attributes, do not infer.
  bool InClass = MemberDecl->getLexicalParent() == MemberDecl->getParent();
  bool HasH = MemberDecl->hasAttr<CUDAHostAttr>();
  bool HasD = MemberDecl->hasAttr<CUDADeviceAttr>();
  bool HasExplicitAttr =
      (HasD && !MemberDecl->getAttr<CUDADeviceAttr>()->isImplicit()) ||
      (HasH && !MemberDecl->getAttr<CUDAHostAttr>()->isImplicit());
  if (!InClass || HasExplicitAttr)
    return false;

  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    llvm::append_range(Bases, llvm::make_pointer_range(ClassDecl->vbases()));
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.value(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.value() << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget FieldMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.value(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.value() << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // If no target was inferred, mark this member as __host__ __device__;
  // it's the least restrictive option that can be invoked from any target.
  bool NeedsH = true, NeedsD = true;
  if (InferredTarget) {
    if (InferredTarget.value() == CFT_Device)
      NeedsH = false;
    else if (InferredTarget.value() == CFT_Host)
      NeedsD = false;
  }

  // We are either setting the attributes for the first time, or the inferred
  // ones must match the previously set ones.
  if (NeedsD && !HasD)
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  if (NeedsH && !HasH)
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));

  return false;
}
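
// An inference conflict in practice (hypothetical CUDA snippet, not part of
// this file's compilation):
//
//   struct A { __host__ A(const A &); };
//   struct B { __device__ B(const B &); };
//   struct C : A, B {};  // C's implicit copy ctor would have to call a
//                        // __host__ and a __device__ member, so it is
//                        // marked with CUDAInvalidTargetAttr above.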

bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
  if (!CD->isDefined() && CD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, CD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial constructor
  if (CD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The constructor function has been defined.
  // The constructor function has no parameters,
  // and the function body is an empty compound statement.
  if (!(CD->hasTrivialBody() && CD->getNumParams() == 0))
    return false;

  // Its class has no virtual functions and no virtual base classes.
  if (CD->getParent()->isDynamicClass())
    return false;

  // Union ctor does not call ctors of its data members.
  if (CD->getParent()->isUnion())
    return true;

  // The only form of initializer allowed is an empty constructor.
  // This will recursively check all base classes and member initializers.
  if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
        if (const CXXConstructExpr *CE =
                dyn_cast<CXXConstructExpr>(CI->getInit()))
          return isEmptyCudaConstructor(Loc, CE->getConstructor());
        return false;
      }))
    return false;

  return true;
}

bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
  // No destructor -> no problem.
  if (!DD)
    return true;

  if (!DD->isDefined() && DD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, DD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial destructor
  if (DD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The destructor function has been defined,
  // and the function body is an empty compound statement.
  if (!DD->hasTrivialBody())
    return false;

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Its class has no virtual functions and no virtual base classes.
  if (ClassDecl->isDynamicClass())
    return false;

  // A union does not have base classes, and a union dtor does not call dtors
  // of its data members.
  if (DD->getParent()->isUnion())
    return true;

  // Only empty destructors are allowed. This will recursively check
  // destructors for all base classes...
  if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
        if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  // ... and member fields.
  if (!llvm::all_of(ClassDecl->fields(), [&](const FieldDecl *Field) {
        if (CXXRecordDecl *RD = Field->getType()
                                    ->getBaseElementTypeUnsafe()
                                    ->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  return true;
}
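
// Illustrative examples of the emptiness rules above (hypothetical CUDA
// snippet, not part of this file's compilation):
//
//   struct Empty { __device__ Empty() {} };  // empty: trivial body, no
//                                            // member initializers
//   struct NonEmpty {
//     int X;
//     __device__ NonEmpty() : X(42) {}       // not empty: has an initializer
//   };
//   __device__ Empty E;     // OK: empty ctor, no dtor
//   __device__ NonEmpty N;  // rejected: requires dynamic initialization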

namespace {
enum CUDAInitializerCheckKind {
  CICK_DeviceOrConstant, // Check initializer for device/constant variable
  CICK_Shared,           // Check initializer for shared variable
};

bool IsDependentVar(VarDecl *VD) {
  if (VD->getType()->isDependentType())
    return true;
  if (const auto *Init = VD->getInit())
    return Init->isValueDependent();
  return false;
}

// Check whether a variable has an allowed initializer for a CUDA device side
// variable with global storage. \p VD may be a host variable to be checked for
// potential promotion to device side variable.
//
// CUDA/HIP allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
                                           CUDAInitializerCheckKind CheckKind) {
  assert(!VD->isInvalidDecl() && VD->hasGlobalStorage());
  assert(!IsDependentVar(VD) && "do not check dependent var");
  const Expr *Init = VD->getInit();
  auto IsEmptyInit = [&](const Expr *Init) {
    if (!Init)
      return true;
    if (const auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
      return S.isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
    }
    return false;
  };
  auto IsConstantInit = [&](const Expr *Init) {
    assert(Init);
    ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.Context,
                                                    /*NoWrongSidedVars=*/true);
    return Init->isConstantInitializer(S.Context,
                                       VD->getType()->isReferenceType());
  };
  auto HasEmptyDtor = [&](VarDecl *VD) {
    if (const auto *RD = VD->getType()->getAsCXXRecordDecl())
      return S.isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
    return true;
  };
  if (CheckKind == CICK_Shared)
    return IsEmptyInit(Init) && HasEmptyDtor(VD);
  return S.LangOpts.GPUAllowDeviceInit ||
         ((IsEmptyInit(Init) || IsConstantInit(Init)) && HasEmptyDtor(VD));
}
} // namespace
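
// Putting the rules together (hypothetical CUDA snippet, not part of this
// file's compilation; hostFn is an assumed __host__ function):
//
//   __constant__ int C = 42;      // OK: constant initializer
//   __device__ int D = 2 * 21;    // OK: constant initializer
//   __shared__ int S;             // OK inside a kernel: no initializer
//   __device__ int E = hostFn();  // rejected, unless device-side init is
//                                 // explicitly allowed
//                                 // (LangOpts.GPUAllowDeviceInit)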

void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
  // Do not check dependent variables since the ctor/dtor/initializer are not
  // determined. Do it after instantiation.
  if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage() ||
      IsDependentVar(VD))
    return;
  const Expr *Init = VD->getInit();
  bool IsSharedVar = VD->hasAttr<CUDASharedAttr>();
  bool IsDeviceOrConstantVar =
      !IsSharedVar &&
      (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>());
  if (IsDeviceOrConstantVar || IsSharedVar) {
    if (HasAllowedCUDADeviceStaticInitializer(
            *this, VD, IsSharedVar ? CICK_Shared : CICK_DeviceOrConstant))
      return;
    Diag(VD->getLocation(),
         IsSharedVar ? diag::err_shared_var_init : diag::err_dynamic_var_init)
        << Init->getSourceRange();
    VD->setInvalidDecl();
  } else {
    // This is a host-side global variable. Check that the initializer is
    // callable from the host side.
    const FunctionDecl *InitFn = nullptr;
    if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
      InitFn = CE->getConstructor();
    } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
      InitFn = CE->getDirectCallee();
    }
    if (InitFn) {
      CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
      if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
        Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
            << InitFnTarget << InitFn;
        Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
        VD->setInvalidDecl();
      }
    }
  }
}
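
// Examples of the diagnostics issued above (hypothetical CUDA snippet, not
// part of this file's compilation; hostFn/devFn are assumed __host__ and
// __device__ functions):
//
//   __device__ int A = hostFn();  // err_dynamic_var_init
//   __shared__ int B = 0;         // err_shared_var_init: __shared__
//                                 // variables may not be initialized
//   int C = devFn();              // host variable initialized by a
//                                 // __device__ function:
//                                 // err_ref_bad_target_global_initializer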

// With -fcuda-host-device-constexpr, an unattributed constexpr function is
// treated as implicitly __host__ __device__, unless:
// * it is a variadic function (device-side variadic functions are not
//   allowed), or
// * a __device__ function with this signature was already declared, in which
//   case we output an error, unless the __device__ decl is in a system
//   header, in which case we leave the constexpr function unattributed.
//
// In addition, all function decls are treated as __host__ __device__ when
// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
//   #pragma clang force_cuda_host_device_begin/end
// pair).
void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
                                       const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  if (ForceCUDAHostDeviceDepth > 0) {
    if (!NewD->hasAttr<CUDAHostAttr>())
      NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
    if (!NewD->hasAttr<CUDADeviceAttr>())
      NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    return;
  }

  if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
      NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
      NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
    return;

  // Is D a __device__ function with the same signature as NewD, ignoring CUDA
  // attributes?
  auto IsMatchingDeviceFn = [&](NamedDecl *D) {
    if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(D))
      D = Using->getTargetDecl();
    FunctionDecl *OldD = D->getAsFunction();
    return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
           !OldD->hasAttr<CUDAHostAttr>() &&
           !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
                       /* ConsiderCudaAttrs = */ false);
  };
  auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
  if (It != Previous.end()) {
    // We found a __device__ function with the same name and signature as NewD
    // (ignoring CUDA attrs). This is an error unless that function is defined
    // in a system header, in which case we simply return without making NewD
    // host+device.
    NamedDecl *Match = *It;
    if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
      Diag(NewD->getLocation(),
           diag::err_cuda_unattributed_constexpr_cannot_overload_device)
          << NewD;
      Diag(Match->getLocation(),
           diag::note_cuda_conflicting_device_function_declared_here);
    }
    return;
  }

  NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
  NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}

// TODO: `__constant__` memory may be a limited resource for certain targets.
// A safeguard may be needed at the end of the compilation pipeline if
// `__constant__` memory usage goes beyond the limit.
void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
  // Do not promote dependent variables since the ctor/dtor/initializer are
  // not determined. Do it after instantiation.
  if (getLangOpts().CUDAIsDevice && !VD->hasAttr<CUDAConstantAttr>() &&
      !VD->hasAttr<CUDASharedAttr>() &&
      (VD->isFileVarDecl() || VD->isStaticDataMember()) &&
      !IsDependentVar(VD) &&
      ((VD->isConstexpr() || VD->getType().isConstQualified()) &&
       HasAllowedCUDADeviceStaticInitializer(*this, VD,
                                             CICK_DeviceOrConstant))) {
    VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
  }
}
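
// Under -fcuda-host-device-constexpr (hypothetical CUDA snippet, not part of
// this file's compilation):
//
//   constexpr int twice(int X) { return 2 * X; }   // implicitly HD
//   __device__ int devUse() { return twice(21); }  // OK: twice is callable
//                                                  // from device code
//
//   __device__ int f();
//   constexpr int f();  // error: cannot overload the __device__ f, unless
//                       // the __device__ declaration is in a system header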

Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
                                                       unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
  SemaDiagnosticBuilder::Kind DiagKind = [&] {
    if (!CurFunContext)
      return SemaDiagnosticBuilder::K_Nop;
    switch (CurrentCUDATarget()) {
    case CFT_Global:
    case CFT_Device:
      return SemaDiagnosticBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device. Defer any errors in device
      // mode until the function is known-emitted.
      if (!getLangOpts().CUDAIsDevice)
        return SemaDiagnosticBuilder::K_Nop;
      if (IsLastErrorImmediate &&
          Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
        return SemaDiagnosticBuilder::K_Immediate;
      return (getEmissionStatus(CurFunContext) ==
              FunctionEmissionStatus::Emitted)
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();
  return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}

Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
                                                     unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
  SemaDiagnosticBuilder::Kind DiagKind = [&] {
    if (!CurFunContext)
      return SemaDiagnosticBuilder::K_Nop;
    switch (CurrentCUDATarget()) {
    case CFT_Host:
      return SemaDiagnosticBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device. Defer any errors in device
      // mode until the function is known-emitted.
      if (getLangOpts().CUDAIsDevice)
        return SemaDiagnosticBuilder::K_Nop;
      if (IsLastErrorImmediate &&
          Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
        return SemaDiagnosticBuilder::K_Immediate;
      return (getEmissionStatus(CurFunContext) ==
              FunctionEmissionStatus::Emitted)
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();
  return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}
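
// Typical usage pattern (a sketch; SomeDiagID is a placeholder for whatever
// diagnostic a caller wants restricted to device code):
//
//   CUDADiagIfDeviceCode(Loc, SomeDiagID) << Args;
//
// In __global__/__device__ functions this emits immediately; in __host__
// __device__ functions compiled for the device it is deferred until the
// function is known to be emitted, so host-only code paths stay silent.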

bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  assert(Callee && "Callee may not be null.");

  auto &ExprEvalCtx = ExprEvalContexts.back();
  if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
    return true;

  // FIXME: Is bailing out early correct here? Should we instead assume that
  // the caller is a global initializer?
  FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
  if (!Caller)
    return true;

  // If the caller is known-emitted, mark the callee as known-emitted.
  // Otherwise, mark the call in our call graph so we can traverse it later.
  bool CallerKnownEmitted =
      getEmissionStatus(Caller) == FunctionEmissionStatus::Emitted;
  SemaDiagnosticBuilder::Kind DiagKind = [this, Caller, Callee,
                                          CallerKnownEmitted] {
    switch (IdentifyCUDAPreference(Caller, Callee)) {
    case CFP_Never:
    case CFP_WrongSide:
      assert(Caller && "Never/wrongSide calls require a non-null caller");
      // If we know the caller will be emitted, we know this wrong-side call
      // will be emitted, so it's an immediate error. Otherwise, defer the
      // error until we know the caller is emitted.
      return CallerKnownEmitted
                 ? SemaDiagnosticBuilder::K_ImmediateWithCallStack
                 : SemaDiagnosticBuilder::K_Deferred;
    default:
      return SemaDiagnosticBuilder::K_Nop;
    }
  }();

  if (DiagKind == SemaDiagnosticBuilder::K_Nop) {
    // For -fgpu-rdc, keep track of external kernels used by host functions.
    if (LangOpts.CUDAIsDevice && LangOpts.GPURelocatableDeviceCode &&
        Callee->hasAttr<CUDAGlobalAttr>() && !Callee->isDefined())
      getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Callee);
    return true;
  }

  // Avoid emitting this error twice for the same location. Using a hashtable
  // like this is unfortunate, but because we must continue parsing as normal
  // after encountering a deferred error, it's otherwise very tricky for us to
  // ensure that we only emit this deferred error once.
  if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
    return true;

  SemaDiagnosticBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
      << IdentifyCUDATarget(Callee) << /*function*/ 0 << Callee
      << IdentifyCUDATarget(Caller);
  if (!Callee->getBuiltinID())
    SemaDiagnosticBuilder(DiagKind, Callee->getLocation(),
                          diag::note_previous_decl, Caller, *this)
        << Callee;
  return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
         DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}
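
// A call this function rejects (hypothetical CUDA snippet, not part of this
// file's compilation):
//
//   __device__ void devFn();
//   void hostFn() {
//     devFn();  // err_ref_bad_target: reference to __device__ function
//               // 'devFn' in __host__ function
//   }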

// Check the wrong-sided reference capture of lambda for CUDA/HIP.
// A lambda function may capture a stack variable by reference when it is
// defined and use the capture by reference when the lambda is called. When
// the capture and use happen on different sides, the capture is invalid and
// should be diagnosed.
void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
                                  const sema::Capture &Capture) {
  // In host compilation we only need to check lambda functions emitted on the
  // host side. In such lambda functions, a reference capture is invalid only
  // if the lambda structure is populated by a device function or kernel and
  // then passed to and called by a host function. However, that is impossible,
  // since a device function or kernel can only call a device function, and a
  // kernel cannot pass a lambda back to a host function since we cannot
  // define a kernel argument type which can hold the lambda before the lambda
  // itself is defined.
  if (!LangOpts.CUDAIsDevice)
    return;

  // A file-scope lambda can only do init captures for global variables, which
  // results in passing by value for these global variables.
  FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
  if (!Caller)
    return;

  // In device compilation, we only need to check lambda functions which are
  // emitted on the device side. For such lambdas, a reference capture is
  // invalid only if the lambda structure is populated by a host function and
  // then passed to and called in a device function or kernel.
  bool CalleeIsDevice = Callee->hasAttr<CUDADeviceAttr>();
  bool CallerIsHost =
      !Caller->hasAttr<CUDAGlobalAttr>() && !Caller->hasAttr<CUDADeviceAttr>();
  bool ShouldCheck = CalleeIsDevice && CallerIsHost;
  if (!ShouldCheck || !Capture.isReferenceCapture())
    return;
  auto DiagKind = SemaDiagnosticBuilder::K_Deferred;
  if (Capture.isVariableCapture()) {
    SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
                          diag::err_capture_bad_target, Callee, *this)
        << Capture.getVariable();
  } else if (Capture.isThisCapture()) {
    // Capture of the 'this' pointer is allowed, since it may be pointing to
    // managed memory which is accessible on both the device and host sides.
    // It only results in an invalid memory access if the 'this' pointer
    // points to memory not accessible on the device side.
    SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
                          diag::warn_maybe_capture_bad_target_this_ptr, Callee,
                          *this);
  }
}

void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
    return;
  Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
}

void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
                                   const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
  for (NamedDecl *OldND : Previous) {
    FunctionDecl *OldFD = OldND->getAsFunction();
    if (!OldFD)
      continue;

    CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
    // Don't allow HD and global functions to overload other functions with the
    // same signature. We allow overloading based on CUDA attributes so that
    // functions can have different implementations on the host and device, but
    // HD/global functions "exist" in some sense on both the host and device,
    // so they should have the same implementation on both sides.
    if (NewTarget != OldTarget &&
        ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
         (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
        !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
                    /* ConsiderCudaAttrs = */ false)) {
      Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
          << NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
      Diag(OldFD->getLocation(), diag::note_previous_declaration);
      NewFD->setInvalidDecl();
      break;
    }
  }
}

template <typename AttrTy>
static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
                              const FunctionDecl &TemplateFD) {
  if (AttrTy *Attribute = TemplateFD.getAttr<AttrTy>()) {
    AttrTy *Clone = Attribute->clone(S.Context);
    Clone->setInherited(true);
    FD->addAttr(Clone);
  }
}

void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
                                  const FunctionTemplateDecl &TD) {
  const FunctionDecl &TemplateFD = *TD.getTemplatedDecl();
  copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
}

std::string Sema::getCudaConfigureFuncName() const {
  if (getLangOpts().HIP)
    return getLangOpts().HIPUseNewLaunchAPI ? "__hipPushCallConfiguration"
                                            : "hipConfigureCall";

  // New CUDA kernel launch sequence.
  if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH))
    return "__cudaPushCallConfiguration";

  // Legacy CUDA kernel configuration call.
  return "cudaConfigureCall";
}