//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Cuda.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;

void Sema::PushForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  ForceCUDAHostDeviceDepth++;
}

bool Sema::PopForceCUDAHostDevice() {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (ForceCUDAHostDeviceDepth == 0)
    return false;
  ForceCUDAHostDeviceDepth--;
  return true;
}

ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << getCudaConfigureFuncName());
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return BuildCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}

Sema::CUDAFunctionTarget
Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
  bool HasHostAttr = false;
  bool HasDeviceAttr = false;
  bool HasGlobalAttr = false;
  bool HasInvalidTargetAttr = false;
  for (const ParsedAttr &AL : Attrs) {
    switch (AL.getKind()) {
    case ParsedAttr::AT_CUDAGlobal:
      HasGlobalAttr = true;
      break;
    case ParsedAttr::AT_CUDAHost:
      HasHostAttr = true;
      break;
    case ParsedAttr::AT_CUDADevice:
      HasDeviceAttr = true;
      break;
    case ParsedAttr::AT_CUDAInvalidTarget:
      HasInvalidTargetAttr = true;
      break;
    default:
      break;
    }
  }

  if (HasInvalidTargetAttr)
    return CFT_InvalidTarget;

  if (HasGlobalAttr)
    return CFT_Global;

  if (HasHostAttr && HasDeviceAttr)
    return CFT_HostDevice;

  if (HasDeviceAttr)
    return CFT_Device;

  return CFT_Host;
}

template <typename A>
static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
  return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
           return isa<A>(Attribute) &&
                  !(IgnoreImplicitAttr && Attribute->isImplicit());
         });
}
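
// For illustration (hypothetical user code, not part of this file), the
// targets these routines compute:
//
//   __global__ void kernel();        // CFT_Global
//   __device__ int dev();            // CFT_Device
//   __host__ __device__ int hd();    // CFT_HostDevice
//   int plain();                     // no attributes -> CFT_Host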

/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// function.
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
                                                  bool IgnoreImplicitHDAttr) {
  // Code that lives outside a function is run on the host.
  if (D == nullptr)
    return CFT_Host;

  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
    if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
      return CFT_HostDevice;
    return CFT_Device;
  } else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
    return CFT_Host;
  } else if (D->isImplicit() && !IgnoreImplicitHDAttr) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}

// * CUDA Call preference table
//
// F  - from,
// T  - to
// Ph - preference in host mode
// Pd - preference in device mode
// H  - handled in (x)
// Preferences: N:native, SS:same side, HD:host-device, WS:wrong side, --:never.
//
// | F  | T  | Ph  | Pd  |  H  |
// |----+----+-----+-----+-----+
// | d  | d  | N   | N   | (c) |
// | d  | g  | --  | --  | (a) |
// | d  | h  | --  | --  | (e) |
// | d  | hd | HD  | HD  | (b) |
// | g  | d  | N   | N   | (c) |
// | g  | g  | --  | --  | (a) |
// | g  | h  | --  | --  | (e) |
// | g  | hd | HD  | HD  | (b) |
// | h  | d  | --  | --  | (e) |
// | h  | g  | N   | N   | (c) |
// | h  | h  | N   | N   | (c) |
// | h  | hd | HD  | HD  | (b) |
// | hd | d  | WS  | SS  | (d) |
// | hd | g  | SS  | --  |(d/a)|
// | hd | h  | SS  | WS  | (d) |
// | hd | hd | HD  | HD  | (b) |
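
// Reading the table (hypothetical user code, for illustration only):
//
//   __device__ int dev();
//   __host__ __device__ int hd() { return dev(); }  // hd -> d: SS in device
//                                                    // mode, WS in host mode
//   __global__ void kern() { dev(); }                // g -> d: N (native)
//
// The hd -> dev call is CFP_SameSide when compiling for device and
// CFP_WrongSide when compiling for host; see cases (c) and (d) below.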

Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
                             const FunctionDecl *Callee) {
  assert(Callee && "Callee must be valid.");
  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
  CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return CFP_Never;

  // (a) Can't call global from some contexts until we support CUDA's
  // dynamic parallelism.
  if (CalleeTarget == CFT_Global &&
      (CallerTarget == CFT_Global || CallerTarget == CFT_Device))
    return CFP_Never;

  // (b) Calling HostDevice is OK for everyone.
  if (CalleeTarget == CFT_HostDevice)
    return CFP_HostDevice;

  // (c) Best case scenarios
  if (CalleeTarget == CallerTarget ||
      (CallerTarget == CFT_Host && CalleeTarget == CFT_Global) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
    return CFP_Native;

  // (d) HostDevice behavior depends on compilation mode.
  if (CallerTarget == CFT_HostDevice) {
    // It's OK to call a compilation-mode-matching function from an HD one.
    if ((getLangOpts().CUDAIsDevice && CalleeTarget == CFT_Device) ||
        (!getLangOpts().CUDAIsDevice &&
         (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)))
      return CFP_SameSide;

    // Calls from HD to non-mode-matching functions (i.e., to host functions
    // when compiling in device mode or to device functions when compiling in
    // host mode) are allowed at the sema level, but eventually rejected if
    // they're ever codegened.  TODO: Reject said calls earlier.
    return CFP_WrongSide;
  }

  // (e) Calling across the device/host boundary is not something you should do.
  if ((CallerTarget == CFT_Host && CalleeTarget == CFT_Device) ||
      (CallerTarget == CFT_Device && CalleeTarget == CFT_Host) ||
      (CallerTarget == CFT_Global && CalleeTarget == CFT_Host))
    return CFP_Never;

  llvm_unreachable("All cases should've been handled by now.");
}

void Sema::EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
  if (Matches.size() <= 1)
    return;

  using Pair = std::pair<DeclAccessPair, FunctionDecl *>;

  // Gets the CUDA function preference for a call from Caller to Match.
  auto GetCFP = [&](const Pair &Match) {
    return IdentifyCUDAPreference(Caller, Match.second);
  };

  // Find the best call preference among the functions in Matches.
  CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
      Matches.begin(), Matches.end(),
      [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));

  // Erase all functions with lower priority.
  llvm::erase_if(Matches,
                 [&](const Pair &Match) { return GetCFP(Match) < BestCFP; });
}

/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  // Only free functions and static member functions may be global.
  assert(Target1 != Sema::CFT_Global);
  assert(Target2 != Sema::CFT_Global);

  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }

  return false;
}
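
// Resolution examples for the function above (illustrative): HD resolves with
// anything (HD + D -> D, HD + H -> H), identical targets resolve to themselves
// (D + D -> D), and distinct concrete targets conflict (D + H -> conflict,
// reported by returning true).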

bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR.getMethod())
      continue;

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR.getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device) {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    } else if (InferredTarget.getValue() == CFT_Host) {
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    } else {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    }
  } else {
    // If no target was inferred, mark this member as __host__ __device__;
    // it's the least restrictive option that can be invoked from any target.
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }

  return false;
}
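
// For illustration (hypothetical user code), the inference above gives:
//
//   struct HostBase { __host__ HostBase(); };
//   struct DevBase  { __device__ DevBase(); };
//
//   struct A : HostBase {};           // implicit A() is inferred __host__
//   struct B : HostBase, DevBase {};  // conflict: implicit B() gets
//                                     // CUDAInvalidTargetAttr and is
//                                     // diagnosed if used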

bool Sema::isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD) {
  if (!CD->isDefined() && CD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, CD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A constructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial constructor
  if (CD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The constructor function has been defined.
  // The constructor function has no parameters,
  // and the function body is an empty compound statement.
  if (!(CD->hasTrivialBody() && CD->getNumParams() == 0))
    return false;

  // Its class has no virtual functions and no virtual base classes.
  if (CD->getParent()->isDynamicClass())
    return false;

  // The only form of initializer allowed is an empty constructor.
  // This will recursively check all base classes and member initializers.
  if (!llvm::all_of(CD->inits(), [&](const CXXCtorInitializer *CI) {
        if (const CXXConstructExpr *CE =
                dyn_cast<CXXConstructExpr>(CI->getInit()))
          return isEmptyCudaConstructor(Loc, CE->getConstructor());
        return false;
      }))
    return false;

  return true;
}

bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
  // No destructor -> no problem.
  if (!DD)
    return true;

  if (!DD->isDefined() && DD->isTemplateInstantiation())
    InstantiateFunctionDefinition(Loc, DD->getFirstDecl());

  // (E.2.3.1, CUDA 7.5) A destructor for a class type is considered
  // empty at a point in the translation unit, if it is either a
  // trivial destructor
  if (DD->isTrivial())
    return true;

  // ... or it satisfies all of the following conditions:
  // The destructor function has been defined,
  // and the function body is an empty compound statement.
  if (!DD->hasTrivialBody())
    return false;

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Its class has no virtual functions and no virtual base classes.
  if (ClassDecl->isDynamicClass())
    return false;

  // Only empty destructors are allowed. This will recursively check
  // destructors for all base classes...
  if (!llvm::all_of(ClassDecl->bases(), [&](const CXXBaseSpecifier &BS) {
        if (CXXRecordDecl *RD = BS.getType()->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  // ... and member fields.
  if (!llvm::all_of(ClassDecl->fields(), [&](const FieldDecl *Field) {
        if (CXXRecordDecl *RD = Field->getType()
                                    ->getBaseElementTypeUnsafe()
                                    ->getAsCXXRecordDecl())
          return isEmptyCudaDestructor(Loc, RD->getDestructor());
        return true;
      }))
    return false;

  return true;
}
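
// Illustrative examples for the E.2.3.1 rules above (hypothetical user code):
//
//   struct Empty    { __device__ Empty() {} };           // empty: body is {}
//   struct NonEmpty { __device__ NonEmpty() { v = 1; }   // non-empty body
//                     int v; };
//
//   __device__ Empty e;     // OK
//   __device__ NonEmpty n;  // rejected by checkAllowedCUDAInitializer below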

void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
  if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage())
    return;
  const Expr *Init = VD->getInit();
  if (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>() ||
      VD->hasAttr<CUDASharedAttr>()) {
    assert(!VD->isStaticLocal() || VD->hasAttr<CUDASharedAttr>());
    bool AllowedInit = false;
    if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
      AllowedInit =
          isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
    // We'll allow constant initializers even if the constructor is non-empty
    // according to CUDA rules. This deviates from NVCC, but allows us to
    // handle things like constexpr constructors.
    if (!AllowedInit &&
        (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
      AllowedInit = VD->getInit()->isConstantInitializer(
          Context, VD->getType()->isReferenceType());

    // Also make sure that the destructor, if there is one, is empty.
    if (AllowedInit)
      if (CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl())
        AllowedInit =
            isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());

    if (!AllowedInit) {
      Diag(VD->getLocation(), VD->hasAttr<CUDASharedAttr>()
                                  ? diag::err_shared_var_init
                                  : diag::err_dynamic_var_init)
          << Init->getSourceRange();
      VD->setInvalidDecl();
    }
  } else {
    // This is a host-side global variable. Check that the initializer is
    // callable from the host side.
    const FunctionDecl *InitFn = nullptr;
    if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
      InitFn = CE->getConstructor();
    } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
      InitFn = CE->getDirectCallee();
    }
    if (InitFn) {
      CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
      if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
        Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
            << InitFnTarget << InitFn;
        Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
        VD->setInvalidDecl();
      }
    }
  }
}
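
// For illustration (hypothetical user code):
//
//   __host__ int host_fn();
//   __device__ int dev_fn();
//   struct CE { constexpr CE() : v(7) {} int v; };
//
//   __device__ CE a;               // constant initializer: accepted even
//                                  // though the constructor is non-empty
//   __device__ int b = host_fn();  // dynamic init -> err_dynamic_var_init
//   int c = dev_fn();              // host variable with a __device__
//                                  // initializer -> bad-target diagnostic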

// With -fcuda-host-device-constexpr, an unattributed constexpr function is
// treated as implicitly __host__ __device__, unless:
// * it is a variadic function (device-side variadic functions are not
//   allowed), or
// * a __device__ function with this signature was already declared, in which
//   case we output an error, unless the __device__ decl is in a system header,
//   in which case we leave the constexpr function unattributed.
//
// In addition, all function decls are treated as __host__ __device__ when
// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
//   #pragma clang force_cuda_host_device_begin/end
// pair).
void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
                                       const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");

  if (ForceCUDAHostDeviceDepth > 0) {
    if (!NewD->hasAttr<CUDAHostAttr>())
      NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
    if (!NewD->hasAttr<CUDADeviceAttr>())
      NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    return;
  }

  if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
      NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
      NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
    return;

  // Is D a __device__ function with the same signature as NewD, ignoring CUDA
  // attributes?
  auto IsMatchingDeviceFn = [&](NamedDecl *D) {
    if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(D))
      D = Using->getTargetDecl();
    FunctionDecl *OldD = D->getAsFunction();
    return OldD && OldD->hasAttr<CUDADeviceAttr>() &&
           !OldD->hasAttr<CUDAHostAttr>() &&
           !IsOverload(NewD, OldD, /* UseMemberUsingDeclRules = */ false,
                       /* ConsiderCudaAttrs = */ false);
  };
  auto It = llvm::find_if(Previous, IsMatchingDeviceFn);
  if (It != Previous.end()) {
    // We found a __device__ function with the same name and signature as NewD
    // (ignoring CUDA attrs). This is an error unless that function is defined
    // in a system header, in which case we simply return without making NewD
    // host+device.
    NamedDecl *Match = *It;
    if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
      Diag(NewD->getLocation(),
           diag::err_cuda_unattributed_constexpr_cannot_overload_device)
          << NewD;
      Diag(Match->getLocation(),
           diag::note_cuda_conflicting_device_function_declared_here);
    }
    return;
  }

  NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
  NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
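
// For illustration (hypothetical user code) under
// -fcuda-host-device-constexpr:
//
//   constexpr int twice(int x) { return 2 * x; }  // implicitly __host__
//                                                 // __device__
//   __device__ int f(int);
//   constexpr int f(int x) { return x; }  // error: clashes with the
//                                         // __device__ f above, unless that
//                                         // decl is in a system header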

// Do we know that we will eventually codegen the given function?
static bool IsKnownEmitted(Sema &S, FunctionDecl *FD) {
  // Templates are emitted when they're instantiated.
  if (FD->isDependentContext())
    return false;

  // When compiling for device, host functions are never emitted.  Similarly,
  // when compiling for host, device and global functions are never emitted.
  // (Technically, we do emit a host-side stub for global functions, but this
  // doesn't count for our purposes here.)
  Sema::CUDAFunctionTarget T = S.IdentifyCUDATarget(FD);
  if (S.getLangOpts().CUDAIsDevice && T == Sema::CFT_Host)
    return false;
  if (!S.getLangOpts().CUDAIsDevice &&
      (T == Sema::CFT_Device || T == Sema::CFT_Global))
    return false;

  // Check whether this function is externally visible -- if so, it's
  // known-emitted.
  //
  // We have to check the GVA linkage of the function's *definition* -- if we
  // only have a declaration, we don't know whether or not the function will
  // be emitted, because (say) the definition could include "inline".
  FunctionDecl *Def = FD->getDefinition();

  if (Def &&
      !isDiscardableGVALinkage(S.getASTContext().GetGVALinkageForFunction(Def)))
    return true;

  // Otherwise, the function is known-emitted if it's in our set of
  // known-emitted functions.
  return S.DeviceKnownEmittedFns.count(FD) > 0;
}

Sema::DeviceDiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
                                                   unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  DeviceDiagBuilder::Kind DiagKind = [this] {
    switch (CurrentCUDATarget()) {
    case CFT_Global:
    case CFT_Device:
      return DeviceDiagBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device.  Defer any errors in device
      // mode until the function is known-emitted.
      if (getLangOpts().CUDAIsDevice) {
        return IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
                   ? DeviceDiagBuilder::K_ImmediateWithCallStack
                   : DeviceDiagBuilder::K_Deferred;
      }
      return DeviceDiagBuilder::K_Nop;

    default:
      return DeviceDiagBuilder::K_Nop;
    }
  }();
  return DeviceDiagBuilder(DiagKind, Loc, DiagID,
                           dyn_cast<FunctionDecl>(CurContext), *this);
}

Sema::DeviceDiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
                                                 unsigned DiagID) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  DeviceDiagBuilder::Kind DiagKind = [this] {
    switch (CurrentCUDATarget()) {
    case CFT_Host:
      return DeviceDiagBuilder::K_Immediate;
    case CFT_HostDevice:
      // An HD function counts as host code if we're compiling for host, and
      // device code if we're compiling for device.  Defer any errors in device
      // mode until the function is known-emitted.
      if (getLangOpts().CUDAIsDevice)
        return DeviceDiagBuilder::K_Nop;

      return IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
                 ? DeviceDiagBuilder::K_ImmediateWithCallStack
                 : DeviceDiagBuilder::K_Deferred;
    default:
      return DeviceDiagBuilder::K_Nop;
    }
  }();
  return DeviceDiagBuilder(DiagKind, Loc, DiagID,
                           dyn_cast<FunctionDecl>(CurContext), *this);
}
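
// A sketch of how Sema code is expected to use these builders
// (diag::err_some_cuda_restriction is a hypothetical ID used only for
// illustration):
//
//   CUDADiagIfDeviceCode(Loc, diag::err_some_cuda_restriction) << Arg;
//
// In an HD function compiled for device, the diagnostic is created as
// K_Deferred and only fires if the function later becomes known-emitted.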

bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  assert(Callee && "Callee may not be null.");

  auto &ExprEvalCtx = ExprEvalContexts.back();
  if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
    return true;

  // FIXME: Is bailing out early correct here?  Should we instead assume that
  // the caller is a global initializer?
  FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
  if (!Caller)
    return true;

  // If the caller is known-emitted, mark the callee as known-emitted.
  // Otherwise, mark the call in our call graph so we can traverse it later.
  bool CallerKnownEmitted = IsKnownEmitted(*this, Caller);
  if (CallerKnownEmitted) {
    // Host-side references to a __global__ function refer to the stub, so the
    // function itself is never emitted and therefore should not be marked.
    if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
      markKnownEmitted(*this, Caller, Callee, Loc, IsKnownEmitted);
  } else {
    // If we have
    //   host fn calls kernel fn calls host+device,
    // the HD function does not get instantiated on the host.  We model this by
    // omitting the call to the kernel from the callgraph.  This ensures that,
    // when compiling for host, only HD functions actually called from the host
    // get marked as known-emitted.
    if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
      DeviceCallGraph[Caller].insert({Callee, Loc});
  }

  DeviceDiagBuilder::Kind DiagKind = [this, Caller, Callee,
                                      CallerKnownEmitted] {
    switch (IdentifyCUDAPreference(Caller, Callee)) {
    case CFP_Never:
      return DeviceDiagBuilder::K_Immediate;
    case CFP_WrongSide:
      assert(Caller && "WrongSide calls require a non-null caller");
      // If we know the caller will be emitted, we know this wrong-side call
      // will be emitted, so it's an immediate error.  Otherwise, defer the
      // error until we know the caller is emitted.
      return CallerKnownEmitted ? DeviceDiagBuilder::K_ImmediateWithCallStack
                                : DeviceDiagBuilder::K_Deferred;
    default:
      return DeviceDiagBuilder::K_Nop;
    }
  }();

  if (DiagKind == DeviceDiagBuilder::K_Nop)
    return true;

  // Avoid emitting this error twice for the same location.  Using a hashtable
  // like this is unfortunate, but because we must continue parsing as normal
  // after encountering a deferred error, it's otherwise very tricky for us to
  // ensure that we only emit this deferred error once.
  if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
    return true;

  DeviceDiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
      << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
  DeviceDiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
                    Caller, *this)
      << Callee;
  return DiagKind != DeviceDiagBuilder::K_Immediate &&
         DiagKind != DeviceDiagBuilder::K_ImmediateWithCallStack;
}

void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
    return;
  FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
  if (!CurFn)
    return;
  CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
  if (Target == CFT_Global || Target == CFT_Device) {
    Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
  } else if (Target == CFT_HostDevice) {
    Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }
}
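
// For illustration (hypothetical user code): a lambda's operator() inherits a
// target from the enclosing function,
//
//   __global__ void kern() {
//     auto fn = [] { return 1; };  // operator() is implicitly __device__
//   }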

void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
                                   const LookupResult &Previous) {
  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
  CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
  for (NamedDecl *OldND : Previous) {
    FunctionDecl *OldFD = OldND->getAsFunction();
    if (!OldFD)
      continue;

    CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
    // Don't allow HD and global functions to overload other functions with the
    // same signature.  We allow overloading based on CUDA attributes so that
    // functions can have different implementations on the host and device, but
    // HD/global functions "exist" in some sense on both the host and device,
    // so they should have the same implementation on both sides.
    if (NewTarget != OldTarget &&
        ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
         (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
        !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
                    /* ConsiderCudaAttrs = */ false)) {
      Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
          << NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
      Diag(OldFD->getLocation(), diag::note_previous_declaration);
      NewFD->setInvalidDecl();
      break;
    }
  }
}

template <typename AttrTy>
static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
                              const FunctionDecl &TemplateFD) {
  if (AttrTy *Attribute = TemplateFD.getAttr<AttrTy>()) {
    AttrTy *Clone = Attribute->clone(S.Context);
    Clone->setInherited(true);
    FD->addAttr(Clone);
  }
}

void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
                                  const FunctionTemplateDecl &TD) {
  const FunctionDecl &TemplateFD = *TD.getTemplatedDecl();
  copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
  copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
}

std::string Sema::getCudaConfigureFuncName() const {
  if (getLangOpts().HIP)
    return "hipConfigureCall";

  // New CUDA kernel launch sequence.
  if (CudaFeatureEnabled(Context.getTargetInfo().getSDKVersion(),
                         CudaFeature::CUDA_USES_NEW_LAUNCH))
    return "__cudaPushCallConfiguration";

  // Legacy CUDA kernel configuration call.
  return "cudaConfigureCall";
}
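
// For illustration (a sketch, not an authoritative description of CUDA
// lowering): a launch such as
//
//   kern<<<blocks, threads>>>(args);
//
// is built by ActOnCUDAExecConfigExpr above as a call to the configuration
// function named here: cudaConfigureCall, __cudaPushCallConfiguration, or
// hipConfigureCall, depending on language mode and SDK version.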