
Searched refs:CUDA (Results 1 – 25 of 90) sorted by relevance


/freebsd/contrib/llvm-project/clang/lib/Sema/
SemaBase.cpp
64 bool ShouldDefer = getLangOpts().CUDA && getLangOpts().GPUDeferDiag && in Diag()
78 ? SemaRef.CUDA().DiagIfDeviceCode(Loc, DiagID) in Diag()
79 : SemaRef.CUDA().DiagIfHostCode(Loc, DiagID); in Diag()
SemaCUDA.cpp
42 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in PushForceHostDevice()
47 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in PopForceHostDevice()
742 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in maybeAddHostDeviceAttrs()
821 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in DiagIfDeviceCode()
853 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in DiagIfHostCode()
883 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in CheckCall()
998 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in SetLambdaAttrs()
1007 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in checkTargetOverload()
Sema.cpp
1689 auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD); in emitCallStackNotes()
1690 while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) { in emitCallStackNotes()
1697 FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD); in emitCallStackNotes()
1801 S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc}; in checkFunc()
1953 if (getLangOpts().CUDA) in targetDiag()
1954 return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID) in targetDiag()
1955 : CUDA().DiagIfHostCode(Loc, DiagID); in targetDiag()
SemaLambda.cpp
483 if (!MCtx && (getLangOpts().CUDA || getLangOpts().SYCLIsDevice || in handleLambdaNumbering()
1453 if (getLangOpts().CUDA) in ActOnStartOfLambdaDefinition()
1454 CUDA().SetLambdaAttrs(Method); in ActOnStartOfLambdaDefinition()
2196 if (LangOpts.CUDA) in BuildLambdaExpr()
2197 CUDA().CheckLambdaCapture(CallOperator, From); in BuildLambdaExpr()
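
Note: the Sema matches above share one dispatch shape: under CUDA, a diagnostic is routed through SemaCUDA's DiagIfDeviceCode or DiagIfHostCode depending on CUDAIsDevice, so that "wrong-side" diagnostics can be deferred (GPUDeferDiag) until the offending function is actually known to be emitted. Below is a minimal standalone sketch of that dispatch shape only; the struct, the free functions, and main() are illustrative stand-ins, not Clang's real API.

    #include <iostream>
    #include <string>

    // Illustrative stand-in for the relevant language options (not the real class).
    struct SketchLangOpts {
      bool CUDA = false;
      bool CUDAIsDevice = false;
    };

    // Stand-ins for SemaCUDA::DiagIfDeviceCode / DiagIfHostCode: in Clang these
    // either emit immediately or record a deferred diagnostic keyed on the
    // current function, depending on whether it is known to be emitted.
    void diagIfDeviceCode(const std::string &Msg) {
      std::cout << "[device side, possibly deferred] " << Msg << '\n';
    }
    void diagIfHostCode(const std::string &Msg) {
      std::cout << "[host side, possibly deferred] " << Msg << '\n';
    }

    // Mirrors the shape of the Sema.cpp targetDiag() match above.
    void targetDiag(const SketchLangOpts &Opts, const std::string &Msg) {
      if (Opts.CUDA)
        return Opts.CUDAIsDevice ? diagIfDeviceCode(Msg) : diagIfHostCode(Msg);
      std::cout << "[immediate] " << Msg << '\n';
    }

    int main() {
      targetDiag({/*CUDA=*/true, /*CUDAIsDevice=*/true}, "example diagnostic");
      targetDiag({/*CUDA=*/false, /*CUDAIsDevice=*/false}, "example diagnostic");
    }
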
/freebsd/contrib/llvm-project/clang/lib/Frontend/
FrontendOptions.cpp
22 .Case("cui", InputKind(Language::CUDA).getPreprocessed()) in getInputKindForExtension()
33 .Cases("cu", "cuh", Language::CUDA) in getInputKindForExtension()
/freebsd/lib/clang/headers/
Makefile
206 INCSGROUPS+= CUDA
208 CUDA+= cuda_wrappers/algorithm
209 CUDA+= cuda_wrappers/cmath
210 CUDA+= cuda_wrappers/complex
211 CUDA+= cuda_wrappers/new
/freebsd/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/
cmath
1 /*===---- cmath - CUDA wrapper for <cmath> ---------------------------------===
31 // libc++ will need long double variants of these functions, but CUDA does not
71 // which we can't handle on GPU. We need to forward those to CUDA-provided
new
1 /*===---- new - CUDA wrapper for <new> -------------------------------------===
31 // and CUDA-specific macros are not available yet.
complex
1 /*===---- complex - CUDA wrapper for <complex> ------------------------------===
47 // functions that don't exist when compiling CUDA device code).
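
Note: the Makefile group and the cuda_wrappers/ headers above are two halves of one interposition trick: the wrapper directory is placed ahead of the C++ standard library on the include path, each wrapper forwards to the real header, and then adds or adjusts what device compilation needs. A minimal sketch of that shape, assuming the usual #include_next mechanism; the guard macros and the added overload are illustrative, not the actual contents of cuda_wrappers/cmath.

    /*===-- sketch_cmath_wrapper - illustrative CUDA-style wrapper for <cmath> --===*/
    #pragma once

    // Forward to whichever <cmath> appears later on the include path; this is
    // what lets a wrapper shadow the standard header without replacing it.
    #include_next <cmath>

    #if defined(__CUDA__) && defined(__CUDA_ARCH__)
    // Device-side additions would go here. This overload is purely
    // illustrative; the real wrappers forward selected functions to
    // CUDA-provided implementations rather than redefining them.
    __device__ inline float sketch_fabs(float __x) { return ::fabsf(__x); }
    #endif
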
/freebsd/contrib/llvm-project/clang/lib/Basic/
LangStandards.cpp
38 case Language::CUDA: in languageToString()
113 case Language::CUDA: in getDefaultLanguageStandard()
LangOptions.cpp
184 Opts.CUDA = Lang == Language::CUDA || Opts.HIP; in setLangDefaults()
195 } else if (Opts.CUDA) { in setLangDefaults()
Builtins.cpp
117 if (!LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG) in builtinIsSupported()
/freebsd/contrib/llvm-project/clang/include/clang/Basic/
LangOptions.def
259 LANGOPT(CUDA , 1, 0, "CUDA")
268 LANGOPT(OpenMPCUDANumSMs , 32, 0, "Number of SMs for CUDA devices.")
269 LANGOPT(OpenMPCUDABlocksPerSM , 32, 0, "Number of blocks per SM for CUDA devices.")
287 LANGOPT(CUDAIsDevice , 1, 0, "compiling for CUDA device")
288 LANGOPT(CUDAAllowVariadicFunctions, 1, 0, "allowing variadic functions in CUDA device code")
292 …eTemplates, 1, 0, "assume template functions to be implicitly host device by default for CUDA/HIP")
295 LANGOPT(GPUDeferDiag, 1, 0, "defer host/device related diagnostic messages for CUDA/HIP")
296 …gSideOverloads, 1, 0, "always exclude wrong side overloads in overloading resolution for CUDA/HIP")
304 …sume that kernels are launched with uniform block sizes (default true for CUDA/HIP and false other…
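
Note: the LangOptions.def entries above are X-macro records: LANGOPT(Name, Bits, Default, Description) carries no code of its own, and each includer defines LANGOPT to expand the records into whatever it needs (bit-fields, default setters, serialization, and so on). Below is a self-contained sketch of that pattern using a local macro list; MY_LANGOPTS, SketchLangOpts, and main() are illustrative, not Clang's actual expansion sites.

    #include <cstdio>

    // A local stand-in for LangOptions.def, reusing three of the records above.
    #define MY_LANGOPTS(LANGOPT)                                               \
      LANGOPT(CUDA, 1, 0, "CUDA")                                              \
      LANGOPT(CUDAIsDevice, 1, 0, "compiling for CUDA device")                 \
      LANGOPT(GPUDeferDiag, 1, 0,                                              \
              "defer host/device related diagnostic messages for CUDA/HIP")

    // Expansion 1: declare one bit-field per record.
    struct SketchLangOpts {
    #define DECLARE(Name, Bits, Default, Desc) unsigned Name : Bits;
      MY_LANGOPTS(DECLARE)
    #undef DECLARE
    };

    int main() {
      // Expansion 2: print each record's default value and description.
    #define DUMP(Name, Bits, Default, Desc) std::printf("%-14s %d  %s\n", #Name, Default, Desc);
      MY_LANGOPTS(DUMP)
    #undef DUMP
      SketchLangOpts Opts{};
      Opts.CUDA = 1;
      return Opts.CUDA ? 0 : 1;
    }
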
DiagnosticDriverKinds.td
58 def err_drv_cuda_bad_gpu_arch : Error<"unsupported CUDA gpu architecture: %0">;
63 "cannot find CUDA installation; provide its path via '--cuda-path', or pass "
64 "'-nocudainc' to build without CUDA includes">;
66 "cannot find libdevice for %0; provide path to different CUDA installation "
107 "GPU arch %0 is supported by CUDA versions between %1 and %2 (inclusive), "
108 "but installation at %3 is %4; use '--cuda-path' to specify a different CUDA "
112 "CUDA version%0 is newer than the latest%select{| partially}1 supported version %2">,
115 "CUDA version %0 is only partially supported">,
120 "mixed CUDA and HIP compilation is not supported">;
368 "NVPTX target requires CUDA 9.2 or above; CUDA %0 detected">;
[all …]
LangStandard.h
41 CUDA, enumerator
Features.def
307 // CUDA/HIP Features
308 FEATURE(cuda_noinline_keyword, LangOpts.CUDA)
309 EXTENSION(cuda_implicit_host_device_templates, LangOpts.CUDA && LangOpts.OffloadImplicitHostDeviceT…
Attr.td
408 def CUDA : LangOpt<"CUDA">;
1338 // CUDA attributes are spelled __attribute__((attr)) or __declspec(__attr__),
1343 let LangOpts = [CUDA];
1349 let LangOpts = [CUDA];
1355 let LangOpts = [CUDA];
1361 let LangOpts = [CUDA];
1367 let LangOpts = [CUDA];
1377 let LangOpts = [CUDA];
1389 let LangOpts = [CUDA];
1397 let LangOpts = [CUDA];
[all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/NVPTX/
NVPTXSubtarget.cpp
44 // Set default to PTX 6.0 (CUDA 9.0) in initializeSubtargetDependencies()
60 // Enable handles for Kepler+, where CUDA supports indirect surfaces and in hasImageHandles()
62 if (TM.getDrvInterface() == NVPTX::CUDA) in hasImageHandles()
NVPTXLowerArgs.cpp
568 if (TM.getDrvInterface() == NVPTX::CUDA) { in runOnKernelFunction()
595 else if (TM.getDrvInterface() == NVPTX::CUDA) in runOnKernelFunction()
598 TM.getDrvInterface() == NVPTX::CUDA) { in runOnKernelFunction()
NVPTX.h
75 CUDA enumerator
NVPTXAsmPrinter.h
253 NVPTX::CUDA) {} in NVPTXAsmPrinter()
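
Note: the NVPTX matches above key behaviour off the target's driver interface: the backend distinguishes the CUDA interface from the OpenCL one (NVPTX::NVCL), and, per the NVPTXSubtarget.cpp comment, only uses indirect image handles for CUDA on Kepler+ parts. A small standalone sketch of that dispatch; the enum, the sm_30 cutoff, and the function below are illustrative simplifications, not the backend's real logic.

    #include <cstdio>

    namespace sketch {
    // Stand-in for the DrvInterface enumerator matched in NVPTX.h above.
    enum DrvInterface { NVCL, CUDA };

    // Stand-in for the idea behind NVPTXSubtarget::hasImageHandles(): indirect
    // surface/texture handles are only used for the CUDA interface on Kepler+
    // (sm_30 and newer) devices.
    bool hasImageHandles(DrvInterface DI, unsigned SmVersion) {
      return DI == CUDA && SmVersion >= 30;
    }
    } // namespace sketch

    int main() {
      std::printf("CUDA, sm_35: %d\n", sketch::hasImageHandles(sketch::CUDA, 35));
      std::printf("NVCL, sm_35: %d\n", sketch::hasImageHandles(sketch::NVCL, 35));
    }
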
/freebsd/contrib/llvm-project/clang/lib/CodeGen/Targets/
NVPTX.cpp
233 if (M.getLangOpts().CUDA) { in setTargetAttributes()
261 if (M.getLangOpts().CUDA) { in setTargetAttributes()
/freebsd/sys/powerpc/conf/
NOTES
58 device cuda # VIA-CUDA ADB interface
/freebsd/contrib/llvm-project/clang/lib/CodeGen/
TargetInfo.cpp
128 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && in getGlobalVarAddressSpace()
/freebsd/contrib/llvm-project/clang/include/clang/Sema/
SemaInternal.h
41 if (!LangOpts.CUDA || !D) in DeclAttrsMatchCUDAMode()
