//===--- AMDGPU.h - Declare AMDGPU target feature support -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AMDGPU TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H

#include "clang/Basic/TargetID.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/Compiler.h"
#include "llvm/TargetParser/TargetParser.h"
#include "llvm/TargetParser/Triple.h"
#include <optional>

namespace clang {
namespace targets {

class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {

  static const char *const GCCRegNames[];

  static const LangASMap AMDGPUDefIsGenMap;
  static const LangASMap AMDGPUDefIsPrivMap;

  llvm::AMDGPU::GPUKind GPUKind;
  unsigned GPUFeatures;
  unsigned WavefrontSize;

  /// Whether to use cumode or WGP mode. True for cumode. False for WGP mode.
  bool CUMode;

  /// Whether the GPU has image instructions.
  bool HasImage = false;

  /// The target ID is the device name followed by optional feature names,
  /// each postfixed by a plus or minus sign and delimited by a colon,
  /// e.g. gfx908:xnack+:sramecc-.
  /// If the target ID contains feature+, map it to true.
  /// If the target ID contains feature-, map it to false.
  /// If the target ID does not contain a feature (default), do not map it.
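  /// For example (illustrative): for the target ID "gfx908:xnack+:sramecc-"
  /// this map would hold "xnack" -> true and "sramecc" -> false, while a bare
  /// "gfx908" leaves the map empty.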
  llvm::StringMap<bool> OffloadArchFeatures;
  std::string TargetID;

  bool hasFP64() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
  }

  /// Has fast fma f32
  bool hasFastFMAF() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_FMA_F32);
  }

  /// Has fast fma f64
  bool hasFastFMA() const {
    return getTriple().getArch() == llvm::Triple::amdgcn;
  }

  bool hasFMAF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FMA);
  }

  bool hasFullRateDenormalsF32() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
  }

  bool hasLDEXPF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_LDEXP);
  }

  static bool isAMDGCN(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::amdgcn;
  }

  static bool isR600(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::r600;
  }

public:
  AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);

  void setAddressSpaceMap(bool DefaultIsPrivate);

  void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;

  uint64_t getPointerWidthV(LangAS AS) const override {
    if (isR600(getTriple()))
      return 32;
    unsigned TargetAS = getTargetAddressSpace(AS);

    if (TargetAS == llvm::AMDGPUAS::PRIVATE_ADDRESS ||
        TargetAS == llvm::AMDGPUAS::LOCAL_ADDRESS)
      return 32;

    return 64;
  }

  uint64_t getPointerAlignV(LangAS AddrSpace) const override {
    return getPointerWidthV(AddrSpace);
  }

  uint64_t getMaxPointerWidth() const override {
    return getTriple().getArch() == llvm::Triple::amdgcn ? 64 : 32;
  }
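
  // Descriptive note: r600 is a pure 32-bit target, while amdgcn uses 64-bit
  // pointers except in the private and local address spaces, which are
  // 32-bit (see getPointerWidthV above).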

  bool hasBFloat16Type() const override { return isAMDGCN(getTriple()); }

  std::string_view getClobbers() const override { return ""; }

  ArrayRef<const char *> getGCCRegNames() const override;

  ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
    return std::nullopt;
  }

  /// Accepted register names: (n, m are unsigned integers, n < m)
  /// v
  /// s
  /// a
  /// {vn}, {v[n]}
  /// {sn}, {s[n]}
  /// {an}, {a[n]}
  /// {S} , where S is a special register name
  /// {v[n:m]}
  /// {s[n:m]}
  /// {a[n:m]}
  bool validateAsmConstraint(const char *&Name,
                             TargetInfo::ConstraintInfo &Info) const override {
    static const ::llvm::StringSet<> SpecialRegs({
        "exec", "vcc", "flat_scratch", "m0", "scc", "tba", "tma",
        "flat_scratch_lo", "flat_scratch_hi", "vcc_lo", "vcc_hi", "exec_lo",
        "exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
    });

    switch (*Name) {
    case 'I':
      Info.setRequiresImmediate(-16, 64);
      return true;
    case 'J':
      Info.setRequiresImmediate(-32768, 32767);
      return true;
    case 'A':
    case 'B':
    case 'C':
      Info.setRequiresImmediate();
      return true;
    default:
      break;
    }

    StringRef S(Name);

    if (S == "DA" || S == "DB") {
      Name++;
      Info.setRequiresImmediate();
      return true;
    }

    bool HasLeftParen = false;
    if (S.consume_front("{"))
      HasLeftParen = true;
    if (S.empty())
      return false;
    if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
      if (!HasLeftParen)
        return false;
      auto E = S.find('}');
      if (!SpecialRegs.count(S.substr(0, E)))
        return false;
      S = S.drop_front(E + 1);
      if (!S.empty())
        return false;
      // Found {S} where S is a special register.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    S = S.drop_front();
    if (!HasLeftParen) {
      if (!S.empty())
        return false;
      // Found s, v or a.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    bool HasLeftBracket = false;
    if (S.consume_front("["))
      HasLeftBracket = true;
    unsigned long long N;
    if (S.empty() || consumeUnsignedInteger(S, 10, N))
      return false;
    if (S.consume_front(":")) {
      if (!HasLeftBracket)
        return false;
      unsigned long long M;
      if (consumeUnsignedInteger(S, 10, M) || N >= M)
        return false;
    }
    if (HasLeftBracket) {
      if (!S.consume_front("]"))
        return false;
    }
    if (!S.consume_front("}"))
      return false;
    if (!S.empty())
      return false;
    // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
    // or {a[n:m]}.
    Info.setAllowsRegister();
    Name = S.data() - 1;
    return true;
  }

  // \p Constraint will be left pointing at the last character of
  // the constraint. In practice, it won't be changed unless the
  // constraint is longer than one character.
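  // For example (illustrative): a validated register constraint such as
  // "{v[8:9]}" is returned unchanged, while the multi-character immediate
  // constraints "DA" and "DB" are returned prefixed with '^' (e.g. "^DA").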
  std::string convertConstraint(const char *&Constraint) const override {

    StringRef S(Constraint);
    if (S == "DA" || S == "DB") {
      return std::string("^") + std::string(Constraint++, 2);
    }

    const char *Begin = Constraint;
    TargetInfo::ConstraintInfo Info("", "");
    if (validateAsmConstraint(Constraint, Info))
      return std::string(Begin).substr(0, Constraint - Begin + 1);

    Constraint = Begin;
    return std::string(1, *Constraint);
  }

  bool
  initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
                 StringRef CPU,
                 const std::vector<std::string> &FeatureVec) const override;

  ArrayRef<Builtin::Info> getTargetBuiltins() const override;

  bool useFP16ConversionIntrinsics() const override { return false; }

  void getTargetDefines(const LangOptions &Opts,
                        MacroBuilder &Builder) const override;

  BuiltinVaListKind getBuiltinVaListKind() const override {
    return TargetInfo::CharPtrBuiltinVaList;
  }

  bool isValidCPUName(StringRef Name) const override {
    if (getTriple().getArch() == llvm::Triple::amdgcn)
      return llvm::AMDGPU::parseArchAMDGCN(Name) != llvm::AMDGPU::GK_NONE;
    return llvm::AMDGPU::parseArchR600(Name) != llvm::AMDGPU::GK_NONE;
  }

  void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;

  bool setCPU(const std::string &Name) override {
    if (getTriple().getArch() == llvm::Triple::amdgcn) {
      GPUKind = llvm::AMDGPU::parseArchAMDGCN(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrAMDGCN(GPUKind);
    } else {
      GPUKind = llvm::AMDGPU::parseArchR600(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrR600(GPUKind);
    }

    return GPUKind != llvm::AMDGPU::GK_NONE;
  }

  void setSupportedOpenCLOpts() override {
    auto &Opts = getSupportedOpenCLOpts();
    Opts["cl_clang_storage_class_specifiers"] = true;
    Opts["__cl_clang_variadic_functions"] = true;
    Opts["__cl_clang_function_pointers"] = true;
    Opts["__cl_clang_non_portable_kernel_param_types"] = true;
    Opts["__cl_clang_bitfields"] = true;

    bool IsAMDGCN = isAMDGCN(getTriple());

    Opts["cl_khr_fp64"] = hasFP64();
    Opts["__opencl_c_fp64"] = hasFP64();

    if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
      Opts["cl_khr_byte_addressable_store"] = true;
      Opts["cl_khr_global_int32_base_atomics"] = true;
      Opts["cl_khr_global_int32_extended_atomics"] = true;
      Opts["cl_khr_local_int32_base_atomics"] = true;
      Opts["cl_khr_local_int32_extended_atomics"] = true;
    }

    if (IsAMDGCN) {
      Opts["cl_khr_fp16"] = true;
      Opts["cl_khr_int64_base_atomics"] = true;
      Opts["cl_khr_int64_extended_atomics"] = true;
      Opts["cl_khr_mipmap_image"] = true;
      Opts["cl_khr_mipmap_image_writes"] = true;
      Opts["cl_khr_subgroups"] = true;
      Opts["cl_amd_media_ops"] = true;
      Opts["cl_amd_media_ops2"] = true;

      Opts["__opencl_c_images"] = true;
      Opts["__opencl_c_3d_image_writes"] = true;
      Opts["cl_khr_3d_image_writes"] = true;
    }
  }

  LangAS getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const override {
    switch (TK) {
    case OCLTK_Image:
      return LangAS::opencl_constant;

    case OCLTK_ClkEvent:
    case OCLTK_Queue:
    case OCLTK_ReserveID:
      return LangAS::opencl_global;

    default:
      return TargetInfo::getOpenCLTypeAddrSpace(TK);
    }
  }
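
  // Note (assumes the standard AMDGPU address-space numbering): in the two
  // mappings below, 0 = flat/generic, 1 = global, 3 = local (LDS),
  // 4 = constant, and 5 = private (scratch).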
  LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::opencl_generic;
    case 1:
      return LangAS::opencl_global;
    case 3:
      return LangAS::opencl_local;
    case 4:
      return LangAS::opencl_constant;
    case 5:
      return LangAS::opencl_private;
    default:
      return getLangASFromTargetAS(AS);
    }
  }

  LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::Default;
    case 1:
      return LangAS::cuda_device;
    case 3:
      return LangAS::cuda_shared;
    case 4:
      return LangAS::cuda_constant;
    default:
      return getLangASFromTargetAS(AS);
    }
  }

  std::optional<LangAS> getConstantAddressSpace() const override {
    return getLangASFromTargetAS(llvm::AMDGPUAS::CONSTANT_ADDRESS);
  }

  const llvm::omp::GV &getGridValue() const override {
    switch (WavefrontSize) {
    case 32:
      return llvm::omp::getAMDGPUGridValues<32>();
    case 64:
      return llvm::omp::getAMDGPUGridValues<64>();
    default:
      llvm_unreachable("getGridValue not implemented for this wavesize");
    }
  }

  /// \returns Target specific vtbl ptr address space.
  unsigned getVtblPtrAddressSpace() const override {
    return static_cast<unsigned>(llvm::AMDGPUAS::CONSTANT_ADDRESS);
  }

  /// \returns If a target requires an address within a target specific address
  /// space \p AddressSpace to be converted in order to be used, then return
  /// the corresponding target specific DWARF address space.
  ///
  /// \returns Otherwise return std::nullopt and no conversion will be emitted
  /// in the DWARF.
  std::optional<unsigned>
  getDWARFAddressSpace(unsigned AddressSpace) const override {
    const unsigned DWARF_Private = 1;
    const unsigned DWARF_Local = 2;
    if (AddressSpace == llvm::AMDGPUAS::PRIVATE_ADDRESS) {
      return DWARF_Private;
    } else if (AddressSpace == llvm::AMDGPUAS::LOCAL_ADDRESS) {
      return DWARF_Local;
    } else {
      return std::nullopt;
    }
  }

  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
    switch (CC) {
    default:
      return CCCR_Warning;
    case CC_C:
    case CC_OpenCLKernel:
    case CC_AMDGPUKernelCall:
      return CCCR_OK;
    }
  }

  // On the amdgcn target, the null pointer in the global, constant, and
  // generic address spaces has the value 0, but in the private and local
  // address spaces it has the value ~0.
  uint64_t getNullPointerValue(LangAS AS) const override {
    // FIXME: Also should handle region.
    return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
               ? ~0
               : 0;
  }

  void setAuxTarget(const TargetInfo *Aux) override;

  bool hasBitIntType() const override { return true; }

  // Record offload arch features since they are needed for defining the
  // pre-defined macros.
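  // For example (illustrative): a feature vector such as
  // {"+wavefrontsize64", "+cumode", "+xnack"} sets WavefrontSize to 64 and
  // CUMode to true, and records "xnack" -> true in OffloadArchFeatures when
  // xnack is a possible target ID feature for the GPU; wavefrontsize64 and
  // cumode are not target ID features and are not recorded.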
  bool handleTargetFeatures(std::vector<std::string> &Features,
                            DiagnosticsEngine &Diags) override {
    auto TargetIDFeatures =
        getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
    for (const auto &F : Features) {
      assert(F.front() == '+' || F.front() == '-');
      if (F == "+wavefrontsize64")
        WavefrontSize = 64;
      else if (F == "+cumode")
        CUMode = true;
      else if (F == "-cumode")
        CUMode = false;
      else if (F == "+image-insts")
        HasImage = true;
      bool IsOn = F.front() == '+';
      StringRef Name = StringRef(F).drop_front();
      if (!llvm::is_contained(TargetIDFeatures, Name))
        continue;
      assert(!OffloadArchFeatures.contains(Name));
      OffloadArchFeatures[Name] = IsOn;
    }
    return true;
  }

  std::optional<std::string> getTargetID() const override {
    if (!isAMDGCN(getTriple()))
      return std::nullopt;
    // When -target-cpu is not set, we assume generic code that is valid for
    // all GPUs and use an empty string as the target ID to represent that.
    if (GPUKind == llvm::AMDGPU::GK_NONE)
      return std::string("");
    return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
                                OffloadArchFeatures);
  }

  bool hasHIPImageSupport() const override { return HasImage; }
};

} // namespace targets
} // namespace clang

#endif // LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H