//===--- AMDGPU.h - Declare AMDGPU target feature support -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AMDGPU TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H

#include "clang/Basic/TargetID.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetParser.h"

namespace clang {
namespace targets {

class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {

  static const Builtin::Info BuiltinInfo[];
  static const char *const GCCRegNames[];

  enum AddrSpace {
    Generic = 0,
    Global = 1,
    Local = 3,
    Constant = 4,
    Private = 5
  };
  static const LangASMap AMDGPUDefIsGenMap;
  static const LangASMap AMDGPUDefIsPrivMap;

  llvm::AMDGPU::GPUKind GPUKind;
  unsigned GPUFeatures;
  unsigned WavefrontSize;

  /// Target ID is the device name followed by optional feature names, each
  /// postfixed by a plus or minus sign and delimited by a colon, e.g.
  /// gfx908:xnack+:sramecc-.
  /// If the target ID contains feature+, map it to true.
  /// If the target ID contains feature-, map it to false.
  /// If the target ID does not contain a feature (default), do not map it.
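  /// For example, for the target ID gfx908:xnack+:sramecc- above, this map
  /// would hold xnack -> true and sramecc -> false; for a bare gfx908 it
  /// stays empty.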
  llvm::StringMap<bool> OffloadArchFeatures;
  std::string TargetID;

  bool hasFP64() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
  }

  /// Has fast fma f32
  bool hasFastFMAF() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_FMA_F32);
  }

  /// Has fast fma f64
  bool hasFastFMA() const {
    return getTriple().getArch() == llvm::Triple::amdgcn;
  }

  bool hasFMAF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FMA);
  }

  bool hasFullRateDenormalsF32() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
  }

  bool hasLDEXPF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_LDEXP);
  }

  static bool isAMDGCN(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::amdgcn;
  }

  static bool isR600(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::r600;
  }

public:
  AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);

  void setAddressSpaceMap(bool DefaultIsPrivate);

  void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;

  uint64_t getPointerWidthV(unsigned AddrSpace) const override {
    if (isR600(getTriple()))
      return 32;

    if (AddrSpace == Private || AddrSpace == Local)
      return 32;

    return 64;
  }

  uint64_t getPointerAlignV(unsigned AddrSpace) const override {
    return getPointerWidthV(AddrSpace);
  }

  uint64_t getMaxPointerWidth() const override {
    return getTriple().getArch() == llvm::Triple::amdgcn ? 64 : 32;
  }

  const char *getClobbers() const override { return ""; }

  ArrayRef<const char *> getGCCRegNames() const override;

  ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
    return None;
  }

  /// Accepted register names: (n, m are unsigned integers, n < m)
  /// v
  /// s
  /// a
  /// {vn}, {v[n]}
  /// {sn}, {s[n]}
  /// {an}, {a[n]}
  /// {S}, where S is a special register name
  /// {v[n:m]}
  /// {s[n:m]}
  /// {a[n:m]}
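  ///
  /// For example, "v", "{s0}", "{a[0:3]}" and "{vcc}" are accepted, while
  /// "{v[5:3]}" (range not increasing) and "{foo}" (not a special register
  /// name) are rejected.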
  bool validateAsmConstraint(const char *&Name,
                             TargetInfo::ConstraintInfo &Info) const override {
    static const ::llvm::StringSet<> SpecialRegs({
        "exec", "vcc", "flat_scratch", "m0", "scc", "tba", "tma",
        "flat_scratch_lo", "flat_scratch_hi", "vcc_lo", "vcc_hi", "exec_lo",
        "exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
    });

    switch (*Name) {
    case 'I':
      Info.setRequiresImmediate(-16, 64);
      return true;
    case 'J':
      Info.setRequiresImmediate(-32768, 32767);
      return true;
    case 'A':
    case 'B':
    case 'C':
      Info.setRequiresImmediate();
      return true;
    default:
      break;
    }

    StringRef S(Name);

    if (S == "DA" || S == "DB") {
      Name++;
      Info.setRequiresImmediate();
      return true;
    }

    bool HasLeftParen = false;
    if (S.front() == '{') {
      HasLeftParen = true;
      S = S.drop_front();
    }
    if (S.empty())
      return false;
    if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
      if (!HasLeftParen)
        return false;
      auto E = S.find('}');
      if (!SpecialRegs.count(S.substr(0, E)))
        return false;
      S = S.drop_front(E + 1);
      if (!S.empty())
        return false;
      // Found {S} where S is a special register.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    S = S.drop_front();
    if (!HasLeftParen) {
      if (!S.empty())
        return false;
      // Found s, v or a.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    bool HasLeftBracket = false;
    if (!S.empty() && S.front() == '[') {
      HasLeftBracket = true;
      S = S.drop_front();
    }
    unsigned long long N;
    if (S.empty() || consumeUnsignedInteger(S, 10, N))
      return false;
    if (!S.empty() && S.front() == ':') {
      if (!HasLeftBracket)
        return false;
      S = S.drop_front();
      unsigned long long M;
      if (consumeUnsignedInteger(S, 10, M) || N >= M)
        return false;
    }
    if (HasLeftBracket) {
      if (S.empty() || S.front() != ']')
        return false;
      S = S.drop_front();
    }
    if (S.empty() || S.front() != '}')
      return false;
    S = S.drop_front();
    if (!S.empty())
      return false;
    // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
    // or {a[n:m]}.
    Info.setAllowsRegister();
    Name = S.data() - 1;
    return true;
  }

  // \p Constraint will be left pointing at the last character of
  // the constraint. In practice, it won't be changed unless the
  // constraint is longer than one character.
  std::string convertConstraint(const char *&Constraint) const override {

    StringRef S(Constraint);
    if (S == "DA" || S == "DB") {
      return std::string("^") + std::string(Constraint++, 2);
    }

    const char *Begin = Constraint;
    TargetInfo::ConstraintInfo Info("", "");
    if (validateAsmConstraint(Constraint, Info))
      return std::string(Begin).substr(0, Constraint - Begin + 1);

    Constraint = Begin;
    return std::string(1, *Constraint);
  }

  bool
  initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
                 StringRef CPU,
                 const std::vector<std::string> &FeatureVec) const override;

  ArrayRef<Builtin::Info> getTargetBuiltins() const override;

  bool useFP16ConversionIntrinsics() const override { return false; }

  void getTargetDefines(const LangOptions &Opts,
                        MacroBuilder &Builder) const override;

  BuiltinVaListKind getBuiltinVaListKind() const override {
    return TargetInfo::CharPtrBuiltinVaList;
  }

  bool isValidCPUName(StringRef Name) const override {
    if (getTriple().getArch() == llvm::Triple::amdgcn)
      return llvm::AMDGPU::parseArchAMDGCN(Name) != llvm::AMDGPU::GK_NONE;
    return llvm::AMDGPU::parseArchR600(Name) != llvm::AMDGPU::GK_NONE;
  }

  void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;

  bool setCPU(const std::string &Name) override {
    if (getTriple().getArch() == llvm::Triple::amdgcn) {
      GPUKind = llvm::AMDGPU::parseArchAMDGCN(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrAMDGCN(GPUKind);
    } else {
      GPUKind = llvm::AMDGPU::parseArchR600(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrR600(GPUKind);
    }

    return GPUKind != llvm::AMDGPU::GK_NONE;
  }

  void setSupportedOpenCLOpts() override {
    auto &Opts = getSupportedOpenCLOpts();
    Opts["cl_clang_storage_class_specifiers"] = true;
    Opts["__cl_clang_variadic_functions"] = true;
    Opts["__cl_clang_function_pointers"] = true;
    Opts["__cl_clang_non_portable_kernel_param_types"] = true;
    Opts["__cl_clang_bitfields"] = true;

    bool IsAMDGCN = isAMDGCN(getTriple());

    Opts["cl_khr_fp64"] = hasFP64();
    Opts["__opencl_c_fp64"] = hasFP64();

    if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
      Opts["cl_khr_byte_addressable_store"] = true;
      Opts["cl_khr_global_int32_base_atomics"] = true;
      Opts["cl_khr_global_int32_extended_atomics"] = true;
      Opts["cl_khr_local_int32_base_atomics"] = true;
      Opts["cl_khr_local_int32_extended_atomics"] = true;
    }

    if (IsAMDGCN) {
      Opts["cl_khr_fp16"] = true;
      Opts["cl_khr_int64_base_atomics"] = true;
      Opts["cl_khr_int64_extended_atomics"] = true;
      Opts["cl_khr_mipmap_image"] = true;
      Opts["cl_khr_mipmap_image_writes"] = true;
      Opts["cl_khr_subgroups"] = true;
      Opts["cl_amd_media_ops"] = true;
      Opts["cl_amd_media_ops2"] = true;

      Opts["__opencl_c_images"] = true;
      Opts["__opencl_c_3d_image_writes"] = true;
      Opts["cl_khr_3d_image_writes"] = true;
    }
  }

  LangAS getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const override {
    switch (TK) {
    case OCLTK_Image:
      return LangAS::opencl_constant;

    case OCLTK_ClkEvent:
    case OCLTK_Queue:
    case OCLTK_ReserveID:
      return LangAS::opencl_global;

    default:
      return TargetInfo::getOpenCLTypeAddrSpace(TK);
    }
  }

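  // The numeric address spaces in the following two switches correspond to
  // the AddrSpace enum above: 0 = Generic, 1 = Global, 3 = Local,
  // 4 = Constant, 5 = Private.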
  LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::opencl_generic;
    case 1:
      return LangAS::opencl_global;
    case 3:
      return LangAS::opencl_local;
    case 4:
      return LangAS::opencl_constant;
    case 5:
      return LangAS::opencl_private;
    default:
      return getLangASFromTargetAS(AS);
    }
  }

  LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::Default;
    case 1:
      return LangAS::cuda_device;
    case 3:
      return LangAS::cuda_shared;
    case 4:
      return LangAS::cuda_constant;
    default:
      return getLangASFromTargetAS(AS);
    }
  }

  llvm::Optional<LangAS> getConstantAddressSpace() const override {
    return getLangASFromTargetAS(Constant);
  }

  const llvm::omp::GV &getGridValue() const override {
    switch (WavefrontSize) {
    case 32:
      return llvm::omp::getAMDGPUGridValues<32>();
    case 64:
      return llvm::omp::getAMDGPUGridValues<64>();
    default:
      llvm_unreachable("getGridValue not implemented for this wavesize");
    }
  }

  /// \returns Target specific vtbl ptr address space.
  unsigned getVtblPtrAddressSpace() const override {
    return static_cast<unsigned>(Constant);
  }

  /// \returns If a target requires an address within a target specific address
  /// space \p AddressSpace to be converted in order to be used, then return
  /// the corresponding target specific DWARF address space.
  ///
  /// \returns Otherwise return None and no conversion will be emitted in the
  /// DWARF.
  Optional<unsigned>
  getDWARFAddressSpace(unsigned AddressSpace) const override {
    const unsigned DWARF_Private = 1;
    const unsigned DWARF_Local = 2;
    if (AddressSpace == Private) {
      return DWARF_Private;
    } else if (AddressSpace == Local) {
      return DWARF_Local;
    } else {
      return None;
    }
  }

  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
    switch (CC) {
    default:
      return CCCR_Warning;
    case CC_C:
    case CC_OpenCLKernel:
      return CCCR_OK;
    }
  }

  // In the amdgcn target, the null pointer in the global, constant, and
  // generic address spaces has value 0, but in the private and local
  // address spaces it has value ~0.
  uint64_t getNullPointerValue(LangAS AS) const override {
    // FIXME: Also should handle region.
    return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
      ? ~0 : 0;
  }

  void setAuxTarget(const TargetInfo *Aux) override;

  bool hasBitIntType() const override { return true; }

  // Record offload arch features since they are needed for defining the
  // pre-defined macros.
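  // For example, the target feature "+xnack" on gfx908 is recorded as
  // xnack -> true and reported by getTargetID() below as "gfx908:xnack+";
  // features that are not target ID features (e.g. "+wavefrontsize64") are
  // not recorded here.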
  bool handleTargetFeatures(std::vector<std::string> &Features,
                            DiagnosticsEngine &Diags) override {
    auto TargetIDFeatures =
        getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
    llvm::for_each(Features, [&](const auto &F) {
      assert(F.front() == '+' || F.front() == '-');
      if (F == "+wavefrontsize64")
        WavefrontSize = 64;
      bool IsOn = F.front() == '+';
      StringRef Name = StringRef(F).drop_front();
      if (!llvm::is_contained(TargetIDFeatures, Name))
        return;
      assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
      OffloadArchFeatures[Name] = IsOn;
    });
    return true;
  }

  Optional<std::string> getTargetID() const override {
    if (!isAMDGCN(getTriple()))
      return llvm::None;
    // When -target-cpu is not set, we assume generic code that is valid for
    // all GPUs and use an empty string as the target ID to represent that.
    if (GPUKind == llvm::AMDGPU::GK_NONE)
      return std::string("");
    return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
                                OffloadArchFeatures);
  }
};

} // namespace targets
} // namespace clang

#endif // LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H