//===-- AMDGPUTargetStreamer.cpp - AMDGPU Target Streamer Methods ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AMDGPU specific target streamer methods.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetStreamer.h"
#include "AMDGPUPTNote.h"
#include "AMDKernelCodeT.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/FormattedStream.h"

using namespace llvm;
using namespace llvm::AMDGPU;

//===----------------------------------------------------------------------===//
// AMDGPUTargetStreamer
//===----------------------------------------------------------------------===//

bool AMDGPUTargetStreamer::EmitHSAMetadataV2(StringRef HSAMetadataString) {
  HSAMD::Metadata HSAMetadata;
  if (HSAMD::fromString(HSAMetadataString, HSAMetadata))
    return false;
  return EmitHSAMetadata(HSAMetadata);
}

bool AMDGPUTargetStreamer::EmitHSAMetadataV3(StringRef HSAMetadataString) {
  msgpack::Document HSAMetadataDoc;
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
    return false;
  return EmitHSAMetadata(HSAMetadataDoc, false);
}

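// Translate an ELF e_flags EF_AMDGPU_MACH value back to a GPU name, preferring
// the AMDGCN name and falling back to the R600 name.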
StringRef AMDGPUTargetStreamer::getArchNameFromElfMach(unsigned ElfMach) {
  AMDGPU::GPUKind AK;

  switch (ElfMach) {
  default: llvm_unreachable("Unhandled ELF::EF_AMDGPU type");
  case ELF::EF_AMDGPU_MACH_R600_R600: AK = GK_R600; break;
  case ELF::EF_AMDGPU_MACH_R600_R630: AK = GK_R630; break;
  case ELF::EF_AMDGPU_MACH_R600_RS880: AK = GK_RS880; break;
  case ELF::EF_AMDGPU_MACH_R600_RV670: AK = GK_RV670; break;
  case ELF::EF_AMDGPU_MACH_R600_RV710: AK = GK_RV710; break;
  case ELF::EF_AMDGPU_MACH_R600_RV730: AK = GK_RV730; break;
  case ELF::EF_AMDGPU_MACH_R600_RV770: AK = GK_RV770; break;
  case ELF::EF_AMDGPU_MACH_R600_CEDAR: AK = GK_CEDAR; break;
  case ELF::EF_AMDGPU_MACH_R600_CYPRESS: AK = GK_CYPRESS; break;
  case ELF::EF_AMDGPU_MACH_R600_JUNIPER: AK = GK_JUNIPER; break;
  case ELF::EF_AMDGPU_MACH_R600_REDWOOD: AK = GK_REDWOOD; break;
  case ELF::EF_AMDGPU_MACH_R600_SUMO: AK = GK_SUMO; break;
  case ELF::EF_AMDGPU_MACH_R600_BARTS: AK = GK_BARTS; break;
  case ELF::EF_AMDGPU_MACH_R600_CAICOS: AK = GK_CAICOS; break;
  case ELF::EF_AMDGPU_MACH_R600_CAYMAN: AK = GK_CAYMAN; break;
  case ELF::EF_AMDGPU_MACH_R600_TURKS: AK = GK_TURKS; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX600: AK = GK_GFX600; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX601: AK = GK_GFX601; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX602: AK = GK_GFX602; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX700: AK = GK_GFX700; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX701: AK = GK_GFX701; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX702: AK = GK_GFX702; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX703: AK = GK_GFX703; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX704: AK = GK_GFX704; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX705: AK = GK_GFX705; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX801: AK = GK_GFX801; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX802: AK = GK_GFX802; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX803: AK = GK_GFX803; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX805: AK = GK_GFX805; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX810: AK = GK_GFX810; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX900: AK = GK_GFX900; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX902: AK = GK_GFX902; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX904: AK = GK_GFX904; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX906: AK = GK_GFX906; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX908: AK = GK_GFX908; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX909: AK = GK_GFX909; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C: AK = GK_GFX90C; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010: AK = GK_GFX1010; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011: AK = GK_GFX1011; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012: AK = GK_GFX1012; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030: AK = GK_GFX1030; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031: AK = GK_GFX1031; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032: AK = GK_GFX1032; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033: AK = GK_GFX1033; break;
  case ELF::EF_AMDGPU_MACH_NONE: AK = GK_NONE; break;
  }

  StringRef GPUName = getArchNameAMDGCN(AK);
  if (GPUName != "")
    return GPUName;
  return getArchNameR600(AK);
}

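// Translate a GPU name to its ELF e_flags EF_AMDGPU_MACH value, trying the
// AMDGCN names first and then the R600 names.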
unsigned AMDGPUTargetStreamer::getElfMach(StringRef GPU) {
  AMDGPU::GPUKind AK = parseArchAMDGCN(GPU);
  if (AK == AMDGPU::GPUKind::GK_NONE)
    AK = parseArchR600(GPU);

  switch (AK) {
  case GK_R600: return ELF::EF_AMDGPU_MACH_R600_R600;
  case GK_R630: return ELF::EF_AMDGPU_MACH_R600_R630;
  case GK_RS880: return ELF::EF_AMDGPU_MACH_R600_RS880;
  case GK_RV670: return ELF::EF_AMDGPU_MACH_R600_RV670;
  case GK_RV710: return ELF::EF_AMDGPU_MACH_R600_RV710;
  case GK_RV730: return ELF::EF_AMDGPU_MACH_R600_RV730;
  case GK_RV770: return ELF::EF_AMDGPU_MACH_R600_RV770;
  case GK_CEDAR: return ELF::EF_AMDGPU_MACH_R600_CEDAR;
  case GK_CYPRESS: return ELF::EF_AMDGPU_MACH_R600_CYPRESS;
  case GK_JUNIPER: return ELF::EF_AMDGPU_MACH_R600_JUNIPER;
  case GK_REDWOOD: return ELF::EF_AMDGPU_MACH_R600_REDWOOD;
  case GK_SUMO: return ELF::EF_AMDGPU_MACH_R600_SUMO;
  case GK_BARTS: return ELF::EF_AMDGPU_MACH_R600_BARTS;
  case GK_CAICOS: return ELF::EF_AMDGPU_MACH_R600_CAICOS;
  case GK_CAYMAN: return ELF::EF_AMDGPU_MACH_R600_CAYMAN;
  case GK_TURKS: return ELF::EF_AMDGPU_MACH_R600_TURKS;
  case GK_GFX600: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX600;
  case GK_GFX601: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX601;
  case GK_GFX602: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX602;
  case GK_GFX700: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX700;
  case GK_GFX701: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX701;
  case GK_GFX702: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX702;
  case GK_GFX703: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX703;
  case GK_GFX704: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX704;
  case GK_GFX705: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX705;
  case GK_GFX801: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX801;
  case GK_GFX802: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX802;
  case GK_GFX803: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX803;
  case GK_GFX805: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX805;
  case GK_GFX810: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX810;
  case GK_GFX900: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX900;
  case GK_GFX902: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX902;
  case GK_GFX904: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX904;
  case GK_GFX906: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX906;
  case GK_GFX908: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX908;
  case GK_GFX909: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX909;
  case GK_GFX90C: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C;
  case GK_GFX1010: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010;
  case GK_GFX1011: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011;
  case GK_GFX1012: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012;
  case GK_GFX1030: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030;
  case GK_GFX1031: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031;
  case GK_GFX1032: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032;
  case GK_GFX1033: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033;
  case GK_NONE: return ELF::EF_AMDGPU_MACH_NONE;
  }

  llvm_unreachable("unknown GPU");
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetAsmStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetAsmStreamer::AMDGPUTargetAsmStreamer(MCStreamer &S,
                                                 formatted_raw_ostream &OS)
    : AMDGPUTargetStreamer(S), OS(OS) {}

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as directives.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetAsmStreamer::finish() {
  std::string S;
  getPALMetadata()->toString(S);
  OS << S;

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {
  OS << "\t.amdgcn_target \"" << Target << "\"\n";
}

void AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {
  OS << "\t.hsa_code_object_version " <<
        Twine(Major) << "," << Twine(Minor) << '\n';
}

void
AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  OS << "\t.hsa_code_object_isa " <<
        Twine(Major) << "," << Twine(Minor) << "," << Twine(Stepping) <<
        ",\"" << VendorName << "\",\"" << ArchName << "\"\n";
}

void
AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
  OS << "\t.amd_kernel_code_t\n";
  dumpAmdKernelCode(&Header, OS, "\t\t");
  OS << "\t.end_amd_kernel_code_t\n";
}

void AMDGPUTargetAsmStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  switch (Type) {
  default: llvm_unreachable("Invalid AMDGPU symbol type");
  case ELF::STT_AMDGPU_HSA_KERNEL:
    OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n';
    break;
  }
}

void AMDGPUTargetAsmStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
}

bool AMDGPUTargetAsmStreamer::EmitISAVersion(StringRef IsaVersionString) {
  OS << "\t.amd_amdgpu_isa \"" << IsaVersionString << "\"\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  OS << '\t' << HSAMD::AssemblerDirectiveBegin << '\n';
  OS << HSAMetadataString << '\n';
  OS << '\t' << HSAMD::AssemblerDirectiveEnd << '\n';
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    msgpack::Document &HSAMetadataDoc, bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  raw_string_ostream StrOS(HSAMetadataString);
  HSAMetadataDoc.toYAML(StrOS);

  OS << '\t' << HSAMD::V3::AssemblerDirectiveBegin << '\n';
  OS << StrOS.str() << '\n';
  OS << '\t' << HSAMD::V3::AssemblerDirectiveEnd << '\n';
  return true;
}

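// Pad the end of the code section with s_code_end instructions: align to a
// 64-byte boundary and then emit an additional 48 s_code_end dwords.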
bool AMDGPUTargetAsmStreamer::EmitCodeEnd() {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  OS << "\t.p2alignl 6, " << Encoded_s_code_end << '\n';
  OS << "\t.fill 48, 4, " << Encoded_s_code_end << '\n';
  return true;
}

void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KD, uint64_t NextVGPR, uint64_t NextSGPR,
    bool ReserveVCC, bool ReserveFlatScr, bool ReserveXNACK) {
  IsaVersion IVersion = getIsaVersion(STI.getCPU());

  OS << "\t.amdhsa_kernel " << KernelName << '\n';

#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME) \
  STREAM << "\t\t" << DIRECTIVE << " " \
         << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';

  OS << "\t\t.amdhsa_group_segment_fixed_size " << KD.group_segment_fixed_size
     << '\n';
  OS << "\t\t.amdhsa_private_segment_fixed_size "
     << KD.private_segment_fixed_size << '\n';

  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_queue_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
  if (IVersion.Major >= 10)
    PRINT_FIELD(OS, ".amdhsa_wavefront_size32", KD,
                kernel_code_properties,
                amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
  PRINT_FIELD(
      OS, ".amdhsa_system_sgpr_private_segment_wavefront_offset", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  // These directives are required.
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';

  if (!ReserveVCC)
    OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  if (IVersion.Major >= 7 && !ReserveFlatScr)
    OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
  if (IVersion.Major >= 8 && ReserveXNACK != hasXNACK(STI))
    OS << "\t\t.amdhsa_reserve_xnack_mask " << ReserveXNACK << '\n';

  PRINT_FIELD(OS, ".amdhsa_float_round_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_dx10_clamp", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
  PRINT_FIELD(OS, ".amdhsa_ieee_mode", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
  if (IVersion.Major >= 9)
    PRINT_FIELD(OS, ".amdhsa_fp16_overflow", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL);
  if (IVersion.Major >= 10) {
    PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_FIELD(OS, ".amdhsa_memory_ordered", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_FIELD(OS, ".amdhsa_forward_progress", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
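  // Exception enable bits from compute_pgm_rsrc2.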
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
#undef PRINT_FIELD

  OS << "\t.end_amdhsa_kernel\n";
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetELFStreamer
//===----------------------------------------------------------------------===//

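// Set up the ELF header e_flags for the subtarget: the EF_AMDGPU_MACH value
// for the GPU plus the XNACK and SRAM-ECC feature bits.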
AMDGPUTargetELFStreamer::AMDGPUTargetELFStreamer(MCStreamer &S,
                                                 const MCSubtargetInfo &STI)
    : AMDGPUTargetStreamer(S), Streamer(S), Os(STI.getTargetTriple().getOS()) {
  MCAssembler &MCA = getStreamer().getAssembler();
  unsigned EFlags = MCA.getELFHeaderEFlags();

  EFlags &= ~ELF::EF_AMDGPU_MACH;
  EFlags |= getElfMach(STI.getCPU());

  EFlags &= ~ELF::EF_AMDGPU_XNACK;
  if (AMDGPU::hasXNACK(STI))
    EFlags |= ELF::EF_AMDGPU_XNACK;

  EFlags &= ~ELF::EF_AMDGPU_SRAM_ECC;
  if (AMDGPU::hasSRAMECC(STI))
    EFlags |= ELF::EF_AMDGPU_SRAM_ECC;

  MCA.setELFHeaderEFlags(EFlags);
}

MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
  return static_cast<MCELFStreamer &>(Streamer);
}

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as a .note record.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetELFStreamer::finish() {
  std::string Blob;
  const char *Vendor = getPALMetadata()->getVendor();
  unsigned Type = getPALMetadata()->getType();
  getPALMetadata()->toBlob(Type, Blob);
  if (Blob.empty())
    return;
  EmitNote(Vendor, MCConstantExpr::create(Blob.size(), getContext()), Type,
           [&](MCELFStreamer &OS) { OS.emitBytes(Blob); });

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

void AMDGPUTargetELFStreamer::EmitNote(
    StringRef Name, const MCExpr *DescSZ, unsigned NoteType,
    function_ref<void(MCELFStreamer &)> EmitDesc) {
  auto &S = getStreamer();
  auto &Context = S.getContext();

  auto NameSZ = Name.size() + 1;

  unsigned NoteFlags = 0;
  // TODO Apparently, this is currently needed for OpenCL as mentioned in
  // https://reviews.llvm.org/D74995
  if (Os == Triple::AMDHSA)
    NoteFlags = ELF::SHF_ALLOC;

  S.PushSection();
  S.SwitchSection(
      Context.getELFSection(ElfNote::SectionName, ELF::SHT_NOTE, NoteFlags));
  S.emitInt32(NameSZ);                // namesz
  S.emitValue(DescSZ, 4);             // descsz
  S.emitInt32(NoteType);              // type
  S.emitBytes(Name);                  // name
  S.emitValueToAlignment(4, 0, 1, 0); // padding 0
  EmitDesc(S);                        // desc
  S.emitValueToAlignment(4, 0, 1, 0); // padding 0
  S.PopSection();
}

void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {}

void AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(8, getContext()),
           ElfNote::NT_AMDGPU_HSA_CODE_OBJECT_VERSION, [&](MCELFStreamer &OS) {
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
           });
}

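// Emit an NT_AMDGPU_HSA_ISA note holding the ISA version (major, minor,
// stepping) followed by the null-terminated vendor and architecture names.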
void
AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  uint16_t VendorNameSize = VendorName.size() + 1;
  uint16_t ArchNameSize = ArchName.size() + 1;

  unsigned DescSZ = sizeof(VendorNameSize) + sizeof(ArchNameSize) +
                    sizeof(Major) + sizeof(Minor) + sizeof(Stepping) +
                    VendorNameSize + ArchNameSize;

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(DescSZ, getContext()),
           ElfNote::NT_AMDGPU_HSA_ISA, [&](MCELFStreamer &OS) {
             OS.emitInt16(VendorNameSize);
             OS.emitInt16(ArchNameSize);
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
             OS.emitInt32(Stepping);
             OS.emitBytes(VendorName);
             OS.emitInt8(0); // NULL terminate VendorName
             OS.emitBytes(ArchName);
             OS.emitInt8(0); // NULL terminate ArchName
           });
}

void
AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
  OS.PopSection();
}

void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  MCSymbolELF *Symbol = cast<MCSymbolELF>(
      getStreamer().getContext().getOrCreateSymbol(SymbolName));
  Symbol->setType(Type);
}

void AMDGPUTargetELFStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
  SymbolELF->setType(ELF::STT_OBJECT);

  if (!SymbolELF->isBindingSet()) {
    SymbolELF->setBinding(ELF::STB_GLOBAL);
    SymbolELF->setExternal(true);
  }

  if (SymbolELF->declareCommon(Size, Alignment.value(), true)) {
    report_fatal_error("Symbol: " + Symbol->getName() +
                       " redeclared as different type");
  }

  SymbolELF->setIndex(ELF::SHN_AMDGPU_LDS);
  SymbolELF->setSize(MCConstantExpr::create(Size, getContext()));
}

bool AMDGPUTargetELFStreamer::EmitISAVersion(StringRef IsaVersionString) {
  // Create two labels to mark the beginning and end of the desc field
  // and an MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_ISA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(IsaVersionString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
                                              bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  HSAMetadataDoc.writeToBlob(HSAMetadataString);

  // Create two labels to mark the beginning and end of the desc field
  // and an MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV3, DescSZ, ELF::NT_AMDGPU_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  // Create two labels to mark the beginning and end of the desc field
  // and an MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_HSA_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitCodeEnd() {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitValueToAlignment(64, Encoded_s_code_end, 4);
  for (unsigned I = 0; I < 48; ++I)
    OS.emitInt32(Encoded_s_code_end);
  OS.PopSection();
  return true;
}

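// Emit the kernel descriptor as an ELF object symbol named <KernelName>.kd,
// writing out each field of amdhsa::kernel_descriptor_t in order.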
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr,
    bool ReserveXNACK) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(KernelDescriptor), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.emitLabel(KernelDescriptorSymbol);
  Streamer.emitInt32(KernelDescriptor.group_segment_fixed_size);
  Streamer.emitInt32(KernelDescriptor.private_segment_fixed_size);
  for (uint8_t Res : KernelDescriptor.reserved0)
    Streamer.emitInt8(Res);
  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.emitValue(MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(
          KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
      MCSymbolRefExpr::create(
          KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
      Context),
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
  for (uint8_t Res : KernelDescriptor.reserved1)
    Streamer.emitInt8(Res);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc3);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc1);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc2);
  Streamer.emitInt16(KernelDescriptor.kernel_code_properties);
  for (uint8_t Res : KernelDescriptor.reserved2)
    Streamer.emitInt8(Res);
}