//===-- AMDGPUTargetStreamer.cpp - AMDGPU Target Streamer Methods --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AMDGPU specific target streamer methods.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetStreamer.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetParser.h"

namespace llvm {
#include "AMDGPUPTNote.h"
}

using namespace llvm;
using namespace llvm::AMDGPU;
using namespace llvm::AMDGPU::HSAMD;

//===----------------------------------------------------------------------===//
// AMDGPUTargetStreamer
//===----------------------------------------------------------------------===//

bool AMDGPUTargetStreamer::EmitHSAMetadataV2(StringRef HSAMetadataString) {
  HSAMD::Metadata HSAMetadata;
  if (HSAMD::fromString(std::string(HSAMetadataString), HSAMetadata))
    return false;

  return EmitHSAMetadata(HSAMetadata);
}

bool AMDGPUTargetStreamer::EmitHSAMetadataV3(StringRef HSAMetadataString) {
  msgpack::Document HSAMetadataDoc;
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
    return false;
  return EmitHSAMetadata(HSAMetadataDoc, false);
}

StringRef AMDGPUTargetStreamer::getArchNameFromElfMach(unsigned ElfMach) {
  AMDGPU::GPUKind AK;

  switch (ElfMach) {
  default: llvm_unreachable("Unhandled ELF::EF_AMDGPU type");
  case ELF::EF_AMDGPU_MACH_R600_R600:      AK = GK_R600;    break;
  case ELF::EF_AMDGPU_MACH_R600_R630:      AK = GK_R630;    break;
  case ELF::EF_AMDGPU_MACH_R600_RS880:     AK = GK_RS880;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV670:     AK = GK_RV670;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV710:     AK = GK_RV710;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV730:     AK = GK_RV730;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV770:     AK = GK_RV770;   break;
  case ELF::EF_AMDGPU_MACH_R600_CEDAR:     AK = GK_CEDAR;   break;
  case ELF::EF_AMDGPU_MACH_R600_CYPRESS:   AK = GK_CYPRESS; break;
  case ELF::EF_AMDGPU_MACH_R600_JUNIPER:   AK = GK_JUNIPER; break;
  case ELF::EF_AMDGPU_MACH_R600_REDWOOD:   AK = GK_REDWOOD; break;
  case ELF::EF_AMDGPU_MACH_R600_SUMO:      AK = GK_SUMO;    break;
  case ELF::EF_AMDGPU_MACH_R600_BARTS:     AK = GK_BARTS;   break;
  case ELF::EF_AMDGPU_MACH_R600_CAICOS:    AK = GK_CAICOS;  break;
  case ELF::EF_AMDGPU_MACH_R600_CAYMAN:    AK = GK_CAYMAN;  break;
  case ELF::EF_AMDGPU_MACH_R600_TURKS:     AK = GK_TURKS;   break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX600:  AK = GK_GFX600;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX601:  AK = GK_GFX601;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX700:  AK = GK_GFX700;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX701:  AK = GK_GFX701;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX702:  AK = GK_GFX702;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX703:  AK = GK_GFX703;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX704:  AK = GK_GFX704;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX801:  AK = GK_GFX801;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX802:  AK = GK_GFX802;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX803:  AK = GK_GFX803;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX810:  AK = GK_GFX810;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX900:  AK = GK_GFX900;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX902:  AK = GK_GFX902;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX904:  AK = GK_GFX904;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX906:  AK = GK_GFX906;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX908:  AK = GK_GFX908;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX909:  AK = GK_GFX909;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010: AK = GK_GFX1010; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011: AK = GK_GFX1011; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012: AK = GK_GFX1012; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030: AK = GK_GFX1030; break;
  case ELF::EF_AMDGPU_MACH_NONE:           AK = GK_NONE;    break;
  }

  StringRef GPUName = getArchNameAMDGCN(AK);
  if (GPUName != "")
    return GPUName;
  return getArchNameR600(AK);
}
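
// Map a processor name (e.g. "gfx906") to its EF_AMDGPU_MACH_* e_flags value.
// AMDGCN names are tried first; anything else falls back to the R600 table,
// mirroring getArchNameFromElfMach above.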
unsigned AMDGPUTargetStreamer::getElfMach(StringRef GPU) {
  AMDGPU::GPUKind AK = parseArchAMDGCN(GPU);
  if (AK == AMDGPU::GPUKind::GK_NONE)
    AK = parseArchR600(GPU);

  switch (AK) {
  case GK_R600:    return ELF::EF_AMDGPU_MACH_R600_R600;
  case GK_R630:    return ELF::EF_AMDGPU_MACH_R600_R630;
  case GK_RS880:   return ELF::EF_AMDGPU_MACH_R600_RS880;
  case GK_RV670:   return ELF::EF_AMDGPU_MACH_R600_RV670;
  case GK_RV710:   return ELF::EF_AMDGPU_MACH_R600_RV710;
  case GK_RV730:   return ELF::EF_AMDGPU_MACH_R600_RV730;
  case GK_RV770:   return ELF::EF_AMDGPU_MACH_R600_RV770;
  case GK_CEDAR:   return ELF::EF_AMDGPU_MACH_R600_CEDAR;
  case GK_CYPRESS: return ELF::EF_AMDGPU_MACH_R600_CYPRESS;
  case GK_JUNIPER: return ELF::EF_AMDGPU_MACH_R600_JUNIPER;
  case GK_REDWOOD: return ELF::EF_AMDGPU_MACH_R600_REDWOOD;
  case GK_SUMO:    return ELF::EF_AMDGPU_MACH_R600_SUMO;
  case GK_BARTS:   return ELF::EF_AMDGPU_MACH_R600_BARTS;
  case GK_CAICOS:  return ELF::EF_AMDGPU_MACH_R600_CAICOS;
  case GK_CAYMAN:  return ELF::EF_AMDGPU_MACH_R600_CAYMAN;
  case GK_TURKS:   return ELF::EF_AMDGPU_MACH_R600_TURKS;
  case GK_GFX600:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX600;
  case GK_GFX601:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX601;
  case GK_GFX700:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX700;
  case GK_GFX701:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX701;
  case GK_GFX702:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX702;
  case GK_GFX703:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX703;
  case GK_GFX704:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX704;
  case GK_GFX801:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX801;
  case GK_GFX802:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX802;
  case GK_GFX803:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX803;
  case GK_GFX810:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX810;
  case GK_GFX900:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX900;
  case GK_GFX902:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX902;
  case GK_GFX904:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX904;
  case GK_GFX906:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX906;
  case GK_GFX908:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX908;
  case GK_GFX909:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX909;
  case GK_GFX1010: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010;
  case GK_GFX1011: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011;
  case GK_GFX1012: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012;
  case GK_GFX1030: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030;
  case GK_NONE:    return ELF::EF_AMDGPU_MACH_NONE;
  }

  llvm_unreachable("unknown GPU");
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetAsmStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetAsmStreamer::AMDGPUTargetAsmStreamer(MCStreamer &S,
                                                 formatted_raw_ostream &OS)
    : AMDGPUTargetStreamer(S), OS(OS) { }

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as directives.
void AMDGPUTargetAsmStreamer::finish() {
  std::string S;
  getPALMetadata()->toString(S);
  OS << S;
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {
  OS << "\t.amdgcn_target \"" << Target << "\"\n";
}

void AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {
  OS << "\t.hsa_code_object_version " <<
        Twine(Major) << "," << Twine(Minor) << '\n';
}

void
AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  OS << "\t.hsa_code_object_isa " <<
        Twine(Major) << "," << Twine(Minor) << "," << Twine(Stepping) <<
        ",\"" << VendorName << "\",\"" << ArchName << "\"\n";
}

void
AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
  OS << "\t.amd_kernel_code_t\n";
  dumpAmdKernelCode(&Header, OS, "\t\t");
  OS << "\t.end_amd_kernel_code_t\n";
}

void AMDGPUTargetAsmStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  switch (Type) {
  default: llvm_unreachable("Invalid AMDGPU symbol type");
  case ELF::STT_AMDGPU_HSA_KERNEL:
    OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n';
    break;
  }
}

void AMDGPUTargetAsmStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
}

bool AMDGPUTargetAsmStreamer::EmitISAVersion(StringRef IsaVersionString) {
  OS << "\t.amd_amdgpu_isa \"" << IsaVersionString << "\"\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  OS << '\t' << AssemblerDirectiveBegin << '\n';
  OS << HSAMetadataString << '\n';
  OS << '\t' << AssemblerDirectiveEnd << '\n';
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    msgpack::Document &HSAMetadataDoc, bool Strict) {
  V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  raw_string_ostream StrOS(HSAMetadataString);
  HSAMetadataDoc.toYAML(StrOS);

  OS << '\t' << V3::AssemblerDirectiveBegin << '\n';
  OS << StrOS.str() << '\n';
  OS << '\t' << V3::AssemblerDirectiveEnd << '\n';
  return true;
}
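
// Pad the end of the emitted code with blocks of the encoded s_code_end
// instruction (0xbf9f0000), aligned to a 64-byte boundary. The assembly
// streamer does this with .p2alignl/.fill directives; the ELF streamer later
// in this file writes the words directly. The padding presumably ensures the
// hardware instruction prefetch past the last real instruction only ever
// reads well-defined bytes.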
bool AMDGPUTargetAsmStreamer::EmitCodeEnd() {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  OS << "\t.p2alignl 6, " << Encoded_s_code_end << '\n';
  OS << "\t.fill 48, 4, " << Encoded_s_code_end << '\n';
  return true;
}
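
// Print the kernel descriptor as an .amdhsa_kernel directive block. For
// illustration only (the directive set and values depend on the kernel and
// subtarget), the emitted text for a hypothetical kernel "my_kernel" looks
// roughly like:
//
//   .amdhsa_kernel my_kernel
//     .amdhsa_group_segment_fixed_size 0
//     .amdhsa_private_segment_fixed_size 0
//     .amdhsa_user_sgpr_kernarg_segment_ptr 1
//     .amdhsa_next_free_vgpr 4
//     .amdhsa_next_free_sgpr 8
//   .end_amdhsa_kernel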
void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KD, uint64_t NextVGPR, uint64_t NextSGPR,
    bool ReserveVCC, bool ReserveFlatScr, bool ReserveXNACK) {
  IsaVersion IVersion = getIsaVersion(STI.getCPU());

  OS << "\t.amdhsa_kernel " << KernelName << '\n';

#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME)   \
  STREAM << "\t\t" << DIRECTIVE << " "                                         \
         << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';

  OS << "\t\t.amdhsa_group_segment_fixed_size " << KD.group_segment_fixed_size
     << '\n';
  OS << "\t\t.amdhsa_private_segment_fixed_size "
     << KD.private_segment_fixed_size << '\n';

  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_queue_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
  if (IVersion.Major >= 10)
    PRINT_FIELD(OS, ".amdhsa_wavefront_size32", KD,
                kernel_code_properties,
                amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
  PRINT_FIELD(
      OS, ".amdhsa_system_sgpr_private_segment_wavefront_offset", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  // These directives are required.
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';

  if (!ReserveVCC)
    OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  if (IVersion.Major >= 7 && !ReserveFlatScr)
    OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
  if (IVersion.Major >= 8 && ReserveXNACK != hasXNACK(STI))
    OS << "\t\t.amdhsa_reserve_xnack_mask " << ReserveXNACK << '\n';

  PRINT_FIELD(OS, ".amdhsa_float_round_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_dx10_clamp", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
  PRINT_FIELD(OS, ".amdhsa_ieee_mode", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
  if (IVersion.Major >= 9)
    PRINT_FIELD(OS, ".amdhsa_fp16_overflow", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL);
  if (IVersion.Major >= 10) {
    PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_FIELD(OS, ".amdhsa_memory_ordered", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_FIELD(OS, ".amdhsa_forward_progress", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
#undef PRINT_FIELD

  OS << "\t.end_amdhsa_kernel\n";
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetELFStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetELFStreamer::AMDGPUTargetELFStreamer(MCStreamer &S,
                                                 const MCSubtargetInfo &STI)
    : AMDGPUTargetStreamer(S), Streamer(S), Os(STI.getTargetTriple().getOS()) {
  MCAssembler &MCA = getStreamer().getAssembler();
  unsigned EFlags = MCA.getELFHeaderEFlags();

  EFlags &= ~ELF::EF_AMDGPU_MACH;
  EFlags |= getElfMach(STI.getCPU());

  EFlags &= ~ELF::EF_AMDGPU_XNACK;
  if (AMDGPU::hasXNACK(STI))
    EFlags |= ELF::EF_AMDGPU_XNACK;

  EFlags &= ~ELF::EF_AMDGPU_SRAM_ECC;
  if (AMDGPU::hasSRAMECC(STI))
    EFlags |= ELF::EF_AMDGPU_SRAM_ECC;

  MCA.setELFHeaderEFlags(EFlags);
}

MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
  return static_cast<MCELFStreamer &>(Streamer);
}

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as a .note record.
void AMDGPUTargetELFStreamer::finish() {
  std::string Blob;
  const char *Vendor = getPALMetadata()->getVendor();
  unsigned Type = getPALMetadata()->getType();
  getPALMetadata()->toBlob(Type, Blob);
  if (Blob.empty())
    return;
  EmitNote(Vendor, MCConstantExpr::create(Blob.size(), getContext()), Type,
           [&](MCELFStreamer &OS) { OS.emitBytes(Blob); });
}
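
// Emit a single ELF note record into the note section. As written out below,
// the record consists of a 4-byte name size (including the NUL terminator),
// a 4-byte descriptor size (possibly a not-yet-resolved MCExpr), a 4-byte
// type, then the name and the descriptor produced by the EmitDesc callback,
// each zero-padded to a 4-byte boundary.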
void AMDGPUTargetELFStreamer::EmitNote(
    StringRef Name, const MCExpr *DescSZ, unsigned NoteType,
    function_ref<void(MCELFStreamer &)> EmitDesc) {
  auto &S = getStreamer();
  auto &Context = S.getContext();

  auto NameSZ = Name.size() + 1;

  unsigned NoteFlags = 0;
  // TODO Apparently, this is currently needed for OpenCL as mentioned in
  // https://reviews.llvm.org/D74995
  if (Os == Triple::AMDHSA)
    NoteFlags = ELF::SHF_ALLOC;

  S.PushSection();
  S.SwitchSection(
      Context.getELFSection(ElfNote::SectionName, ELF::SHT_NOTE, NoteFlags));
  S.emitInt32(NameSZ);                // namesz
  S.emitValue(DescSZ, 4);             // descsz
  S.emitInt32(NoteType);              // type
  S.emitBytes(Name);                  // name
  S.emitValueToAlignment(4, 0, 1, 0); // padding 0
  EmitDesc(S);                        // desc
  S.emitValueToAlignment(4, 0, 1, 0); // padding 0
  S.PopSection();
}

void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {}

void AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(8, getContext()),
           ElfNote::NT_AMDGPU_HSA_CODE_OBJECT_VERSION, [&](MCELFStreamer &OS) {
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
           });
}

void
AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  uint16_t VendorNameSize = VendorName.size() + 1;
  uint16_t ArchNameSize = ArchName.size() + 1;

  unsigned DescSZ = sizeof(VendorNameSize) + sizeof(ArchNameSize) +
                    sizeof(Major) + sizeof(Minor) + sizeof(Stepping) +
                    VendorNameSize + ArchNameSize;

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(DescSZ, getContext()),
           ElfNote::NT_AMDGPU_HSA_ISA, [&](MCELFStreamer &OS) {
             OS.emitInt16(VendorNameSize);
             OS.emitInt16(ArchNameSize);
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
             OS.emitInt32(Stepping);
             OS.emitBytes(VendorName);
             OS.emitInt8(0); // NULL terminate VendorName
             OS.emitBytes(ArchName);
             OS.emitInt8(0); // NULL terminate ArchName
           });
}

void
AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
  OS.PopSection();
}

void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  MCSymbolELF *Symbol = cast<MCSymbolELF>(
      getStreamer().getContext().getOrCreateSymbol(SymbolName));
  Symbol->setType(Type);
}

void AMDGPUTargetELFStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
  SymbolELF->setType(ELF::STT_OBJECT);

  if (!SymbolELF->isBindingSet()) {
    SymbolELF->setBinding(ELF::STB_GLOBAL);
    SymbolELF->setExternal(true);
  }

  if (SymbolELF->declareCommon(Size, Alignment.value(), true)) {
    report_fatal_error("Symbol: " + Symbol->getName() +
                       " redeclared as different type");
  }

  SymbolELF->setIndex(ELF::SHN_AMDGPU_LDS);
  SymbolELF->setSize(MCConstantExpr::create(Size, getContext()));
}

bool AMDGPUTargetELFStreamer::EmitISAVersion(StringRef IsaVersionString) {
  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_ISA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(IsaVersionString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
                                              bool Strict) {
  V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  HSAMetadataDoc.writeToBlob(HSAMetadataString);

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV3, DescSZ, ELF::NT_AMDGPU_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_HSA_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitCodeEnd() {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitValueToAlignment(64, Encoded_s_code_end, 4);
  for (unsigned I = 0; I < 48; ++I)
    OS.emitInt32(Encoded_s_code_end);
  OS.PopSection();
  return true;
}
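
// Emit the binary kernel descriptor and define the <KernelName>.kd object
// symbol over it. The descriptor is written in three pieces so that the
// kernel_code_entry_byte_offset field in the middle can be emitted as a
// relocatable expression: (start of kernel code) - (start of descriptor).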
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr,
    bool ReserveXNACK) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(KernelDescriptor), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.emitLabel(KernelDescriptorSymbol);
  Streamer.emitBytes(StringRef(
      (const char*)&(KernelDescriptor),
      offsetof(amdhsa::kernel_descriptor_t, kernel_code_entry_byte_offset)));
  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.emitValue(MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(
          KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
      MCSymbolRefExpr::create(
          KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
      Context),
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
  Streamer.emitBytes(StringRef(
      (const char*)&(KernelDescriptor) +
      offsetof(amdhsa::kernel_descriptor_t, kernel_code_entry_byte_offset) +
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset),
      sizeof(KernelDescriptor) -
      offsetof(amdhsa::kernel_descriptor_t, kernel_code_entry_byte_offset) -
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset)));
}