1 /* 2 * Copyright 2025 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/delay.h> 24 #include <linux/kernel.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include "amdgpu.h" 29 #include "amdgpu_gfx.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_smu.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "imu_v12_1.h" 34 #include "soc_v1_0.h" 35 #include "gfx_v12_1_pkt.h" 36 37 #include "gc/gc_12_1_0_offset.h" 38 #include "gc/gc_12_1_0_sh_mask.h" 39 #include "soc24_enum.h" 40 #include "ivsrcid/gfx/irqsrcs_gfx_12_1_0.h" 41 42 #include "soc15.h" 43 #include "clearstate_gfx12.h" 44 #include "v12_structs.h" 45 #include "gfx_v12_1.h" 46 #include "mes_v12_1.h" 47 48 #define GFX12_MEC_HPD_SIZE 2048 49 #define NUM_SIMD_PER_CU_GFX12_1 4 50 51 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 52 53 MODULE_FIRMWARE("amdgpu/gc_12_1_0_mec.bin"); 54 MODULE_FIRMWARE("amdgpu/gc_12_1_0_rlc.bin"); 55 56 #define SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 0x00000001 57 #define DEFAULT_SH_MEM_CONFIG \ 58 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 59 (SH_MEM_ALIGNMENT_MODE_UNALIGNED_GFX12_1_0 << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ 60 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) 61 62 #define XCC_REG_RANGE_0_LOW 0x1260 /* XCC gfxdec0 lower Bound */ 63 #define XCC_REG_RANGE_0_HIGH 0x3C00 /* XCC gfxdec0 upper Bound */ 64 #define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */ 65 #define XCC_REG_RANGE_1_HIGH 0x10000 /* XCC gfxdec1 upper Bound */ 66 #define NORMALIZE_XCC_REG_OFFSET(offset) \ 67 (offset & 0xFFFF) 68 69 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id); 70 static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev); 71 static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev); 72 static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev); 73 static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev); 74 static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev); 75 static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev, 76 struct amdgpu_cu_info *cu_info); 77 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev); 78 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num, 79 u32 sh_num, u32 instance, int xcc_id); 80 static void gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 81 uint32_t val); 
82 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); 83 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring, 84 uint16_t pasid, uint32_t flush_type, 85 bool all_hub, uint8_t dst_sel); 86 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id); 87 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); 88 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev, 89 bool enable); 90 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev, 91 bool enable, int xcc_id); 92 93 static void gfx_v12_1_kiq_set_resources(struct amdgpu_ring *kiq_ring, 94 uint64_t queue_mask) 95 { 96 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 97 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 98 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 99 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 100 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 101 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 102 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 103 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 104 amdgpu_ring_write(kiq_ring, 0); 105 } 106 107 static void gfx_v12_1_kiq_map_queues(struct amdgpu_ring *kiq_ring, 108 struct amdgpu_ring *ring) 109 { 110 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 111 uint64_t wptr_addr = ring->wptr_gpu_addr; 112 uint32_t me = 0, eng_sel = 0; 113 114 switch (ring->funcs->type) { 115 case AMDGPU_RING_TYPE_COMPUTE: 116 me = 1; 117 eng_sel = 0; 118 break; 119 case AMDGPU_RING_TYPE_MES: 120 me = 2; 121 eng_sel = 5; 122 break; 123 default: 124 WARN_ON(1); 125 } 126 127 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 128 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 129 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 130 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 131 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 132 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 133 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 134 PACKET3_MAP_QUEUES_ME((me)) | 135 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 136 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 137 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 138 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 139 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 140 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 141 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 142 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 143 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 144 } 145 146 static void gfx_v12_1_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 147 struct amdgpu_ring *ring, 148 enum amdgpu_unmap_queues_action action, 149 u64 gpu_addr, u64 seq) 150 { 151 struct amdgpu_device *adev = kiq_ring->adev; 152 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
		4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr,
					      seq, kiq_ring->xcc_id);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* action, queue_sel, eng_sel, num_queues */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v12_1_kiq_query_status(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* doorbell_offset, eng_sel */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v12_1_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					  uint16_t pasid,
					  uint32_t flush_type,
					  bool all_hub)
{
	gfx_v12_1_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v12_1_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v12_1_kiq_set_resources,
	.kiq_map_queues = gfx_v12_1_kiq_map_queues,
	.kiq_unmap_queues = gfx_v12_1_kiq_unmap_queues,
	.kiq_query_status = gfx_v12_1_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v12_1_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v12_1_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v12_1_kiq_pm4_funcs;
}

static uint32_t gfx_v12_1_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC register, return the offset normalized to the
	 * lower 16 bits so it addresses the local XCC; otherwise return
	 * the offset unchanged.
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v12_1_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref,
				   uint32_t mask, uint32_t inv)
{
	if (mem_space == 0) {
		addr0 = gfx_v12_1_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v12_1_normalize_xcc_reg_offset(addr1);
252 } 253 254 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 255 amdgpu_ring_write(ring, 256 /* memory (1) or register (0) */ 257 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 258 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 259 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 260 WAIT_REG_MEM_ENGINE(eng_sel))); 261 262 if (mem_space) 263 BUG_ON(addr0 & 0x3); /* Dword align */ 264 amdgpu_ring_write(ring, addr0); 265 amdgpu_ring_write(ring, addr1); 266 amdgpu_ring_write(ring, ref); 267 amdgpu_ring_write(ring, mask); 268 amdgpu_ring_write(ring, inv); /* poll interval */ 269 } 270 271 static int gfx_v12_1_ring_test_ring(struct amdgpu_ring *ring) 272 { 273 struct amdgpu_device *adev = ring->adev; 274 uint32_t scratch_reg0_offset, xcc_offset; 275 uint32_t tmp = 0; 276 unsigned i; 277 int r; 278 279 /* Use register offset which is local to XCC in the packet */ 280 xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 281 scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); 282 WREG32(scratch_reg0_offset, 0xCAFEDEAD); 283 tmp = RREG32(scratch_reg0_offset); 284 285 r = amdgpu_ring_alloc(ring, 5); 286 if (r) { 287 dev_err(adev->dev, 288 "amdgpu: cp failed to lock ring %d (%d).\n", 289 ring->idx, r); 290 return r; 291 } 292 293 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 294 gfx_v12_1_ring_emit_wreg(ring, xcc_offset, 0xDEADBEEF); 295 } else { 296 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 297 amdgpu_ring_write(ring, xcc_offset - 298 PACKET3_SET_UCONFIG_REG_START); 299 amdgpu_ring_write(ring, 0xDEADBEEF); 300 } 301 amdgpu_ring_commit(ring); 302 303 for (i = 0; i < adev->usec_timeout; i++) { 304 tmp = RREG32(scratch_reg0_offset); 305 if (tmp == 0xDEADBEEF) 306 break; 307 if (amdgpu_emu_mode == 1) 308 msleep(1); 309 else 310 udelay(1); 311 } 312 313 if (i >= adev->usec_timeout) 314 r = -ETIMEDOUT; 315 return r; 316 } 317 318 static int gfx_v12_1_ring_test_ib(struct amdgpu_ring *ring, long timeout) 319 { 320 struct amdgpu_device *adev = ring->adev; 321 struct amdgpu_ib ib; 322 struct dma_fence *f = NULL; 323 unsigned index; 324 uint64_t gpu_addr; 325 volatile uint32_t *cpu_ptr; 326 long r; 327 328 /* MES KIQ fw hasn't indirect buffer support for now */ 329 if (adev->enable_mes_kiq && 330 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 331 return 0; 332 333 memset(&ib, 0, sizeof(ib)); 334 335 r = amdgpu_device_wb_get(adev, &index); 336 if (r) 337 return r; 338 339 gpu_addr = adev->wb.gpu_addr + (index * 4); 340 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 341 cpu_ptr = &adev->wb.wb[index]; 342 343 r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); 344 if (r) { 345 dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r); 346 goto err1; 347 } 348 349 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 350 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 351 ib.ptr[2] = lower_32_bits(gpu_addr); 352 ib.ptr[3] = upper_32_bits(gpu_addr); 353 ib.ptr[4] = 0xDEADBEEF; 354 ib.length_dw = 5; 355 356 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 357 if (r) 358 goto err2; 359 360 r = dma_fence_wait_timeout(f, false, timeout); 361 if (r == 0) { 362 r = -ETIMEDOUT; 363 goto err2; 364 } else if (r < 0) { 365 goto err2; 366 } 367 368 if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) 369 r = 0; 370 else 371 r = -EINVAL; 372 err2: 373 amdgpu_ib_free(&ib, NULL); 374 dma_fence_put(f); 375 err1: 376 amdgpu_device_wb_free(adev, index); 377 return r; 378 } 379 380 static void gfx_v12_1_free_microcode(struct amdgpu_device *adev) 381 { 382 amdgpu_ucode_release(&adev->gfx.rlc_fw); 383 
amdgpu_ucode_release(&adev->gfx.mec_fw); 384 385 kfree(adev->gfx.rlc.register_list_format); 386 } 387 388 static int gfx_v12_1_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix) 389 { 390 const struct psp_firmware_header_v1_0 *toc_hdr; 391 int err = 0; 392 393 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, 394 AMDGPU_UCODE_REQUIRED, 395 "amdgpu/%s_toc.bin", ucode_prefix); 396 if (err) 397 goto out; 398 399 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 400 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 401 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 402 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 403 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 404 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 405 return 0; 406 out: 407 amdgpu_ucode_release(&adev->psp.toc_fw); 408 return err; 409 } 410 411 static int gfx_v12_1_init_microcode(struct amdgpu_device *adev) 412 { 413 char ucode_prefix[15]; 414 int err; 415 const struct rlc_firmware_header_v2_0 *rlc_hdr; 416 uint16_t version_major; 417 uint16_t version_minor; 418 419 DRM_DEBUG("\n"); 420 421 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 422 423 if (!amdgpu_sriov_vf(adev)) { 424 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 425 AMDGPU_UCODE_REQUIRED, 426 "amdgpu/%s_rlc.bin", ucode_prefix); 427 if (err) 428 goto out; 429 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 430 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 431 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 432 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); 433 if (err) 434 goto out; 435 } 436 437 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 438 AMDGPU_UCODE_REQUIRED, 439 "amdgpu/%s_mec.bin", ucode_prefix); 440 if (err) 441 goto out; 442 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC); 443 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK); 444 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK); 445 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK); 446 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK); 447 448 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 449 err = gfx_v12_1_init_toc_microcode(adev, ucode_prefix); 450 451 /* only one MEC for gfx 12 */ 452 adev->gfx.mec2_fw = NULL; 453 454 if (adev->gfx.imu.funcs) { 455 if (adev->gfx.imu.funcs->init_microcode) { 456 err = adev->gfx.imu.funcs->init_microcode(adev); 457 if (err) 458 dev_err(adev->dev, "Failed to load imu firmware!\n"); 459 } 460 } 461 462 out: 463 if (err) { 464 amdgpu_ucode_release(&adev->gfx.rlc_fw); 465 amdgpu_ucode_release(&adev->gfx.mec_fw); 466 } 467 468 return err; 469 } 470 471 static u32 gfx_v12_1_get_csb_size(struct amdgpu_device *adev) 472 { 473 u32 count = 0; 474 const struct cs_section_def *sect = NULL; 475 const struct cs_extent_def *ext = NULL; 476 477 count += 1; 478 479 for (sect = gfx12_cs_data; sect->section != NULL; ++sect) { 480 if (sect->id == SECT_CONTEXT) { 481 for (ext = sect->section; ext->extent != NULL; ++ext) 482 count += 2 + ext->reg_count; 483 } else 484 return 0; 485 } 486 487 return count; 488 } 489 490 static void gfx_v12_1_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 491 { 492 u32 count = 0, clustercount = 0, i; 493 const struct cs_section_def *sect = NULL; 494 const 
struct cs_extent_def *ext = NULL; 495 496 if (adev->gfx.rlc.cs_data == NULL) 497 return; 498 if (buffer == NULL) 499 return; 500 501 count += 1; 502 503 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 504 if (sect->id == SECT_CONTEXT) { 505 for (ext = sect->section; ext->extent != NULL; ++ext) { 506 clustercount++; 507 buffer[count++] = ext->reg_count; 508 buffer[count++] = ext->reg_index; 509 510 for (i = 0; i < ext->reg_count; i++) 511 buffer[count++] = cpu_to_le32(ext->extent[i]); 512 } 513 } else 514 return; 515 } 516 517 buffer[0] = clustercount; 518 } 519 520 static void gfx_v12_1_rlc_fini(struct amdgpu_device *adev) 521 { 522 /* clear state block */ 523 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 524 &adev->gfx.rlc.clear_state_gpu_addr, 525 (void **)&adev->gfx.rlc.cs_ptr); 526 527 /* jump table block */ 528 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 529 &adev->gfx.rlc.cp_table_gpu_addr, 530 (void **)&adev->gfx.rlc.cp_table_ptr); 531 } 532 533 static void gfx_v12_1_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 534 { 535 int xcc_id, num_xcc; 536 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 537 538 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 539 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 540 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)]; 541 reg_access_ctrl->scratch_reg0 = 542 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0); 543 reg_access_ctrl->scratch_reg1 = 544 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1); 545 reg_access_ctrl->scratch_reg2 = 546 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2); 547 reg_access_ctrl->scratch_reg3 = 548 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3); 549 reg_access_ctrl->grbm_cntl = 550 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL); 551 reg_access_ctrl->grbm_idx = 552 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX); 553 reg_access_ctrl->spare_int = 554 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT_0); 555 } 556 adev->gfx.rlc.rlcg_reg_access_supported = true; 557 } 558 559 static int gfx_v12_1_rlc_init(struct amdgpu_device *adev) 560 { 561 const struct cs_section_def *cs_data; 562 int r, i, num_xcc; 563 564 adev->gfx.rlc.cs_data = gfx12_cs_data; 565 566 cs_data = adev->gfx.rlc.cs_data; 567 568 if (cs_data) { 569 /* init clear state block */ 570 r = amdgpu_gfx_rlc_init_csb(adev); 571 if (r) 572 return r; 573 } 574 575 /* init spm vmid with 0xf */ 576 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 577 for (i = 0; i < num_xcc; i++) { 578 if (adev->gfx.rlc.funcs->update_spm_vmid) 579 adev->gfx.rlc.funcs->update_spm_vmid(adev, i, NULL, 0xf); 580 } 581 582 return 0; 583 } 584 585 static void gfx_v12_1_mec_fini(struct amdgpu_device *adev) 586 { 587 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 588 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 589 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); 590 } 591 592 static int gfx_v12_1_mec_init(struct amdgpu_device *adev) 593 { 594 int r, i, num_xcc; 595 u32 *hpd; 596 size_t mec_hpd_size; 597 598 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 599 600 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 601 for (i = 0; i < num_xcc; i++) 602 bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap, 603 AMDGPU_MAX_COMPUTE_QUEUES); 604 605 /* take ownership of the relevant compute queues */ 606 amdgpu_gfx_compute_queue_acquire(adev); 607 mec_hpd_size = adev->gfx.num_compute_rings * 608 
GFX12_MEC_HPD_SIZE * num_xcc; 609 610 if (mec_hpd_size) { 611 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 612 AMDGPU_GEM_DOMAIN_GTT, 613 &adev->gfx.mec.hpd_eop_obj, 614 &adev->gfx.mec.hpd_eop_gpu_addr, 615 (void **)&hpd); 616 if (r) { 617 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 618 gfx_v12_1_mec_fini(adev); 619 return r; 620 } 621 622 memset(hpd, 0, mec_hpd_size); 623 624 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 625 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 626 } 627 628 return 0; 629 } 630 631 static uint32_t wave_read_ind(struct amdgpu_device *adev, 632 uint32_t xcc_id, uint32_t wave, 633 uint32_t address) 634 { 635 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, 636 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 637 (address << SQ_IND_INDEX__INDEX__SHIFT)); 638 return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); 639 } 640 641 static void wave_read_regs(struct amdgpu_device *adev, 642 uint32_t xcc_id, uint32_t wave, 643 uint32_t thread, uint32_t regno, 644 uint32_t num, uint32_t *out) 645 { 646 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, 647 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 648 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 649 (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | 650 (SQ_IND_INDEX__AUTO_INCR_MASK)); 651 while (num--) 652 *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); 653 } 654 655 static void gfx_v12_1_read_wave_data(struct amdgpu_device *adev, 656 uint32_t xcc_id, 657 uint32_t simd, uint32_t wave, 658 uint32_t *dst, int *no_fields) 659 { 660 /* in gfx12 the SIMD_ID is specified as part of the INSTANCE 661 * field when performing a select_se_sh so it should be 662 * zero here */ 663 WARN_ON(simd != 0); 664 665 /* type 4 wave data */ 666 dst[(*no_fields)++] = 4; 667 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATUS); 668 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_LO); 669 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_PC_HI); 670 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_LO); 671 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXEC_HI); 672 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID1); 673 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_HW_ID2); 674 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_GPR_ALLOC); 675 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_LDS_ALLOC); 676 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS); 677 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_STS2); 678 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_IB_DBG1); 679 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_M0); 680 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_MODE); 681 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_STATE_PRIV); 682 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_PRIV); 683 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_EXCP_FLAG_USER); 684 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_TRAP_CTRL); 685 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_ACTIVE); 686 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_VALID_AND_IDLE); 687 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_DVGPR_ALLOC_LO); 688 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, 
ixSQ_WAVE_DVGPR_ALLOC_HI); 689 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, wave, ixSQ_WAVE_SCHED_MODE); 690 } 691 692 static void gfx_v12_1_read_wave_sgprs(struct amdgpu_device *adev, 693 uint32_t xcc_id, uint32_t simd, 694 uint32_t wave, uint32_t start, 695 uint32_t size, uint32_t *dst) 696 { 697 WARN_ON(simd != 0); 698 699 wave_read_regs(adev, xcc_id, wave, 0, 700 start + SQIND_WAVE_SGPRS_OFFSET, 701 size, dst); 702 } 703 704 static void gfx_v12_1_read_wave_vgprs(struct amdgpu_device *adev, 705 uint32_t xcc_id, uint32_t simd, 706 uint32_t wave, uint32_t thread, 707 uint32_t start, uint32_t size, 708 uint32_t *dst) 709 { 710 wave_read_regs(adev, xcc_id, wave, thread, 711 start + SQIND_WAVE_VGPRS_OFFSET, 712 size, dst); 713 } 714 715 static void gfx_v12_1_select_me_pipe_q(struct amdgpu_device *adev, 716 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 717 { 718 soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); 719 } 720 721 static int gfx_v12_1_get_xccs_per_xcp(struct amdgpu_device *adev) 722 { 723 /* Fill this in when the interface is ready */ 724 return 1; 725 } 726 727 static int gfx_v12_1_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) 728 { 729 int logic_xcc; 730 int xcc = (ih_node & 0x7) - 2 + (ih_node >> 3) * 4; 731 732 for (logic_xcc = 0; logic_xcc < NUM_XCC(adev->gfx.xcc_mask); logic_xcc++) { 733 if (xcc == GET_INST(GC, logic_xcc)) 734 return logic_xcc; 735 } 736 737 dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); 738 return -EINVAL; 739 } 740 741 static const struct amdgpu_gfx_funcs gfx_v12_1_gfx_funcs = { 742 .get_gpu_clock_counter = &gfx_v12_1_get_gpu_clock_counter, 743 .select_se_sh = &gfx_v12_1_xcc_select_se_sh, 744 .read_wave_data = &gfx_v12_1_read_wave_data, 745 .read_wave_sgprs = &gfx_v12_1_read_wave_sgprs, 746 .read_wave_vgprs = &gfx_v12_1_read_wave_vgprs, 747 .select_me_pipe_q = &gfx_v12_1_select_me_pipe_q, 748 .update_perfmon_mgcg = &gfx_v12_1_update_perf_clk, 749 .get_xccs_per_xcp = &gfx_v12_1_get_xccs_per_xcp, 750 .ih_node_to_logical_xcc = &gfx_v12_1_ih_to_xcc_inst, 751 }; 752 753 static int gfx_v12_1_gpu_early_init(struct amdgpu_device *adev) 754 { 755 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 756 case IP_VERSION(12, 1, 0): 757 adev->gfx.config.max_hw_contexts = 8; 758 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 759 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 760 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 761 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 762 break; 763 default: 764 BUG(); 765 break; 766 } 767 768 return 0; 769 } 770 771 static int gfx_v12_1_compute_ring_init(struct amdgpu_device *adev, int ring_id, 772 int xcc_id, int mec, int pipe, int queue) 773 { 774 int r; 775 unsigned irq_type; 776 struct amdgpu_ring *ring; 777 unsigned int hw_prio; 778 uint32_t xcc_doorbell_start; 779 780 ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings + 781 ring_id]; 782 783 /* mec0 is me1 */ 784 ring->xcc_id = xcc_id; 785 ring->me = mec + 1; 786 ring->pipe = pipe; 787 ring->queue = queue; 788 789 ring->ring_obj = NULL; 790 ring->use_doorbell = true; 791 xcc_doorbell_start = adev->doorbell_index.mec_ring0 + 792 xcc_id * adev->doorbell_index.xcc_doorbell_range; 793 ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1; 794 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + 795 (ring_id + xcc_id * adev->gfx.num_compute_rings) * 796 GFX12_MEC_HPD_SIZE; 797 ring->vm_hub = AMDGPU_GFXHUB(xcc_id); 798 sprintf(ring->name, "comp_%d.%d.%d.%d", 799 ring->xcc_id, ring->me, ring->pipe, 
ring->queue); 800 801 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 802 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 803 + ring->pipe; 804 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 805 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 806 /* type-2 packets are deprecated on MEC, use type-3 instead */ 807 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 808 hw_prio, NULL); 809 if (r) 810 return r; 811 812 return 0; 813 } 814 815 static struct { 816 SOC24_FIRMWARE_ID id; 817 unsigned int offset; 818 unsigned int size; 819 unsigned int size_x16; 820 unsigned int num_inst; 821 } rlc_autoload_info[SOC24_FIRMWARE_ID_MAX]; 822 823 #define RLC_TOC_OFFSET_DWUNIT 8 824 #define RLC_SIZE_MULTIPLE 1024 825 #define RLC_TOC_UMF_SIZE_inM 23ULL 826 #define RLC_TOC_FORMAT_API 165ULL 827 828 #define RLC_NUM_INS_CODE0 1 829 #define RLC_NUM_INS_CODE1 8 830 #define RLC_NUM_INS_CODE2 2 831 #define RLC_NUM_INS_CODE3 16 832 833 static void gfx_v12_1_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) 834 { 835 RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc; 836 837 while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) { 838 rlc_autoload_info[ucode->id].id = ucode->id; 839 rlc_autoload_info[ucode->id].offset = 840 ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4; 841 rlc_autoload_info[ucode->id].size = 842 ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 : 843 ucode->size * 4; 844 switch (ucode->vfflr_image_code) { 845 case 0: 846 rlc_autoload_info[ucode->id].num_inst = 847 RLC_NUM_INS_CODE0; 848 break; 849 case 1: 850 rlc_autoload_info[ucode->id].num_inst = 851 RLC_NUM_INS_CODE1; 852 break; 853 case 2: 854 rlc_autoload_info[ucode->id].num_inst = 855 RLC_NUM_INS_CODE2; 856 break; 857 case 3: 858 rlc_autoload_info[ucode->id].num_inst = 859 RLC_NUM_INS_CODE3; 860 break; 861 default: 862 dev_err(adev->dev, 863 "Invalid Instance number detected\n"); 864 break; 865 } 866 ucode++; 867 } 868 } 869 870 static uint32_t gfx_v12_1_calc_toc_total_size(struct amdgpu_device *adev) 871 { 872 uint32_t total_size = 0; 873 SOC24_FIRMWARE_ID id; 874 875 gfx_v12_1_parse_rlc_toc(adev, adev->psp.toc.start_addr); 876 877 for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++) 878 total_size += rlc_autoload_info[id].size; 879 880 /* In case the offset in rlc toc ucode is aligned */ 881 if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset) 882 total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset + 883 rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size; 884 if (total_size < (RLC_TOC_UMF_SIZE_inM << 20)) 885 total_size = RLC_TOC_UMF_SIZE_inM << 20; 886 887 return total_size; 888 } 889 890 static int gfx_v12_1_rlc_autoload_buffer_init(struct amdgpu_device *adev) 891 { 892 int r; 893 uint32_t total_size; 894 895 total_size = gfx_v12_1_calc_toc_total_size(adev); 896 897 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 898 AMDGPU_GEM_DOMAIN_VRAM, 899 &adev->gfx.rlc.rlc_autoload_bo, 900 &adev->gfx.rlc.rlc_autoload_gpu_addr, 901 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 902 903 if (r) { 904 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 905 return r; 906 } 907 908 return 0; 909 } 910 911 static void gfx_v12_1_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 912 SOC24_FIRMWARE_ID id, 913 const void *fw_data, 914 uint32_t fw_size) 915 { 916 uint32_t toc_offset; 917 uint32_t toc_fw_size, toc_fw_inst_size; 918 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 919 int i, num_inst; 920 921 if (id <= 
SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX) 922 return; 923 924 toc_offset = rlc_autoload_info[id].offset; 925 toc_fw_size = rlc_autoload_info[id].size; 926 num_inst = rlc_autoload_info[id].num_inst; 927 toc_fw_inst_size = toc_fw_size / num_inst; 928 929 if (fw_size == 0) 930 fw_size = toc_fw_inst_size; 931 932 if (fw_size > toc_fw_inst_size) 933 fw_size = toc_fw_inst_size; 934 935 for (i = 0; i < num_inst; i++) { 936 if ((num_inst == RLC_NUM_INS_CODE0) || 937 ((1 << (i / 2)) & adev->gfx.xcc_mask)) { 938 memcpy(ptr + toc_offset + i * toc_fw_inst_size, fw_data, fw_size); 939 940 if (fw_size < toc_fw_inst_size) 941 memset(ptr + toc_offset + fw_size + i * toc_fw_inst_size, 942 0, toc_fw_inst_size - fw_size); 943 } 944 } 945 } 946 947 static void 948 gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev) 949 { 950 void *data; 951 uint32_t size; 952 uint32_t *toc_ptr; 953 954 data = adev->psp.toc.start_addr; 955 size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size; 956 957 toc_ptr = (uint32_t *)data + size / 4 - 2; 958 *toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1; 959 960 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC, 961 data, size); 962 } 963 964 static void 965 gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev) 966 { 967 const __le32 *fw_data; 968 uint32_t fw_size; 969 const struct gfx_firmware_header_v2_0 *cpv2_hdr; 970 const struct rlc_firmware_header_v2_0 *rlc_hdr; 971 const struct rlc_firmware_header_v2_1 *rlcv21_hdr; 972 const struct rlc_firmware_header_v2_2 *rlcv22_hdr; 973 uint16_t version_major, version_minor; 974 975 /* mec ucode */ 976 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 977 adev->gfx.mec_fw->data; 978 /* instruction */ 979 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 980 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 981 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 982 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC, 983 fw_data, fw_size); 984 /* data */ 985 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 986 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 987 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 988 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK, 989 fw_data, fw_size); 990 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK, 991 fw_data, fw_size); 992 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK, 993 fw_data, fw_size); 994 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK, 995 fw_data, fw_size); 996 997 /* rlc ucode */ 998 rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 999 adev->gfx.rlc_fw->data; 1000 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1001 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1002 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1003 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE, 1004 fw_data, fw_size); 1005 1006 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 1007 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 1008 if (version_major == 2) { 1009 if (version_minor >= 1) { 1010 rlcv21_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; 1011 1012 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1013 le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_offset_bytes)); 1014 fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_gpm_size_bytes); 1015 
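			/* v2.1 RLC headers add the GPM and SRM save/restore
			 * lists; stage both in the autoload buffer alongside
			 * the RLC_G ucode.
			 */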
gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLCG_SCRATCH, 1016 fw_data, fw_size); 1017 1018 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1019 le32_to_cpu(rlcv21_hdr->save_restore_list_srm_offset_bytes)); 1020 fw_size = le32_to_cpu(rlcv21_hdr->save_restore_list_srm_size_bytes); 1021 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_SRM_ARAM, 1022 fw_data, fw_size); 1023 } 1024 if (version_minor >= 2) { 1025 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1026 1027 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1028 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); 1029 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); 1030 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE, 1031 fw_data, fw_size); 1032 1033 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1034 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); 1035 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); 1036 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT, 1037 fw_data, fw_size); 1038 } 1039 } 1040 } 1041 1042 static void 1043 gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev) 1044 { 1045 const __le32 *fw_data; 1046 uint32_t fw_size; 1047 const struct sdma_firmware_header_v3_0 *sdma_hdr; 1048 1049 if (adev->sdma.instance[0].fw) { 1050 sdma_hdr = (const struct sdma_firmware_header_v3_0 *) 1051 adev->sdma.instance[0].fw->data; 1052 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1053 le32_to_cpu(sdma_hdr->ucode_offset_bytes)); 1054 fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes); 1055 1056 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0, 1057 fw_data, fw_size); 1058 } 1059 } 1060 1061 static void 1062 gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev) 1063 { 1064 const __le32 *fw_data; 1065 unsigned fw_size; 1066 const struct mes_firmware_header_v1_0 *mes_hdr; 1067 int pipe, ucode_id, data_id; 1068 1069 for (pipe = 0; pipe < 2; pipe++) { 1070 if (pipe == 0) { 1071 ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0; 1072 data_id = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK; 1073 } else { 1074 ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1; 1075 data_id = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK; 1076 } 1077 1078 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1079 adev->mes.fw[pipe]->data; 1080 1081 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1082 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1083 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1084 1085 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size); 1086 1087 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1088 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1089 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1090 1091 gfx_v12_1_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size); 1092 } 1093 } 1094 1095 static int gfx_v12_1_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1096 { 1097 uint32_t rlc_g_offset, rlc_g_size; 1098 uint64_t gpu_addr; 1099 uint32_t data; 1100 int i, num_xcc; 1101 1102 /* RLC autoload sequence 2: copy ucode */ 1103 gfx_v12_1_rlc_backdoor_autoload_copy_sdma_ucode(adev); 1104 gfx_v12_1_rlc_backdoor_autoload_copy_gfx_ucode(adev); 1105 gfx_v12_1_rlc_backdoor_autoload_copy_mes_ucode(adev); 1106 gfx_v12_1_rlc_backdoor_autoload_copy_toc_ucode(adev); 1107 1108 rlc_g_offset = 
rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset; 1109 rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size; 1110 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start; 1111 1112 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1113 for (i = 0; i < num_xcc; i++) { 1114 WREG32_SOC15(GC, GET_INST(GC, i), 1115 regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, 1116 upper_32_bits(gpu_addr)); 1117 WREG32_SOC15(GC, GET_INST(GC, i), 1118 regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, 1119 lower_32_bits(gpu_addr)); 1120 WREG32_SOC15(GC, GET_INST(GC, i), 1121 regGFX_IMU_RLC_BOOTLOADER_SIZE, 1122 rlc_g_size); 1123 } 1124 1125 if (adev->gfx.imu.funcs) { 1126 /* RLC autoload sequence 3: load IMU fw */ 1127 if (adev->gfx.imu.funcs->load_microcode) 1128 adev->gfx.imu.funcs->load_microcode(adev); 1129 } 1130 1131 /* unhalt rlc to start autoload */ 1132 for (i = 0; i < num_xcc; i++) { 1133 data = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE); 1134 data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1); 1135 data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 1136 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_THREAD_ENABLE, data); 1137 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK); 1138 } 1139 1140 return 0; 1141 } 1142 1143 static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block) 1144 { 1145 int i, j, k, r, ring_id = 0; 1146 unsigned num_compute_rings; 1147 int xcc_id, num_xcc; 1148 struct amdgpu_device *adev = ip_block->adev; 1149 1150 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1151 case IP_VERSION(12, 1, 0): 1152 adev->gfx.mec.num_mec = 1; 1153 adev->gfx.mec.num_pipe_per_mec = 4; 1154 adev->gfx.mec.num_queue_per_pipe = 8; 1155 break; 1156 default: 1157 adev->gfx.mec.num_mec = 2; 1158 adev->gfx.mec.num_pipe_per_mec = 2; 1159 adev->gfx.mec.num_queue_per_pipe = 4; 1160 break; 1161 } 1162 1163 /* recalculate compute rings to use based on hardware configuration */ 1164 num_compute_rings = (adev->gfx.mec.num_pipe_per_mec * 1165 adev->gfx.mec.num_queue_per_pipe) / 2; 1166 adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings, 1167 num_compute_rings); 1168 1169 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1170 1171 /* EOP Event */ 1172 r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP, 1173 GFX_12_1_0__SRCID__CP_EOP_INTERRUPT, 1174 &adev->gfx.eop_irq); 1175 if (r) 1176 return r; 1177 1178 /* Privileged reg */ 1179 r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP, 1180 GFX_12_1_0__SRCID__CP_PRIV_REG_FAULT, 1181 &adev->gfx.priv_reg_irq); 1182 if (r) 1183 return r; 1184 1185 /* Privileged inst */ 1186 r = amdgpu_irq_add_id(adev, SOC_V1_0_IH_CLIENTID_GRBM_CP, 1187 GFX_12_1_0__SRCID__CP_PRIV_INSTR_FAULT, 1188 &adev->gfx.priv_inst_irq); 1189 if (r) 1190 return r; 1191 1192 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1193 1194 r = gfx_v12_1_rlc_init(adev); 1195 if (r) { 1196 dev_err(adev->dev, "Failed to init rlc BOs!\n"); 1197 return r; 1198 } 1199 1200 r = gfx_v12_1_mec_init(adev); 1201 if (r) { 1202 dev_err(adev->dev, "Failed to init MEC BOs!\n"); 1203 return r; 1204 } 1205 1206 /* set up the compute queues - allocate horizontally across pipes */ 1207 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 1208 ring_id = 0; 1209 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1210 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1211 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1212 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 1213 xcc_id, i, k, j)) 1214 continue; 1215 1216 r = 
gfx_v12_1_compute_ring_init(adev, ring_id, 1217 xcc_id, i, k, j); 1218 if (r) 1219 return r; 1220 1221 ring_id++; 1222 } 1223 } 1224 } 1225 1226 if (!adev->enable_mes_kiq) { 1227 r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, xcc_id); 1228 if (r) { 1229 dev_err(adev->dev, "Failed to init KIQ BOs!\n"); 1230 return r; 1231 } 1232 1233 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1234 if (r) 1235 return r; 1236 } 1237 1238 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_1_compute_mqd), xcc_id); 1239 if (r) 1240 return r; 1241 } 1242 1243 /* allocate visible FB for rlc auto-loading fw */ 1244 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1245 r = gfx_v12_1_rlc_autoload_buffer_init(adev); 1246 if (r) 1247 return r; 1248 } 1249 1250 r = gfx_v12_1_gpu_early_init(adev); 1251 if (r) 1252 return r; 1253 1254 r = amdgpu_gfx_sysfs_init(adev); 1255 if (r) 1256 return r; 1257 1258 return 0; 1259 } 1260 1261 static void gfx_v12_1_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1262 { 1263 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1264 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1265 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1266 } 1267 1268 static int gfx_v12_1_sw_fini(struct amdgpu_ip_block *ip_block) 1269 { 1270 int i, num_xcc; 1271 struct amdgpu_device *adev = ip_block->adev; 1272 1273 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1274 for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) 1275 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1276 1277 for (i = 0; i < num_xcc; i++) { 1278 amdgpu_gfx_mqd_sw_fini(adev, i); 1279 1280 if (!adev->enable_mes_kiq) { 1281 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); 1282 amdgpu_gfx_kiq_fini(adev, i); 1283 } 1284 } 1285 1286 gfx_v12_1_rlc_fini(adev); 1287 gfx_v12_1_mec_fini(adev); 1288 1289 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1290 gfx_v12_1_rlc_autoload_buffer_fini(adev); 1291 1292 gfx_v12_1_free_microcode(adev); 1293 1294 return 0; 1295 } 1296 1297 static void gfx_v12_1_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1298 u32 sh_num, u32 instance, int xcc_id) 1299 { 1300 u32 data; 1301 1302 if (instance == 0xffffffff) 1303 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1304 INSTANCE_BROADCAST_WRITES, 1); 1305 else 1306 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1307 instance); 1308 1309 if (se_num == 0xffffffff) 1310 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1311 1); 1312 else 1313 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1314 1315 if (sh_num == 0xffffffff) 1316 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1317 1); 1318 else 1319 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1320 1321 WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data); 1322 } 1323 1324 static u32 gfx_v12_1_get_sa_active_bitmap(struct amdgpu_device *adev, 1325 int xcc_id) 1326 { 1327 u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask; 1328 1329 gc_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SA_UNIT_DISABLE); 1330 gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask, 1331 CC_GC_SA_UNIT_DISABLE, 1332 SA_DISABLE); 1333 gc_user_disabled_sa_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SA_UNIT_DISABLE); 1334 gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask, 1335 GC_USER_SA_UNIT_DISABLE, 1336 SA_DISABLE); 1337 sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se * 1338 adev->gfx.config.max_shader_engines); 1339 1340 return sa_mask & 
			 (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v12_1_get_rb_active_bitmap(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
					   regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
						regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

static void gfx_v12_1_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;
	int xcc_id;

	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		/* query sa bitmap from SA_UNIT_DISABLE registers */
		active_sa_bitmap = gfx_v12_1_get_sa_active_bitmap(adev, xcc_id);
		/* query rb bitmap from RB_BACKEND_DISABLE registers */
		global_active_rb_bitmap = gfx_v12_1_get_rb_active_bitmap(adev, xcc_id);

		/* generate active rb bitmap according to active sa bitmap */
		max_sa = adev->gfx.config.max_shader_engines *
			 adev->gfx.config.max_sh_per_se;
		rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se;
		for (i = 0; i < max_sa; i++) {
			if (active_sa_bitmap & (1 << i))
				active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
		}

		active_rb_bitmap &= global_active_rb_bitmap;
	}

	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

static void gfx_v12_1_xcc_init_compute_vmid(struct amdgpu_device *adev,
					    int xcc_id)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:     0x20000000'00000000 - 0x20000001'00000000 (4GB)
	 * Scratch: 0x10000000'00000000 - 0x10000001'00000000 (4GB)
	 */
	sh_mem_bases = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				     (adev->gmc.private_aperture_start >> 48));
	sh_mem_bases = REG_SET_FIELD(sh_mem_bases, SH_MEM_BASES, SHARED_BASE,
				     (adev->gmc.shared_aperture_start >> 48));

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);

		/* Disable VGPR deallocation instruction for each KFD vmid.
		 */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG);
		data = REG_SET_FIELD(data, SQ_DEBUG, DISABLE_VGPR_DEALLOC, 1);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_DEBUG, data);
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_1_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v12_1_get_tcc_info(struct amdgpu_device *adev)
{
}

static void gfx_v12_1_xcc_constants_init(struct amdgpu_device *adev,
					 int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc_v1_0_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, tmp);
		}
	}
	soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v12_1_xcc_init_compute_vmid(adev, xcc_id);
}

static void gfx_v12_1_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v12_1_setup_rb(adev);
	gfx_v12_1_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v12_1_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	for (i = 0; i < num_xcc; i++)
		gfx_v12_1_xcc_constants_init(adev, i);
}

static void gfx_v12_1_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						    bool enable, int xcc_id)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ?
1 : 0); 1507 1508 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp); 1509 } 1510 1511 static int gfx_v12_1_xcc_init_csb(struct amdgpu_device *adev, 1512 int xcc_id) 1513 { 1514 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 1515 1516 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_HI, 1517 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1518 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CSIB_ADDR_LO, 1519 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1520 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1521 regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 1522 1523 return 0; 1524 } 1525 1526 static void gfx_v12_1_xcc_rlc_stop(struct amdgpu_device *adev, 1527 int xcc_id) 1528 { 1529 u32 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL); 1530 1531 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 1532 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CNTL, tmp); 1533 } 1534 1535 static void gfx_v12_1_rlc_stop(struct amdgpu_device *adev) 1536 { 1537 int i, num_xcc; 1538 1539 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1540 for (i = 0; i < num_xcc; i++) 1541 gfx_v12_1_xcc_rlc_stop(adev, i); 1542 } 1543 1544 static void gfx_v12_1_xcc_rlc_reset(struct amdgpu_device *adev, 1545 int xcc_id) 1546 { 1547 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), 1548 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 1549 udelay(50); 1550 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), 1551 GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 1552 udelay(50); 1553 } 1554 1555 static void gfx_v12_1_rlc_reset(struct amdgpu_device *adev) 1556 { 1557 int i, num_xcc; 1558 1559 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1560 for (i = 0; i < num_xcc; i++) 1561 gfx_v12_1_xcc_rlc_reset(adev, i); 1562 } 1563 1564 static void gfx_v12_1_xcc_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 1565 bool enable, int xcc_id) 1566 { 1567 uint32_t rlc_pg_cntl; 1568 1569 rlc_pg_cntl = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL); 1570 1571 if (!enable) { 1572 /* RLC_PG_CNTL[23] = 0 (default) 1573 * RLC will wait for handshake acks with SMU 1574 * GFXOFF will be enabled 1575 * RLC_PG_CNTL[23] = 1 1576 * RLC will not issue any message to SMU 1577 * hence no handshake between SMU & RLC 1578 * GFXOFF will be disabled 1579 */ 1580 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1581 } else 1582 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1583 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, rlc_pg_cntl); 1584 } 1585 1586 static void gfx_v12_1_xcc_rlc_start(struct amdgpu_device *adev, 1587 int xcc_id) 1588 { 1589 /* TODO: enable rlc & smu handshake until smu 1590 * and gfxoff feature works as expected */ 1591 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 1592 gfx_v12_1_xcc_rlc_smu_handshake_cntl(adev, false, xcc_id); 1593 1594 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, RLC_ENABLE_F32, 1); 1595 udelay(50); 1596 } 1597 1598 static void gfx_v12_1_rlc_start(struct amdgpu_device *adev) 1599 { 1600 int i, num_xcc; 1601 1602 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1603 for (i = 0; i < num_xcc; i++) { 1604 gfx_v12_1_xcc_rlc_start(adev, i); 1605 } 1606 } 1607 1608 static void gfx_v12_1_xcc_rlc_enable_srm(struct amdgpu_device *adev, 1609 int xcc_id) 1610 { 1611 uint32_t tmp; 1612 1613 /* enable Save Restore Machine */ 1614 tmp = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL)); 1615 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 1616 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 1617 WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SRM_CNTL), tmp); 1618 } 1619 1620 static 
void gfx_v12_1_xcc_load_rlcg_microcode(struct amdgpu_device *adev, 1621 int xcc_id) 1622 { 1623 const struct rlc_firmware_header_v2_0 *hdr; 1624 const __le32 *fw_data; 1625 unsigned i, fw_size; 1626 1627 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1628 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1629 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1630 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 1631 1632 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, 1633 RLCG_UCODE_LOADING_START_ADDRESS); 1634 1635 for (i = 0; i < fw_size; i++) 1636 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1637 regRLC_GPM_UCODE_DATA, 1638 le32_to_cpup(fw_data++)); 1639 1640 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1641 regRLC_GPM_UCODE_ADDR, 1642 adev->gfx.rlc_fw_version); 1643 } 1644 1645 static void gfx_v12_1_xcc_load_rlc_iram_dram_microcode(struct amdgpu_device *adev, 1646 int xcc_id) 1647 { 1648 const struct rlc_firmware_header_v2_2 *hdr; 1649 const __le32 *fw_data; 1650 unsigned i, fw_size; 1651 u32 tmp; 1652 1653 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1654 1655 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1656 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 1657 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 1658 1659 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_IRAM_ADDR, 0); 1660 1661 for (i = 0; i < fw_size; i++) { 1662 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1663 msleep(1); 1664 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1665 regRLC_LX6_IRAM_DATA, 1666 le32_to_cpup(fw_data++)); 1667 } 1668 1669 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1670 regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1671 1672 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1673 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 1674 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 1675 1676 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1677 regRLC_LX6_DRAM_ADDR, 0); 1678 for (i = 0; i < fw_size; i++) { 1679 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1680 msleep(1); 1681 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1682 regRLC_LX6_DRAM_DATA, 1683 le32_to_cpup(fw_data++)); 1684 } 1685 1686 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 1687 regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1688 1689 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL); 1690 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 1691 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 1692 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_LX6_CNTL, tmp); 1693 } 1694 1695 static int gfx_v12_1_xcc_rlc_load_microcode(struct amdgpu_device *adev, 1696 int xcc_id) 1697 { 1698 const struct rlc_firmware_header_v2_0 *hdr; 1699 uint16_t version_major; 1700 uint16_t version_minor; 1701 1702 if (!adev->gfx.rlc_fw) 1703 return -EINVAL; 1704 1705 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1706 amdgpu_ucode_print_rlc_hdr(&hdr->header); 1707 1708 version_major = le16_to_cpu(hdr->header.header_version_major); 1709 version_minor = le16_to_cpu(hdr->header.header_version_minor); 1710 1711 if (version_major == 2) { 1712 gfx_v12_1_xcc_load_rlcg_microcode(adev, xcc_id); 1713 if (amdgpu_dpm == 1) { 1714 if (version_minor >= 2) 1715 gfx_v12_1_xcc_load_rlc_iram_dram_microcode(adev, xcc_id); 1716 } 1717 1718 return 0; 1719 } 1720 1721 return -EINVAL; 1722 } 1723 1724 static int gfx_v12_1_xcc_rlc_resume(struct amdgpu_device *adev, 1725 int xcc_id) 1726 { 1727 int r; 1728 1729 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1730 
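		/* Front-door (PSP) load: the RLC ucode is brought up by the
		 * PSP, so only program the clear-state buffer here and, on
		 * bare metal, enable the RLC save/restore machine.
		 */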
gfx_v12_1_xcc_init_csb(adev, xcc_id); 1731 1732 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 1733 gfx_v12_1_xcc_rlc_enable_srm(adev, xcc_id); 1734 } else { 1735 if (amdgpu_sriov_vf(adev)) { 1736 gfx_v12_1_xcc_init_csb(adev, xcc_id); 1737 return 0; 1738 } 1739 1740 gfx_v12_1_xcc_rlc_stop(adev, xcc_id); 1741 1742 /* disable CG */ 1743 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0); 1744 1745 /* disable PG */ 1746 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_PG_CNTL, 0); 1747 1748 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 1749 /* legacy rlc firmware loading */ 1750 r = gfx_v12_1_xcc_rlc_load_microcode(adev, xcc_id); 1751 if (r) 1752 return r; 1753 } 1754 1755 gfx_v12_1_xcc_init_csb(adev, xcc_id); 1756 1757 gfx_v12_1_xcc_rlc_start(adev, xcc_id); 1758 } 1759 1760 return 0; 1761 } 1762 1763 static int gfx_v12_1_rlc_resume(struct amdgpu_device *adev) 1764 { 1765 int r, i, num_xcc; 1766 1767 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1768 for (i = 0; i < num_xcc; i++) { 1769 r = gfx_v12_1_xcc_rlc_resume(adev, i); 1770 if (r) 1771 return r; 1772 } 1773 1774 return 0; 1775 } 1776 1777 static void gfx_v12_1_xcc_config_gfx_rs64(struct amdgpu_device *adev, 1778 int xcc_id) 1779 { 1780 const struct gfx_firmware_header_v2_0 *mec_hdr; 1781 uint32_t pipe_id, tmp; 1782 1783 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 1784 adev->gfx.mec_fw->data; 1785 1786 /* config mec program start addr */ 1787 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 1788 soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id)); 1789 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START, 1790 mec_hdr->ucode_start_addr_lo >> 2 | 1791 mec_hdr->ucode_start_addr_hi << 30); 1792 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI, 1793 mec_hdr->ucode_start_addr_hi >> 2); 1794 } 1795 soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0)); 1796 1797 /* reset mec pipe */ 1798 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL); 1799 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 1800 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 1801 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 1802 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 1803 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp); 1804 1805 /* clear mec pipe reset */ 1806 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 1807 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 1808 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 1809 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 1810 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, tmp); 1811 } 1812 1813 static void gfx_v12_1_config_gfx_rs64(struct amdgpu_device *adev) 1814 { 1815 int i, num_xcc; 1816 1817 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1818 1819 for (i = 0; i < num_xcc; i++) 1820 gfx_v12_1_xcc_config_gfx_rs64(adev, i); 1821 } 1822 1823 static void gfx_v12_1_xcc_set_mec_ucode_start_addr(struct amdgpu_device *adev, 1824 int xcc_id) 1825 { 1826 const struct gfx_firmware_header_v2_0 *cp_hdr; 1827 unsigned pipe_id; 1828 1829 cp_hdr = (const struct gfx_firmware_header_v2_0 *) 1830 adev->gfx.mec_fw->data; 1831 mutex_lock(&adev->srbm_mutex); 1832 for (pipe_id = 0; pipe_id < adev->gfx.mec.num_pipe_per_mec; pipe_id++) { 1833 soc_v1_0_grbm_select(adev, 1, pipe_id, 0, 0, GET_INST(GC, xcc_id)); 1834 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START, 1835 
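/* ucode entry-point packing, as implied by the shifts below rather than a
 * register spec: the low register receives start-address bits [33:2]
 * (addr_lo >> 2 fills bits [29:0], addr_hi << 30 supplies bits [33:32]);
 * the _HI register then takes the remaining upper bits, addr_hi >> 2. */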
cp_hdr->ucode_start_addr_lo >> 2 | 1836 cp_hdr->ucode_start_addr_hi << 30); 1837 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_PRGRM_CNTR_START_HI, 1838 cp_hdr->ucode_start_addr_hi >> 2); 1839 } 1840 soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0)); 1841 mutex_unlock(&adev->srbm_mutex); 1842 } 1843 1844 static int gfx_v12_1_xcc_wait_for_rlc_autoload_complete(struct amdgpu_device *adev, 1845 int xcc_id) 1846 { 1847 uint32_t cp_status; 1848 uint32_t bootload_status; 1849 int i; 1850 1851 for (i = 0; i < adev->usec_timeout; i++) { 1852 cp_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_STAT); 1853 bootload_status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), 1854 regRLC_RLCS_BOOTLOAD_STATUS); 1855 1856 if ((cp_status == 0) && 1857 (REG_GET_FIELD(bootload_status, 1858 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 1859 break; 1860 } 1861 udelay(1); 1862 if (amdgpu_emu_mode) 1863 msleep(10); 1864 } 1865 1866 if (i >= adev->usec_timeout) { 1867 dev_err(adev->dev, 1868 "rlc autoload: xcc%d gc ucode autoload timeout\n", xcc_id); 1869 return -ETIMEDOUT; 1870 } 1871 1872 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1873 gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id); 1874 } 1875 1876 return 0; 1877 } 1878 1879 static int gfx_v12_1_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 1880 { 1881 int xcc_id; 1882 1883 for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) 1884 gfx_v12_1_xcc_wait_for_rlc_autoload_complete(adev, xcc_id); 1885 1886 return 0; 1887 } 1888 1889 static void gfx_v12_1_xcc_cp_compute_enable(struct amdgpu_device *adev, 1890 bool enable, int xcc_id) 1891 { 1892 u32 data; 1893 1894 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL); 1895 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 1896 enable ? 0 : 1); 1897 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1898 enable ? 0 : 1); 1899 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1900 enable ? 0 : 1); 1901 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1902 enable ? 0 : 1); 1903 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1904 enable ? 0 : 1); 1905 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 1906 enable ? 1 : 0); 1907 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 1908 enable ? 1 : 0); 1909 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 1910 enable ? 1 : 0); 1911 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 1912 enable ? 1 : 0); 1913 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 1914 enable ? 
0 : 1); 1915 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_RS64_CNTL, data); 1916 1917 adev->gfx.kiq[xcc_id].ring.sched.ready = enable; 1918 1919 udelay(50); 1920 1921 } 1922 static int gfx_v12_1_xcc_cp_compute_load_microcode_rs64(struct amdgpu_device *adev, 1923 uint16_t xcc_mask) 1924 { 1925 const struct gfx_firmware_header_v2_0 *mec_hdr; 1926 const __le32 *fw_ucode, *fw_data; 1927 u32 tmp, fw_ucode_size, fw_data_size; 1928 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 1929 u32 *fw_ucode_ptr, *fw_data_ptr; 1930 int r, xcc_id; 1931 1932 if (!adev->gfx.mec_fw) 1933 return -EINVAL; 1934 1935 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 1936 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 1937 1938 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 1939 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 1940 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 1941 1942 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1943 le32_to_cpu(mec_hdr->data_offset_bytes)); 1944 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 1945 1946 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 1947 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 1948 &adev->gfx.mec.mec_fw_obj, 1949 &adev->gfx.mec.mec_fw_gpu_addr, 1950 (void **)&fw_ucode_ptr); 1951 if (r) { 1952 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 1953 gfx_v12_1_mec_fini(adev); 1954 return r; 1955 } 1956 1957 r = amdgpu_bo_create_reserved(adev, 1958 ALIGN(fw_data_size, 64 * 1024) * 1959 adev->gfx.mec.num_pipe_per_mec * NUM_XCC(xcc_mask), 1960 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 1961 &adev->gfx.mec.mec_fw_data_obj, 1962 &adev->gfx.mec.mec_fw_data_gpu_addr, 1963 (void **)&fw_data_ptr); 1964 if (r) { 1965 dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r); 1966 gfx_v12_1_mec_fini(adev); 1967 return r; 1968 } 1969 1970 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); 1971 for (xcc_id = 0; xcc_id < NUM_XCC(xcc_mask); xcc_id++) { 1972 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 1973 u32 offset = (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) * 1974 ALIGN(fw_data_size, 64 * 1024) / 4; 1975 memcpy(fw_data_ptr + offset, fw_data, fw_data_size); 1976 } 1977 } 1978 1979 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 1980 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); 1981 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 1982 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); 1983 1984 for (xcc_id = 0; xcc_id < NUM_XCC(xcc_mask); xcc_id++) { 1985 gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id); 1986 1987 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL); 1988 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 1989 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 1990 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 1991 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp); 1992 1993 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL); 1994 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 1995 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 1996 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_BASE_CNTL, tmp); 1997 1998 mutex_lock(&adev->srbm_mutex); 1999 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2000 soc_v1_0_grbm_select(adev, 1, i, 0, 0, GET_INST(GC, xcc_id)); 2001 2002 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_LO, 2003 lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + 2004 (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) * 2005 ALIGN(fw_data_size, 64 *
1024))); 2006 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_MDBASE_HI, 2007 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr + 2008 (xcc_id * adev->gfx.mec.num_pipe_per_mec + i) * 2009 ALIGN(fw_data_size, 64 * 1024))); 2010 2011 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO, 2012 lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2013 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI, 2014 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2015 } 2016 mutex_unlock(&adev->srbm_mutex); 2017 soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0)); 2018 2019 /* Trigger an invalidation of the L1 instruction caches */ 2020 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL); 2021 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2022 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL, tmp); 2023 2024 /* Wait for invalidation complete */ 2025 for (i = 0; i < usec_timeout; i++) { 2026 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DC_OP_CNTL); 2027 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2028 INVALIDATE_DCACHE_COMPLETE)) 2029 break; 2030 udelay(1); 2031 } 2032 2033 if (i >= usec_timeout) { 2034 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2035 return -EINVAL; 2036 } 2037 2038 /* Trigger an invalidation of the L1 instruction caches */ 2039 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL); 2040 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2041 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL, tmp); 2042 2043 /* Wait for invalidation complete */ 2044 for (i = 0; i < usec_timeout; i++) { 2045 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_OP_CNTL); 2046 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2047 INVALIDATE_CACHE_COMPLETE)) 2048 break; 2049 udelay(1); 2050 } 2051 2052 if (i >= usec_timeout) { 2053 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2054 return -EINVAL; 2055 } 2056 2057 gfx_v12_1_xcc_set_mec_ucode_start_addr(adev, xcc_id); 2058 } 2059 2060 return 0; 2061 } 2062 2063 static void gfx_v12_1_xcc_kiq_setting(struct amdgpu_ring *ring, 2064 int xcc_id) 2065 { 2066 uint32_t tmp; 2067 struct amdgpu_device *adev = ring->adev; 2068 2069 /* tell RLC which is KIQ queue */ 2070 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); 2071 tmp &= 0xffffff00; 2072 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 2073 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); 2074 tmp |= 0x80; 2075 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); 2076 } 2077 2078 static void gfx_v12_1_xcc_cp_set_doorbell_range(struct amdgpu_device *adev, 2079 int xcc_id) 2080 { 2081 /* disable gfx engine doorbell range */ 2082 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_LOWER, 0); 2083 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_DOORBELL_RANGE_UPPER, 0); 2084 2085 /* set compute engine doorbell range */ 2086 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER, 2087 ((adev->doorbell_index.kiq + 2088 xcc_id * adev->doorbell_index.xcc_doorbell_range) * 2089 2) << 2); 2090 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER, 2091 ((adev->doorbell_index.userqueue_end + 2092 xcc_id * adev->doorbell_index.xcc_doorbell_range) * 2093 2) << 2); 2094 } 2095 2096 static int gfx_v12_1_compute_mqd_init(struct amdgpu_device *adev, void *m, 2097 struct amdgpu_mqd_prop *prop) 2098 { 2099 struct v12_1_compute_mqd *mqd = m; 2100 uint64_t hqd_gpu_addr, 
wb_gpu_addr, eop_base_addr; 2101 uint32_t tmp; 2102 2103 mqd->header = 0xC0310800; 2104 mqd->compute_pipelinestat_enable = 0x00000001; 2105 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 2106 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 2107 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 2108 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 2109 mqd->compute_misc_reserved = 0x00000007; 2110 2111 eop_base_addr = prop->eop_gpu_addr >> 8; 2112 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 2113 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 2114 2115 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2116 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_EOP_CONTROL); 2117 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 2118 (order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1)); 2119 2120 mqd->cp_hqd_eop_control = tmp; 2121 2122 /* enable doorbell? */ 2123 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL); 2124 2125 if (prop->use_doorbell) { 2126 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2127 DOORBELL_OFFSET, prop->doorbell_index); 2128 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2129 DOORBELL_EN, 1); 2130 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2131 DOORBELL_SOURCE, 0); 2132 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2133 DOORBELL_HIT, 0); 2134 } else { 2135 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2136 DOORBELL_EN, 0); 2137 } 2138 2139 mqd->cp_hqd_pq_doorbell_control = tmp; 2140 2141 /* disable the queue if it's active */ 2142 mqd->cp_hqd_dequeue_request = 0; 2143 mqd->cp_hqd_pq_rptr = 0; 2144 mqd->cp_hqd_pq_wptr_lo = 0; 2145 mqd->cp_hqd_pq_wptr_hi = 0; 2146 2147 /* set the pointer to the MQD */ 2148 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 2149 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 2150 2151 /* set MQD vmid to 0 */ 2152 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_MQD_CONTROL); 2153 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 2154 mqd->cp_mqd_control = tmp; 2155 2156 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2157 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 2158 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 2159 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 2160 2161 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2162 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_CONTROL); 2163 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 2164 (order_base_2(prop->queue_size / 4) - 1)); 2165 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 2166 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 2167 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 2168 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 2169 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 2170 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 2171 mqd->cp_hqd_pq_control = tmp; 2172 2173 /* set the wb address whether it's enabled or not */ 2174 wb_gpu_addr = prop->rptr_gpu_addr; 2175 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 2176 mqd->cp_hqd_pq_rptr_report_addr_hi = 2177 upper_32_bits(wb_gpu_addr) & 0xffff; 2178 2179 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2180 wb_gpu_addr = prop->wptr_gpu_addr; 2181 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2182 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2183 2184 tmp = 0; 2185 /* enable the doorbell if requested */ 2186 if (prop->use_doorbell) 
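/*
 * This recomputed value becomes the final mqd->cp_hqd_pq_doorbell_control,
 * overriding the word staged earlier in this function; it is what e.g.
 * gfx_v12_1_xcc_kiq_init_register() later writes to
 * regCP_HQD_PQ_DOORBELL_CONTROL.  When doorbells are not used, tmp stays 0
 * and the doorbell remains disabled.
 */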
{ 2187 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_DOORBELL_CONTROL); 2188 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2189 DOORBELL_OFFSET, prop->doorbell_index); 2190 2191 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2192 DOORBELL_EN, 1); 2193 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2194 DOORBELL_SOURCE, 0); 2195 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2196 DOORBELL_HIT, 0); 2197 } 2198 2199 mqd->cp_hqd_pq_doorbell_control = tmp; 2200 2201 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2202 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PQ_RPTR); 2203 2204 /* set the vmid for the queue */ 2205 mqd->cp_hqd_vmid = 0; 2206 2207 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_PERSISTENT_STATE); 2208 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x63); 2209 mqd->cp_hqd_persistent_state = tmp; 2210 2211 /* set MIN_IB_AVAIL_SIZE */ 2212 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HQD_IB_CONTROL); 2213 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 1); 2214 mqd->cp_hqd_ib_control = tmp; 2215 2216 /* set static priority for a compute queue/ring */ 2217 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 2218 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 2219 2220 mqd->cp_mqd_stride_size = prop->mqd_stride_size ? prop->mqd_stride_size : 2221 sizeof(struct v12_1_compute_mqd); 2222 2223 mqd->cp_hqd_active = prop->hqd_active; 2224 2225 return 0; 2226 } 2227 2228 static int gfx_v12_1_xcc_kiq_init_register(struct amdgpu_ring *ring, 2229 int xcc_id) 2230 { 2231 struct amdgpu_device *adev = ring->adev; 2232 struct v12_1_compute_mqd *mqd = ring->mqd_ptr; 2233 int j; 2234 2235 /* inactivate the queue */ 2236 if (amdgpu_sriov_vf(adev)) 2237 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0); 2238 2239 /* disable wptr polling */ 2240 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); 2241 2242 /* write the EOP addr */ 2243 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR, 2244 mqd->cp_hqd_eop_base_addr_lo); 2245 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI, 2246 mqd->cp_hqd_eop_base_addr_hi); 2247 2248 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2249 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL, 2250 mqd->cp_hqd_eop_control); 2251 2252 /* enable doorbell? 
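 (the value below was staged in the MQD by gfx_v12_1_compute_mqd_init(); the same register is written again later in this function, once the rest of the HQD state has been programmed)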
*/ 2253 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 2254 mqd->cp_hqd_pq_doorbell_control); 2255 2256 /* disable the queue if it's active */ 2257 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { 2258 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); 2259 for (j = 0; j < adev->usec_timeout; j++) { 2260 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) 2261 break; 2262 udelay(1); 2263 } 2264 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 2265 mqd->cp_hqd_dequeue_request); 2266 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 2267 mqd->cp_hqd_pq_rptr); 2268 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 2269 mqd->cp_hqd_pq_wptr_lo); 2270 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 2271 mqd->cp_hqd_pq_wptr_hi); 2272 } 2273 2274 /* set the pointer to the MQD */ 2275 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR, 2276 mqd->cp_mqd_base_addr_lo); 2277 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI, 2278 mqd->cp_mqd_base_addr_hi); 2279 2280 /* set MQD vmid to 0 */ 2281 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL, 2282 mqd->cp_mqd_control); 2283 2284 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2285 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE, 2286 mqd->cp_hqd_pq_base_lo); 2287 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI, 2288 mqd->cp_hqd_pq_base_hi); 2289 2290 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2291 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL, 2292 mqd->cp_hqd_pq_control); 2293 2294 /* set the wb address whether it's enabled or not */ 2295 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR, 2296 mqd->cp_hqd_pq_rptr_report_addr_lo); 2297 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 2298 mqd->cp_hqd_pq_rptr_report_addr_hi); 2299 2300 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2301 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR, 2302 mqd->cp_hqd_pq_wptr_poll_addr_lo); 2303 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 2304 mqd->cp_hqd_pq_wptr_poll_addr_hi); 2305 2306 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 2307 mqd->cp_hqd_pq_doorbell_control); 2308 2309 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2310 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 2311 mqd->cp_hqd_pq_wptr_lo); 2312 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 2313 mqd->cp_hqd_pq_wptr_hi); 2314 2315 /* set the vmid for the queue */ 2316 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid); 2317 2318 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 2319 mqd->cp_hqd_persistent_state); 2320 2321 /* activate the queue */ 2322 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 2323 mqd->cp_hqd_active); 2324 2325 if (ring->use_doorbell) 2326 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1); 2327 2328 return 0; 2329 } 2330 2331 static int gfx_v12_1_xcc_kiq_init_queue(struct amdgpu_ring *ring, 2332 int xcc_id) 2333 { 2334 struct amdgpu_device *adev = ring->adev; 2335 struct v12_1_compute_mqd *mqd = ring->mqd_ptr; 2336 2337 gfx_v12_1_xcc_kiq_setting(ring, xcc_id); 2338 2339 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 2340 /* reset MQD to a clean status */ 2341 if (adev->gfx.kiq[xcc_id].mqd_backup) 
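/* the backup is the image captured at first init in the else branch
 * below; restoring it gives a known-good MQD before the registers are
 * re-programmed */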
2342 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(*mqd)); 2343 2344 /* reset ring buffer */ 2345 ring->wptr = 0; 2346 amdgpu_ring_clear_ring(ring); 2347 2348 mutex_lock(&adev->srbm_mutex); 2349 soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 2350 gfx_v12_1_xcc_kiq_init_register(ring, xcc_id); 2351 soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2352 mutex_unlock(&adev->srbm_mutex); 2353 } else { 2354 memset((void *)mqd, 0, sizeof(*mqd)); 2355 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 2356 amdgpu_ring_clear_ring(ring); 2357 mutex_lock(&adev->srbm_mutex); 2358 soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 2359 amdgpu_ring_init_mqd(ring); 2360 gfx_v12_1_xcc_kiq_init_register(ring, xcc_id); 2361 soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2362 mutex_unlock(&adev->srbm_mutex); 2363 2364 if (adev->gfx.kiq[xcc_id].mqd_backup) 2365 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(*mqd)); 2366 } 2367 2368 return 0; 2369 } 2370 2371 static int gfx_v12_1_xcc_kcq_init_queue(struct amdgpu_ring *ring, 2372 int xcc_id) 2373 { 2374 struct amdgpu_device *adev = ring->adev; 2375 struct v12_1_compute_mqd *mqd = ring->mqd_ptr; 2376 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 2377 2378 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2379 memset((void *)mqd, 0, sizeof(*mqd)); 2380 mutex_lock(&adev->srbm_mutex); 2381 soc_v1_0_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 2382 amdgpu_ring_init_mqd(ring); 2383 soc_v1_0_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2384 mutex_unlock(&adev->srbm_mutex); 2385 2386 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2387 memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 2388 } else { 2389 /* restore MQD to a clean status */ 2390 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2391 memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 2392 /* reset ring buffer */ 2393 ring->wptr = 0; 2394 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 2395 amdgpu_ring_clear_ring(ring); 2396 } 2397 2398 return 0; 2399 } 2400 2401 static int gfx_v12_1_xcc_kiq_resume(struct amdgpu_device *adev, 2402 int xcc_id) 2403 { 2404 struct amdgpu_ring *ring; 2405 int r; 2406 2407 ring = &adev->gfx.kiq[xcc_id].ring; 2408 2409 r = amdgpu_bo_reserve(ring->mqd_obj, false); 2410 if (unlikely(r != 0)) 2411 return r; 2412 2413 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2414 if (unlikely(r != 0)) { 2415 amdgpu_bo_unreserve(ring->mqd_obj); 2416 return r; 2417 } 2418 2419 gfx_v12_1_xcc_kiq_init_queue(ring, xcc_id); 2420 amdgpu_bo_kunmap(ring->mqd_obj); 2421 ring->mqd_ptr = NULL; 2422 amdgpu_bo_unreserve(ring->mqd_obj); 2423 ring->sched.ready = true; 2424 return 0; 2425 } 2426 2427 static int gfx_v12_1_xcc_kcq_resume(struct amdgpu_device *adev, 2428 int xcc_id) 2429 { 2430 struct amdgpu_ring *ring = NULL; 2431 int r = 0, i; 2432 2433 if (!amdgpu_async_gfx_ring) 2434 gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id); 2435 2436 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2437 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; 2438 2439 r = amdgpu_bo_reserve(ring->mqd_obj, false); 2440 if (unlikely(r != 0)) 2441 goto done; 2442 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2443 if (!r) { 2444 r = gfx_v12_1_xcc_kcq_init_queue(ring, xcc_id); 2445 amdgpu_bo_kunmap(ring->mqd_obj); 2446 ring->mqd_ptr = NULL; 2447 } 2448 amdgpu_bo_unreserve(ring->mqd_obj); 2449 if 
(r) 2450 goto done; 2451 } 2452 2453 r = amdgpu_gfx_enable_kcq(adev, xcc_id); 2454 done: 2455 return r; 2456 } 2457 2458 static int gfx_v12_1_xcc_cp_resume(struct amdgpu_device *adev, uint16_t xcc_mask) 2459 { 2460 int r, i, xcc_id; 2461 struct amdgpu_ring *ring; 2462 2463 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2464 /* legacy firmware loading */ 2465 r = gfx_v12_1_xcc_cp_compute_load_microcode_rs64(adev, xcc_mask); 2466 if (r) 2467 return r; 2468 } 2469 2470 for (xcc_id = 0; xcc_id < NUM_XCC(xcc_mask); xcc_id++) { 2471 if (!(adev->flags & AMD_IS_APU)) 2472 gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); 2473 2474 gfx_v12_1_xcc_cp_set_doorbell_range(adev, xcc_id); 2475 2476 gfx_v12_1_xcc_cp_compute_enable(adev, true, xcc_id); 2477 2478 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 2479 r = amdgpu_mes_kiq_hw_init(adev, xcc_id); 2480 else 2481 r = gfx_v12_1_xcc_kiq_resume(adev, xcc_id); 2482 if (r) 2483 return r; 2484 2485 r = gfx_v12_1_xcc_kcq_resume(adev, xcc_id); 2486 if (r) 2487 return r; 2488 2489 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2490 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; 2491 r = amdgpu_ring_test_helper(ring); 2492 if (r) 2493 return r; 2494 } 2495 } 2496 2497 return 0; 2498 } 2499 2500 static int gfx_v12_1_cp_resume(struct amdgpu_device *adev) 2501 { 2502 int num_xcc, num_xcp, num_xcc_per_xcp; 2503 int r = 0; 2504 2505 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2506 if (amdgpu_sriov_vf(adev)) { 2507 enum amdgpu_gfx_partition mode; 2508 2509 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2510 AMDGPU_XCP_FL_NONE); 2511 if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2512 return -EINVAL; 2513 if (adev->gfx.funcs && 2514 adev->gfx.funcs->get_xccs_per_xcp) { 2515 num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev); 2516 adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp; 2517 num_xcp = num_xcc / num_xcc_per_xcp; 2518 } else { 2519 return -EINVAL; 2520 } 2521 r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode); 2522 2523 } else { 2524 if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2525 AMDGPU_XCP_FL_NONE) == 2526 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2527 r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, 2528 amdgpu_user_partt_mode); 2529 } 2530 2531 if (r) 2532 return r; 2533 2534 return gfx_v12_1_xcc_cp_resume(adev, adev->gfx.xcc_mask); 2535 } 2536 2537 static int gfx_v12_1_gfxhub_enable(struct amdgpu_device *adev) 2538 { 2539 int r, i; 2540 bool value; 2541 2542 r = adev->gfxhub.funcs->gart_enable(adev); 2543 if (r) 2544 return r; 2545 2546 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 2547 false : true; 2548 2549 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 2550 /* TODO investigate why TLB flush is needed, 2551 * are we missing a flush somewhere else? 
*/ 2552 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 2553 if (AMDGPU_IS_GFXHUB(i)) 2554 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(i), 0); 2555 } 2556 2557 return 0; 2558 } 2559 2560 static int get_gb_addr_config(struct amdgpu_device *adev) 2561 { 2562 u32 gb_addr_config; 2563 2564 gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG_READ); 2565 if (gb_addr_config == 0) 2566 return -EINVAL; 2567 2568 adev->gfx.config.gb_addr_config_fields.num_pkrs = 2569 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG_READ, NUM_PKRS); 2570 2571 adev->gfx.config.gb_addr_config = gb_addr_config; 2572 2573 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 2574 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 2575 GB_ADDR_CONFIG_READ, NUM_PIPES); 2576 2577 adev->gfx.config.max_tile_pipes = 2578 adev->gfx.config.gb_addr_config_fields.num_pipes; 2579 2580 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 2581 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 2582 GB_ADDR_CONFIG_READ, MAX_COMPRESSED_FRAGS); 2583 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 2584 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 2585 GB_ADDR_CONFIG_READ, NUM_RB_PER_SE); 2586 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 2587 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 2588 GB_ADDR_CONFIG_READ, NUM_SHADER_ENGINES); 2589 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 2590 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 2591 GB_ADDR_CONFIG_READ, PIPE_INTERLEAVE_SIZE)); 2592 2593 return 0; 2594 } 2595 2596 static void gfx_v12_1_xcc_disable_gpa_mode(struct amdgpu_device *adev, 2597 int xcc_id) 2598 { 2599 uint32_t data; 2600 2601 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG); 2602 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 2603 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data); 2604 2605 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG); 2606 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 2607 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPG_PSP_DEBUG, data); 2608 } 2609 2610 static void gfx_v12_1_xcc_enable_atomics(struct amdgpu_device *adev, 2611 int xcc_id) 2612 { 2613 uint32_t data; 2614 2615 /* Set the TCP UTCL0 register to enable atomics */ 2616 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1); 2617 data = REG_SET_FIELD(data, TCP_UTCL0_CNTL1, ATOMIC_REQUESTER_EN, 0x1); 2618 2619 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regTCP_UTCL0_CNTL1, data); 2620 } 2621 2622 static void gfx_v12_1_init_golden_registers(struct amdgpu_device *adev) 2623 { 2624 int i; 2625 2626 for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); i++) 2627 gfx_v12_1_xcc_enable_atomics(adev, i); 2628 } 2629 2630 static int gfx_v12_1_hw_init(struct amdgpu_ip_block *ip_block) 2631 { 2632 int r, i, num_xcc; 2633 struct amdgpu_device *adev = ip_block->adev; 2634 2635 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2636 /* rlc autoload firmware */ 2637 r = gfx_v12_1_rlc_backdoor_autoload_enable(adev); 2638 if (r) 2639 return r; 2640 } else { 2641 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2642 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2643 2644 if (adev->gfx.imu.funcs) { 2645 if (adev->gfx.imu.funcs->load_microcode) 2646 adev->gfx.imu.funcs->load_microcode(adev); 2647 } 2648 2649 for (i = 0; i < num_xcc; i++) { 2650 /* disable gpa mode in backdoor loading */ 2651 gfx_v12_1_xcc_disable_gpa_mode(adev, i); 2652 } 2653 } 2654 } 2655 2656 if ((adev->firmware.load_type == 
AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 2657 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 2658 r = gfx_v12_1_wait_for_rlc_autoload_complete(adev); 2659 if (r) { 2660 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r); 2661 return r; 2662 } 2663 } 2664 2665 adev->gfx.is_poweron = true; 2666 2667 if (get_gb_addr_config(adev)) 2668 DRM_WARN("Invalid gb_addr_config !\n"); 2669 2670 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2671 gfx_v12_1_config_gfx_rs64(adev); 2672 2673 r = gfx_v12_1_gfxhub_enable(adev); 2674 if (r) 2675 return r; 2676 2677 gfx_v12_1_init_golden_registers(adev); 2678 2679 gfx_v12_1_constants_init(adev); 2680 2681 if (adev->nbio.funcs->gc_doorbell_init) 2682 adev->nbio.funcs->gc_doorbell_init(adev); 2683 2684 r = gfx_v12_1_rlc_resume(adev); 2685 if (r) 2686 return r; 2687 2688 /* 2689 * init golden registers and rlc resume may override some registers, 2690 * reconfig them here 2691 */ 2692 gfx_v12_1_tcp_harvest(adev); 2693 2694 r = gfx_v12_1_cp_resume(adev); 2695 if (r) 2696 return r; 2697 2698 return r; 2699 } 2700 2701 static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev, 2702 int xcc_id) 2703 { 2704 uint32_t tmp; 2705 2706 if (!adev->no_hw_access) { 2707 if (amdgpu_gfx_disable_kcq(adev, xcc_id)) 2708 DRM_ERROR("KCQ disable failed\n"); 2709 2710 amdgpu_mes_kiq_hw_fini(adev, xcc_id); 2711 } 2712 2713 if (amdgpu_sriov_vf(adev)) { 2714 /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ 2715 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); 2716 tmp &= 0xffffff00; 2717 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); 2718 } 2719 gfx_v12_1_xcc_cp_compute_enable(adev, false, xcc_id); 2720 gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); 2721 } 2722 2723 static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block) 2724 { 2725 struct amdgpu_device *adev = ip_block->adev; 2726 int i, num_xcc; 2727 2728 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 2729 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 2730 2731 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2732 for (i = 0; i < num_xcc; i++) { 2733 gfx_v12_1_xcc_fini(adev, i); 2734 } 2735 2736 adev->gfxhub.funcs->gart_disable(adev); 2737 2738 adev->gfx.is_poweron = false; 2739 2740 return 0; 2741 } 2742 2743 static int gfx_v12_1_suspend(struct amdgpu_ip_block *ip_block) 2744 { 2745 return gfx_v12_1_hw_fini(ip_block); 2746 } 2747 2748 static int gfx_v12_1_resume(struct amdgpu_ip_block *ip_block) 2749 { 2750 return gfx_v12_1_hw_init(ip_block); 2751 } 2752 2753 static bool gfx_v12_1_is_idle(struct amdgpu_ip_block *ip_block) 2754 { 2755 struct amdgpu_device *adev = ip_block->adev; 2756 int i, num_xcc; 2757 2758 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2759 for (i = 0; i < num_xcc; i++) { 2760 if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), 2761 regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) 2762 return false; 2763 } 2764 return true; 2765 } 2766 2767 static int gfx_v12_1_wait_for_idle(struct amdgpu_ip_block *ip_block) 2768 { 2769 unsigned i; 2770 struct amdgpu_device *adev = ip_block->adev; 2771 2772 for (i = 0; i < adev->usec_timeout; i++) { 2773 if (gfx_v12_1_is_idle(ip_block)) 2774 return 0; 2775 udelay(1); 2776 } 2777 return -ETIMEDOUT; 2778 } 2779 2780 static uint64_t gfx_v12_1_get_gpu_clock_counter(struct amdgpu_device *adev) 2781 { 2782 uint64_t clock = 0; 2783 2784 if (adev->smuio.funcs && 2785 adev->smuio.funcs->get_gpu_clock_counter) 2786 clock = adev->smuio.funcs->get_gpu_clock_counter(adev); 2787 else 2788 dev_warn(adev->dev, "query 
gpu clock counter is not supported\n"); 2789 2790 return clock; 2791 } 2792 2793 static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block) 2794 { 2795 struct amdgpu_device *adev = ip_block->adev; 2796 2797 adev->gfx.funcs = &gfx_v12_1_gfx_funcs; 2798 2799 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 2800 AMDGPU_MAX_COMPUTE_RINGS); 2801 2802 gfx_v12_1_set_kiq_pm4_funcs(adev); 2803 gfx_v12_1_set_ring_funcs(adev); 2804 gfx_v12_1_set_irq_funcs(adev); 2805 gfx_v12_1_set_rlc_funcs(adev); 2806 gfx_v12_1_set_mqd_funcs(adev); 2807 gfx_v12_1_set_imu_funcs(adev); 2808 2809 gfx_v12_1_init_rlcg_reg_access_ctrl(adev); 2810 2811 return gfx_v12_1_init_microcode(adev); 2812 } 2813 2814 static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block) 2815 { 2816 struct amdgpu_device *adev = ip_block->adev; 2817 int r; 2818 2819 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 2820 if (r) 2821 return r; 2822 2823 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 2824 if (r) 2825 return r; 2826 2827 return 0; 2828 } 2829 2830 static bool gfx_v12_1_is_rlc_enabled(struct amdgpu_device *adev) 2831 { 2832 uint32_t rlc_cntl; 2833 2834 /* if RLC is not enabled, do nothing */ 2835 rlc_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL); 2836 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 2837 } 2838 2839 static void gfx_v12_1_xcc_set_safe_mode(struct amdgpu_device *adev, 2840 int xcc_id) 2841 { 2842 uint32_t data; 2843 unsigned i; 2844 2845 data = RLC_SAFE_MODE__CMD_MASK; 2846 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 2847 2848 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data); 2849 2850 /* wait for RLC_SAFE_MODE */ 2851 for (i = 0; i < adev->usec_timeout; i++) { 2852 if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), 2853 regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) 2854 break; 2855 udelay(1); 2856 } 2857 } 2858 2859 static void gfx_v12_1_xcc_unset_safe_mode(struct amdgpu_device *adev, 2860 int xcc_id) 2861 { 2862 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2863 regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 2864 } 2865 2866 static void gfx_v12_1_update_perf_clk(struct amdgpu_device *adev, 2867 bool enable) 2868 { 2869 int i, num_xcc; 2870 2871 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2872 for (i = 0; i < num_xcc; i++) 2873 gfx_v12_1_xcc_update_perf_clk(adev, enable, i); 2874 } 2875 2876 static void gfx_v12_1_update_spm_vmid(struct amdgpu_device *adev, 2877 int xcc_id, 2878 struct amdgpu_ring *ring, 2879 unsigned vmid) 2880 { 2881 u32 reg, data; 2882 2883 reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL); 2884 if (amdgpu_sriov_is_pp_one_vf(adev)) 2885 data = RREG32_NO_KIQ(reg); 2886 else 2887 data = RREG32(reg); 2888 2889 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 2890 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 2891 2892 if (amdgpu_sriov_is_pp_one_vf(adev)) 2893 WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data); 2894 else 2895 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL, data); 2896 2897 if (ring 2898 && amdgpu_sriov_is_pp_one_vf(adev) 2899 && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) 2900 || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { 2901 uint32_t reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPM_MC_CNTL); 2902 amdgpu_ring_emit_wreg(ring, reg, data); 2903 } 2904 } 2905 2906 static const struct amdgpu_rlc_funcs gfx_v12_1_rlc_funcs = { 2907 .is_rlc_enabled = gfx_v12_1_is_rlc_enabled, 2908 .set_safe_mode = 
gfx_v12_1_xcc_set_safe_mode, 2909 .unset_safe_mode = gfx_v12_1_xcc_unset_safe_mode, 2910 .init = gfx_v12_1_rlc_init, 2911 .get_csb_size = gfx_v12_1_get_csb_size, 2912 .get_csb_buffer = gfx_v12_1_get_csb_buffer, 2913 .resume = gfx_v12_1_rlc_resume, 2914 .stop = gfx_v12_1_rlc_stop, 2915 .reset = gfx_v12_1_rlc_reset, 2916 .start = gfx_v12_1_rlc_start, 2917 .update_spm_vmid = gfx_v12_1_update_spm_vmid, 2918 }; 2919 2920 #if 0 2921 static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable) 2922 { 2923 /* TODO */ 2924 } 2925 2926 static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable) 2927 { 2928 /* TODO */ 2929 } 2930 #endif 2931 2932 static int gfx_v12_1_set_powergating_state(struct amdgpu_ip_block *ip_block, 2933 enum amd_powergating_state state) 2934 { 2935 struct amdgpu_device *adev = ip_block->adev; 2936 bool enable = (state == AMD_PG_STATE_GATE); 2937 2938 if (amdgpu_sriov_vf(adev)) 2939 return 0; 2940 2941 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2942 case IP_VERSION(12, 1, 0): 2943 amdgpu_gfx_off_ctrl(adev, enable); 2944 break; 2945 default: 2946 break; 2947 } 2948 2949 return 0; 2950 } 2951 2952 static void gfx_v12_1_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 2953 bool enable, int xcc_id) 2954 { 2955 uint32_t def, data; 2956 2957 if (!(adev->cg_flags & 2958 (AMD_CG_SUPPORT_GFX_CGCG | 2959 AMD_CG_SUPPORT_GFX_CGLS | 2960 AMD_CG_SUPPORT_GFX_3D_CGCG | 2961 AMD_CG_SUPPORT_GFX_3D_CGLS))) 2962 return; 2963 2964 if (enable) { 2965 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), 2966 regRLC_CGTT_MGCG_OVERRIDE); 2967 2968 /* unset CGCG override */ 2969 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 2970 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 2971 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 2972 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 2973 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 2974 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 2975 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 2976 2977 /* update CGCG override bits */ 2978 if (def != data) 2979 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2980 regRLC_CGTT_MGCG_OVERRIDE, data); 2981 2982 /* enable cgcg FSM(0x0000363F) */ 2983 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); 2984 2985 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 2986 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 2987 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 2988 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 2989 } 2990 2991 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 2992 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 2993 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 2994 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 2995 } 2996 2997 if (def != data) 2998 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2999 regRLC_CGCG_CGLS_CTRL, data); 3000 3001 /* set IDLE_POLL_COUNT(0x00900100) */ 3002 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); 3003 3004 data &= ~CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK; 3005 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; 3006 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3007 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3008 3009 if (def != data) 3010 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); 3011 3012 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL); 3013 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 3014 data = 
REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 3015 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 3016 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 3017 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL, data); 3018 } else { 3019 /* Program RLC_CGCG_CGLS_CTRL */ 3020 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); 3021 3022 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 3023 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3024 3025 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3026 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3027 3028 if (def != data) 3029 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); 3030 } 3031 } 3032 3033 static void gfx_v12_1_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, 3034 bool enable, int xcc_id) 3035 { 3036 uint32_t data, def; 3037 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 3038 return; 3039 3040 /* It is disabled by HW by default */ 3041 if (enable) { 3042 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 3043 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3044 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 3045 3046 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3047 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3048 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 3049 3050 if (def != data) 3051 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 3052 } 3053 } else { 3054 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 3055 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 3056 3057 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3058 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3059 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 3060 3061 if (def != data) 3062 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 3063 } 3064 } 3065 } 3066 3067 static void gfx_v12_1_xcc_update_repeater_fgcg(struct amdgpu_device *adev, 3068 bool enable, int xcc_id) 3069 { 3070 uint32_t def, data; 3071 3072 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 3073 return; 3074 3075 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 3076 3077 if (enable) 3078 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK | 3079 RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK); 3080 else 3081 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK | 3082 RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK; 3083 3084 if (def != data) 3085 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 3086 } 3087 3088 static void gfx_v12_1_xcc_update_sram_fgcg(struct amdgpu_device *adev, 3089 bool enable, int xcc_id) 3090 { 3091 uint32_t def, data; 3092 3093 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 3094 return; 3095 3096 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 3097 3098 if (enable) 3099 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 3100 else 3101 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 3102 3103 if (def != data) 3104 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 3105 } 3106 3107 static void gfx_v12_1_xcc_update_perf_clk(struct amdgpu_device *adev, 3108 bool enable, int xcc_id) 3109 { 3110 uint32_t def, data; 3111 3112 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 3113 return; 3114 3115 def = data = RREG32_SOC15(GC, 
GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 3116 3117 if (enable) 3118 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 3119 else 3120 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 3121 3122 if (def != data) 3123 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 3124 } 3125 3126 static int gfx_v12_1_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, 3127 bool enable, int xcc_id) 3128 { 3129 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); 3130 3131 gfx_v12_1_xcc_update_coarse_grain_clock_gating(adev, enable, xcc_id); 3132 3133 gfx_v12_1_xcc_update_medium_grain_clock_gating(adev, enable, xcc_id); 3134 3135 gfx_v12_1_xcc_update_repeater_fgcg(adev, enable, xcc_id); 3136 3137 gfx_v12_1_xcc_update_sram_fgcg(adev, enable, xcc_id); 3138 3139 gfx_v12_1_xcc_update_perf_clk(adev, enable, xcc_id); 3140 3141 if (adev->cg_flags & 3142 (AMD_CG_SUPPORT_GFX_MGCG | 3143 AMD_CG_SUPPORT_GFX_CGLS | 3144 AMD_CG_SUPPORT_GFX_CGCG | 3145 AMD_CG_SUPPORT_GFX_3D_CGCG | 3146 AMD_CG_SUPPORT_GFX_3D_CGLS)) 3147 gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, enable, xcc_id); 3148 3149 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); 3150 3151 return 0; 3152 } 3153 3154 static int gfx_v12_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, 3155 enum amd_clockgating_state state) 3156 { 3157 struct amdgpu_device *adev = ip_block->adev; 3158 int i, num_xcc; 3159 3160 if (amdgpu_sriov_vf(adev)) 3161 return 0; 3162 3163 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3164 switch (adev->ip_versions[GC_HWIP][0]) { 3165 case IP_VERSION(12, 1, 0): 3166 for (i = 0; i < num_xcc; i++) 3167 gfx_v12_1_xcc_update_gfx_clock_gating(adev, 3168 state == AMD_CG_STATE_GATE, i); 3169 break; 3170 default: 3171 break; 3172 } 3173 3174 return 0; 3175 } 3176 3177 static void gfx_v12_1_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) 3178 { 3179 struct amdgpu_device *adev = ip_block->adev; 3180 int data; 3181 3182 /* AMD_CG_SUPPORT_GFX_MGCG */ 3183 data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE); 3184 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 3185 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 3186 3187 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 3188 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 3189 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 3190 3191 /* AMD_CG_SUPPORT_GFX_FGCG */ 3192 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 3193 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 3194 3195 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 3196 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 3197 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 3198 3199 /* AMD_CG_SUPPORT_GFX_CGCG */ 3200 data = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL); 3201 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 3202 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 3203 3204 /* AMD_CG_SUPPORT_GFX_CGLS */ 3205 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 3206 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 3207 } 3208 3209 static u64 gfx_v12_1_ring_get_rptr_compute(struct amdgpu_ring *ring) 3210 { 3211 /* gfx12 hardware is 32bit rptr */ 3212 return *(uint32_t *)ring->rptr_cpu_addr; 3213 } 3214 3215 static u64 gfx_v12_1_ring_get_wptr_compute(struct amdgpu_ring *ring) 3216 { 3217 u64 wptr; 3218 3219 /* XXX check if swapping is necessary on BE */ 3220 if (ring->use_doorbell) 3221 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 3222 else 3223 BUG(); 3224 return wptr; 3225 } 3226 3227 static void gfx_v12_1_ring_set_wptr_compute(struct amdgpu_ring *ring) 3228 { 3229 struct 
amdgpu_device *adev = ring->adev; 3230 3231 /* XXX check if swapping is necessary on BE */ 3232 if (ring->use_doorbell) { 3233 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 3234 ring->wptr); 3235 WDOORBELL64(ring->doorbell_index, ring->wptr); 3236 } else { 3237 BUG(); /* only DOORBELL method supported on gfx12 now */ 3238 } 3239 } 3240 3241 static void gfx_v12_1_ring_emit_ib_compute(struct amdgpu_ring *ring, 3242 struct amdgpu_job *job, 3243 struct amdgpu_ib *ib, 3244 uint32_t flags) 3245 { 3246 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 3247 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 3248 3249 /* Currently, there is a high possibility to get wave ID mismatch 3250 * between ME and GDS, leading to a hw deadlock, because ME generates 3251 * different wave IDs than the GDS expects. This situation happens 3252 * randomly when at least 5 compute pipes use GDS ordered append. 3253 * The wave IDs generated by ME are also wrong after suspend/resume. 3254 * Those are probably bugs somewhere else in the kernel driver. 3255 * 3256 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 3257 * GDS to 0 for this ring (me/pipe). 3258 */ 3259 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 3260 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3261 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 3262 } 3263 3264 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3265 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3266 amdgpu_ring_write(ring, 3267 #ifdef __BIG_ENDIAN 3268 (2 << 0) | 3269 #endif 3270 lower_32_bits(ib->gpu_addr)); 3271 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3272 amdgpu_ring_write(ring, control); 3273 } 3274 3275 static void gfx_v12_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 3276 u64 seq, unsigned flags) 3277 { 3278 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 3279 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 3280 3281 /* RELEASE_MEM - flush caches, send int */ 3282 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 3283 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ(1) | 3284 PACKET3_RELEASE_MEM_GCR_GLV_WB | 3285 PACKET3_RELEASE_MEM_GCR_GL2_WB | 3286 PACKET3_RELEASE_MEM_GCR_GL2_SCOPE(2) | 3287 PACKET3_RELEASE_MEM_TEMPORAL(3) | 3288 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3289 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 3290 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 3291 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 
2 : 0))); 3292 3293 /* 3294 * the address should be Qword aligned if 64bit write, Dword 3295 * aligned if only send 32bit data low (discard data high) 3296 */ 3297 if (write64bit) 3298 BUG_ON(addr & 0x7); 3299 else 3300 BUG_ON(addr & 0x3); 3301 amdgpu_ring_write(ring, lower_32_bits(addr)); 3302 amdgpu_ring_write(ring, upper_32_bits(addr)); 3303 amdgpu_ring_write(ring, lower_32_bits(seq)); 3304 amdgpu_ring_write(ring, upper_32_bits(seq)); 3305 amdgpu_ring_write(ring, 0); 3306 } 3307 3308 static void gfx_v12_1_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 3309 { 3310 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3311 uint32_t seq = ring->fence_drv.sync_seq; 3312 uint64_t addr = ring->fence_drv.gpu_addr; 3313 3314 gfx_v12_1_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 3315 upper_32_bits(addr), seq, 0xffffffff, 4); 3316 } 3317 3318 static void gfx_v12_1_ring_invalidate_tlbs(struct amdgpu_ring *ring, 3319 uint16_t pasid, uint32_t flush_type, 3320 bool all_hub, uint8_t dst_sel) 3321 { 3322 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 3323 amdgpu_ring_write(ring, 3324 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 3325 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 3326 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 3327 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 3328 } 3329 3330 static void gfx_v12_1_ring_emit_vm_flush(struct amdgpu_ring *ring, 3331 unsigned vmid, uint64_t pd_addr) 3332 { 3333 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 3334 3335 /* compute doesn't have PFP */ 3336 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 3337 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 3338 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3339 amdgpu_ring_write(ring, 0x0); 3340 } 3341 } 3342 3343 static void gfx_v12_1_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 3344 u64 seq, unsigned int flags) 3345 { 3346 struct amdgpu_device *adev = ring->adev; 3347 3348 /* we only allocate 32bit for each seq wb address */ 3349 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 3350 3351 /* write fence seq to the "addr" */ 3352 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3353 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3354 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 3355 amdgpu_ring_write(ring, lower_32_bits(addr)); 3356 amdgpu_ring_write(ring, upper_32_bits(addr)); 3357 amdgpu_ring_write(ring, lower_32_bits(seq)); 3358 3359 if (flags & AMDGPU_FENCE_FLAG_INT) { 3360 /* set register to trigger INT */ 3361 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3362 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3363 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 3364 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS)); 3365 amdgpu_ring_write(ring, 0); 3366 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 3367 } 3368 } 3369 3370 static void gfx_v12_1_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 3371 uint32_t reg_val_offs) 3372 { 3373 struct amdgpu_device *adev = ring->adev; 3374 3375 reg = gfx_v12_1_normalize_xcc_reg_offset(reg); 3376 3377 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 3378 amdgpu_ring_write(ring, 0 | /* src: register*/ 3379 (5 << 8) | /* dst: memory */ 3380 (1 << 20)); /* write confirm */ 3381 amdgpu_ring_write(ring, reg); 3382 amdgpu_ring_write(ring, 0); 3383 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 3384 reg_val_offs * 4)); 3385 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 3386 reg_val_offs * 4)); 3387 } 3388 3389 static void 
gfx_v12_1_ring_emit_wreg(struct amdgpu_ring *ring, 3390 uint32_t reg, 3391 uint32_t val) 3392 { 3393 uint32_t cmd = 0; 3394 3395 reg = gfx_v12_1_normalize_xcc_reg_offset(reg); 3396 3397 switch (ring->funcs->type) { 3398 case AMDGPU_RING_TYPE_KIQ: 3399 cmd = (1 << 16); /* no inc addr */ 3400 break; 3401 default: 3402 cmd = WR_CONFIRM; 3403 break; 3404 } 3405 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3406 amdgpu_ring_write(ring, cmd); 3407 amdgpu_ring_write(ring, reg); 3408 amdgpu_ring_write(ring, 0); 3409 amdgpu_ring_write(ring, val); 3410 } 3411 3412 static void gfx_v12_1_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 3413 uint32_t val, uint32_t mask) 3414 { 3415 gfx_v12_1_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 3416 } 3417 3418 static void gfx_v12_1_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 3419 uint32_t reg0, uint32_t reg1, 3420 uint32_t ref, uint32_t mask) 3421 { 3422 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3423 3424 gfx_v12_1_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 3425 ref, mask, 0x20); 3426 } 3427 3428 static void gfx_v12_1_xcc_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 3429 int me, int pipe, 3430 enum amdgpu_interrupt_state state, 3431 int xcc_id) 3432 { 3433 u32 mec_int_cntl, mec_int_cntl_reg; 3434 3435 /* 3436 * amdgpu controls only the first MEC. That's why this function only 3437 * handles the setting of interrupts for this specific MEC. All other 3438 * pipes' interrupts are set by amdkfd. 3439 */ 3440 3441 if (me == 1) { 3442 switch (pipe) { 3443 case 0: 3444 mec_int_cntl_reg = SOC15_REG_OFFSET( 3445 GC, GET_INST(GC, xcc_id), 3446 regCP_ME1_PIPE0_INT_CNTL); 3447 break; 3448 case 1: 3449 mec_int_cntl_reg = SOC15_REG_OFFSET( 3450 GC, GET_INST(GC, xcc_id), 3451 regCP_ME1_PIPE1_INT_CNTL); 3452 break; 3453 case 2: 3454 mec_int_cntl_reg = SOC15_REG_OFFSET( 3455 GC, GET_INST(GC, xcc_id), 3456 regCP_ME1_PIPE2_INT_CNTL); 3457 break; 3458 case 3: 3459 mec_int_cntl_reg = SOC15_REG_OFFSET( 3460 GC, GET_INST(GC, xcc_id), 3461 regCP_ME1_PIPE3_INT_CNTL); 3462 break; 3463 default: 3464 DRM_DEBUG("invalid pipe %d\n", pipe); 3465 return; 3466 } 3467 } else { 3468 DRM_DEBUG("invalid me %d\n", me); 3469 return; 3470 } 3471 3472 switch (state) { 3473 case AMDGPU_IRQ_STATE_DISABLE: 3474 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); 3475 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3476 TIME_STAMP_INT_ENABLE, 0); 3477 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3478 GENERIC0_INT_ENABLE, 0); 3479 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); 3480 break; 3481 case AMDGPU_IRQ_STATE_ENABLE: 3482 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); 3483 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3484 TIME_STAMP_INT_ENABLE, 1); 3485 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3486 GENERIC0_INT_ENABLE, 1); 3487 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); 3488 break; 3489 default: 3490 break; 3491 } 3492 } 3493 3494 static int gfx_v12_1_set_eop_interrupt_state(struct amdgpu_device *adev, 3495 struct amdgpu_irq_src *src, 3496 unsigned type, 3497 enum amdgpu_interrupt_state state) 3498 { 3499 int i, num_xcc; 3500 3501 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3502 for (i = 0; i < num_xcc; i++) { 3503 switch (type) { 3504 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 3505 gfx_v12_1_xcc_set_compute_eop_interrupt_state( 3506 adev, 1, 0, state, i); 3507 break; 3508 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 3509 
static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i, xcc_id;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;
		xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);

		if (xcc_id == -EINVAL)
			return -EINVAL;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring
					[i + xcc_id * adev->gfx.num_compute_rings];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}

static int gfx_v12_1_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < num_xcc; i++)
			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
					      PRIV_REG_INT_ENABLE,
					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v12_1_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < num_xcc; i++)
			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
					      PRIV_INSTR_INT_ENABLE,
					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
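/*
 * Worked example of the ring_id decoding used by gfx_v12_1_eop_irq() above
 * and gfx_v12_1_handle_priv_fault() below (value chosen for illustration
 * only): for entry->ring_id = 0x25,
 *
 *	me_id    = (0x25 & 0x0c) >> 2 = 1	(MEC1)
 *	pipe_id  =  0x25 & 0x03       = 1
 *	queue_id = (0x25 & 0x70) >> 4 = 2
 *
 * so the event is routed to the compute ring on that XCC whose
 * me/pipe/queue triple matches <1, 1, 2>.
 */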
static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i, xcc_id;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	xcc_id = gfx_v12_1_ih_to_xcc_inst(adev, entry->node_id);

	if (xcc_id == -EINVAL)
		return;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			/* we only enabled 1 gfx queue per pipe for now */
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring
				[i + xcc_id * adev->gfx.num_compute_rings];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v12_1_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v12_1_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v12_1_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v12_1_handle_priv_fault(adev, entry);
	return 0;
}

static void gfx_v12_1_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_SCOPE(2);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0);		/* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff);	/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl);	/* GCR_CNTL */
}

static const struct amd_ip_funcs gfx_v12_1_ip_funcs = {
	.name = "gfx_v12_1",
	.early_init = gfx_v12_1_early_init,
	.late_init = gfx_v12_1_late_init,
	.sw_init = gfx_v12_1_sw_init,
	.sw_fini = gfx_v12_1_sw_fini,
	.hw_init = gfx_v12_1_hw_init,
	.hw_fini = gfx_v12_1_hw_fini,
	.suspend = gfx_v12_1_suspend,
	.resume = gfx_v12_1_resume,
	.is_idle = gfx_v12_1_is_idle,
	.wait_for_idle = gfx_v12_1_wait_for_idle,
	.set_clockgating_state = gfx_v12_1_set_clockgating_state,
	.set_powergating_state = gfx_v12_1_set_powergating_state,
	.get_clockgating_state = gfx_v12_1_get_clockgating_state,
};
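/*
 * Note on gfx_v12_1_emit_mem_sync() above: with CP_COHER_BASE = 0,
 * CP_COHER_SIZE = 0xffffffff and CP_COHER_SIZE_HI = 0xffffff the
 * ACQUIRE_MEM covers the full address range, and gcr_cntl requests a GL2
 * writeback plus GL2/GLV/GLK/GLI invalidation (field meanings inferred from
 * the PACKET3_ACQUIRE_MEM_GCR_CNTL_* macro names). This is the
 * per-submission cache flush/invalidate hooked up as .emit_mem_sync in the
 * compute ring funcs below.
 */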
static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
	.emit_frame_size =
		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v12_1_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v12_1_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v12_1_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v12_1_ring_emit_ib_compute */
	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
	.emit_fence = gfx_v12_1_ring_emit_fence,
	.emit_pipeline_sync = gfx_v12_1_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v12_1_ring_emit_vm_flush,
	.test_ring = gfx_v12_1_ring_test_ring,
	.test_ib = gfx_v12_1_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v12_1_ring_emit_wreg,
	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v12_1_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v12_1_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v12_1_ring_get_rptr_compute,
	.get_wptr = gfx_v12_1_ring_get_wptr_compute,
	.set_wptr = gfx_v12_1_ring_set_wptr_compute,
	.emit_frame_size =
		7 + /* gfx_v12_1_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v12_1_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v12_1_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v12_1_ring_emit_ib_compute */
	.emit_ib = gfx_v12_1_ring_emit_ib_compute,
	.emit_fence = gfx_v12_1_ring_emit_fence_kiq,
	.test_ring = gfx_v12_1_ring_test_ring,
	.test_ib = gfx_v12_1_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v12_1_ring_emit_rreg,
	.emit_wreg = gfx_v12_1_ring_emit_wreg,
	.emit_reg_wait = gfx_v12_1_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v12_1_ring_emit_reg_write_reg_wait,
};

static void gfx_v12_1_set_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		adev->gfx.kiq[i].ring.funcs = &gfx_v12_1_ring_funcs_kiq;

		for (j = 0; j < adev->gfx.num_compute_rings; j++)
			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs =
				&gfx_v12_1_ring_funcs_compute;
	}
}

static const struct amdgpu_irq_src_funcs gfx_v12_1_eop_irq_funcs = {
	.set = gfx_v12_1_set_eop_interrupt_state,
	.process = gfx_v12_1_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_reg_irq_funcs = {
	.set = gfx_v12_1_set_priv_reg_fault_state,
	.process = gfx_v12_1_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v12_1_priv_inst_irq_funcs = {
	.set = gfx_v12_1_set_priv_inst_fault_state,
	.process = gfx_v12_1_priv_inst_irq,
};
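/*
 * Sizing note (assumption based on how amdgpu consumes these tables):
 * .emit_frame_size is the worst-case number of ring dwords reserved per
 * submission and has to stay in sync with the emit_* helpers above, e.g.
 *
 *	emit_pipeline_sync	7 dwords (wait_reg_mem)
 *	emit_vm_flush		2 dwords (PFP_SYNC_ME) plus the
 *				SOC15_FLUSH_GPU_TLB_* register writes/waits
 *	emit_fence		8 dwords, accounted three times
 *	emit_mem_sync		8 dwords (ACQUIRE_MEM)
 *
 * If a new packet is added to one of those helpers, the matching constant
 * in the frame-size sum has to grow with it.
 */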
static void gfx_v12_1_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v12_1_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v12_1_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v12_1_priv_inst_irq_funcs;
}

static void gfx_v12_1_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v12_1_imu_funcs;
}

static void gfx_v12_1_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v12_1_rlc_funcs;
}

static void gfx_v12_1_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v12_1_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v12_1_compute_mqd_init;
}

static void gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							 u32 bitmap, int xcc_id)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v12_1_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev,
						 int xcc_id)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

static int gfx_v12_1_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned int disable_masks[2 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	if (adev->gfx.config.max_shader_engines > 2 ||
	    adev->gfx.config.max_sh_per_se > 2) {
		dev_err(adev->dev,
			"Max SE (%d) and Max SA per SE (%d) are greater than expected\n",
			adev->gfx.config.max_shader_engines,
			adev->gfx.config.max_sh_per_se);
		return -EINVAL;
	}

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				bitmap = i * adev->gfx.config.max_sh_per_se + j;
				if (!((gfx_v12_1_get_sa_active_bitmap(adev, xcc_id) >> bitmap) & 1))
					continue;
				mask = 1;
				counter = 0;
				gfx_v12_1_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v12_1_set_user_cu_inactive_bitmap_per_sh(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v12_1_get_cu_active_bitmap_per_sh(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask)
						counter++;

					mask <<= 1;
				}
				active_cu_number += counter;
			}
		}
		gfx_v12_1_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU_GFX12_1;
	cu_info->lds_size = 320;

	return 0;
}
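/*
 * Worked example for the counting loop in gfx_v12_1_get_cu_info() above
 * (numbers are illustrative): if one shader array reports an active bitmap
 * of 0x3ff from gfx_v12_1_get_cu_active_bitmap_per_sh(), the popcount loop
 * adds 10 to active_cu_number for that SA. WGPs harvested via the
 * INACTIVE_WGPS fields never set their bits in the active bitmap, so they
 * are excluded from the count automatically.
 */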
const struct amdgpu_ip_block_version gfx_v12_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 12,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v12_1_ip_funcs,
};

static int gfx_v12_1_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO : Initialize golden regs */
	/* gfx_v12_1_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v12_1_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v12_1_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	r = gfx_v12_1_xcc_cp_resume(adev, inst_mask);

	return r;
}

static int gfx_v12_1_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v12_1_xcc_fini(adev, i);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfx_v12_1_xcp_funcs = {
	.suspend = &gfx_v12_1_xcp_suspend,
	.resume = &gfx_v12_1_xcp_resume
};
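/*
 * Usage sketch (assumption about the partition code path): the XCP layer
 * invokes gfx_v12_1_xcp_funcs.suspend/.resume with an inst_mask that selects
 * the XCC instances belonging to one partition, e.g. inst_mask = 0x3 walks
 * XCC0 and XCC1, re-initializing constants, RLC (bare metal only) and CP on
 * just those instances while leaving the rest of the mask untouched.
 */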