/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"

#define GFX11_NUM_GFX_RINGS	1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1		0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
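
/*
 * Per-chip "golden" register overrides, applied at init time by
 * gfx_v11_0_init_golden_registers() below (currently only needed for the
 * GC 11.0.1 / 11.0.4 variants).
 */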
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
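
/*
 * Build a MAP_QUEUES PM4 packet for the KIQ: the target microengine
 * (me/pipe/queue) is derived from the ring type, and the MQD and wptr GPU
 * addresses are passed so the firmware can schedule the queue.
 */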
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
}
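
/*
 * Small PM4 helpers used throughout this file: WRITE_DATA programs a
 * register from the ring, WAIT_REG_MEM polls a register or memory
 * location until it equals (ref & mask).
 */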
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}
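
/*
 * IB test: submit a one-packet indirect buffer that writes 0xDEADBEEF to a
 * writeback slot, then poll the slot to confirm the CP actually fetched and
 * executed the IB.
 */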
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw hasn't indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}
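
/*
 * Request PFP/ME/RLC/MEC firmware images. The PFP header version (2.x)
 * decides whether the RS64 CP path is enabled; the RLC image is only
 * requested on bare metal, not for SR-IOV VFs.
 */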
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}
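
/*
 * Wave debug helpers: read per-wave state through the SQ indirect register
 * pair (SQ_IND_INDEX selects wave/register, SQ_IND_DATA returns the value;
 * AUTO_INCR streams consecutive SGPR/VGPR reads).
 */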
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
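
/*
 * Per-variant gfx configuration (hw context count and primitive/tile FIFO
 * sizes); GC 11.0.3 additionally hooks up its RAS block here.
 */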
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].offset +
			     rlc_autoload_info[SOC21_FIRMWARE_ID_MAX - 1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}
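
/*
 * The TOC image itself also lives in the autoload buffer; its last 64 bits
 * are patched with the accumulated autoload mask so the bootloader knows
 * which firmware images are present.
 */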
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
						fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
						fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
						fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
						fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
					fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
					fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
					fw_data, fw_size, fw_autoload_mask);
		}
	}
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}
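
/*
 * MES runs an instance per pipe; copy the ucode and ucode-data images for
 * both pipes into their respective autoload slots.
 */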
static void
gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
					       uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}

static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4 init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5 disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}
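
/*
 * sw_init: pick the per-variant ME/MEC topology, register interrupt sources,
 * create the gfx/compute rings (and the KIQ when MES KIQ is not in use), and
 * allocate MQD backing plus, for backdoor autoload, the RLC autoload buffer.
 */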
static int gfx_v11_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* ECC error */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_ECC_ERROR,
			      &adev->gfx.cp_ecc_error_irq);
	if (r)
		return r;

	/* FED error */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
			      &adev->gfx.rlc_gc_fed_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			r = adev->gfx.imu.funcs->init_microcode(adev);
			if (r)
				DRM_ERROR("Failed to load imu firmware!\n");
		}
	}

	gfx_v11_0_me_init(adev);

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq;
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v11_0_gpu_early_init(adev);
	if (r)
		return r;

	if (amdgpu_gfx_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			&adev->gfx.rlc.rlc_autoload_gpu_addr,
			(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

static int gfx_v11_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
		amdgpu_gfx_kiq_fini(adev);
	}

	gfx_v11_0_pfp_fini(adev);
	gfx_v11_0_me_fini(adev);
	gfx_v11_0_rlc_fini(adev);
	gfx_v11_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v11_0_rlc_autoload_buffer_fini(adev);

	gfx_v11_0_free_microcode(adev);

	return 0;
}

static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}

static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}

static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2
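
/*
 * Set up the LDS/scratch apertures and enable the trap handler for every
 * KFD VMID; GDS/GWS/OA allocations for those VMIDs are zeroed here and
 * left for firmware to assign.
 */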
static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:		0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:	0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:	0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		       SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		/* write the updated value back, otherwise the RMW above is a
		 * dead store and the trap enable never takes effect */
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}

static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* Set whether texture coordinate truncation is conformant. */
	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
	adev->gfx.config.ta_cntl2_truncate_coord_mode =
		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
}
1 : 0); 1706 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1707 enable ? 1 : 0); 1708 1709 WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); 1710 } 1711 1712 static int gfx_v11_0_init_csb(struct amdgpu_device *adev) 1713 { 1714 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 1715 1716 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 1717 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1718 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 1719 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1720 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 1721 1722 return 0; 1723 } 1724 1725 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) 1726 { 1727 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 1728 1729 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 1730 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 1731 } 1732 1733 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) 1734 { 1735 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 1736 udelay(50); 1737 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 1738 udelay(50); 1739 } 1740 1741 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 1742 bool enable) 1743 { 1744 uint32_t rlc_pg_cntl; 1745 1746 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 1747 1748 if (!enable) { 1749 /* RLC_PG_CNTL[23] = 0 (default) 1750 * RLC will wait for handshake acks with SMU 1751 * GFXOFF will be enabled 1752 * RLC_PG_CNTL[23] = 1 1753 * RLC will not issue any message to SMU 1754 * hence no handshake between SMU & RLC 1755 * GFXOFF will be disabled 1756 */ 1757 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1758 } else 1759 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 1760 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 1761 } 1762 1763 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) 1764 { 1765 /* TODO: enable rlc & smu handshake until smu 1766 * and gfxoff feature works as expected */ 1767 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 1768 gfx_v11_0_rlc_smu_handshake_cntl(adev, false); 1769 1770 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 1771 udelay(50); 1772 } 1773 1774 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) 1775 { 1776 uint32_t tmp; 1777 1778 /* enable Save Restore Machine */ 1779 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 1780 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 1781 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 1782 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 1783 } 1784 1785 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) 1786 { 1787 const struct rlc_firmware_header_v2_0 *hdr; 1788 const __le32 *fw_data; 1789 unsigned i, fw_size; 1790 1791 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1792 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1793 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1794 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 1795 1796 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 1797 RLCG_UCODE_LOADING_START_ADDRESS); 1798 1799 for (i = 0; i < fw_size; i++) 1800 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 1801 le32_to_cpup(fw_data++)); 1802 1803 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 1804 } 1805 1806 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 1807 { 1808 const struct rlc_firmware_header_v2_2 *hdr; 1809 const __le32 *fw_data; 1810 unsigned i, fw_size; 1811 u32 tmp; 1812 1813 hdr = (const struct 
rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1814 1815 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1816 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 1817 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 1818 1819 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 1820 1821 for (i = 0; i < fw_size; i++) { 1822 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1823 msleep(1); 1824 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 1825 le32_to_cpup(fw_data++)); 1826 } 1827 1828 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1829 1830 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1831 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 1832 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 1833 1834 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 1835 for (i = 0; i < fw_size; i++) { 1836 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1837 msleep(1); 1838 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 1839 le32_to_cpup(fw_data++)); 1840 } 1841 1842 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 1843 1844 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 1845 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 1846 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 1847 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 1848 } 1849 1850 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) 1851 { 1852 const struct rlc_firmware_header_v2_3 *hdr; 1853 const __le32 *fw_data; 1854 unsigned i, fw_size; 1855 u32 tmp; 1856 1857 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 1858 1859 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1860 le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); 1861 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; 1862 1863 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); 1864 1865 for (i = 0; i < fw_size; i++) { 1866 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1867 msleep(1); 1868 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, 1869 le32_to_cpup(fw_data++)); 1870 } 1871 1872 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); 1873 1874 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 1875 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 1876 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); 1877 1878 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1879 le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); 1880 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; 1881 1882 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); 1883 1884 for (i = 0; i < fw_size; i++) { 1885 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 1886 msleep(1); 1887 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, 1888 le32_to_cpup(fw_data++)); 1889 } 1890 1891 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); 1892 1893 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); 1894 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); 1895 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); 1896 } 1897 1898 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) 1899 { 1900 const struct rlc_firmware_header_v2_0 *hdr; 1901 uint16_t version_major; 1902 uint16_t version_minor; 1903 1904 if (!adev->gfx.rlc_fw) 1905 return -EINVAL; 1906 1907 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1908 amdgpu_ucode_print_rlc_hdr(&hdr->header); 1909 1910 version_major = le16_to_cpu(hdr->header.header_version_major); 1911 version_minor = le16_to_cpu(hdr->header.header_version_minor); 1912 1913 if (version_major == 2) { 1914 
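		/*
		 * Minor-version layout, as implied by the checks below (a
		 * sketch inferred from this function, not from the header
		 * spec): v2.x always carries the RLCG image, v2.2+ adds the
		 * LX6 IRAM/DRAM images, and v2.3 additionally carries the
		 * RLCP and RLCV images.
		 */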
		gfx_v11_0_load_rlcg_microcode(adev);
		if (amdgpu_dpm == 1) {
			if (version_minor >= 2)
				gfx_v11_0_load_rlc_iram_dram_microcode(adev);
			if (version_minor == 3)
				gfx_v11_0_load_rlcp_rlcv_microcode(adev);
		}

		return 0;
	}

	return -EINVAL;
}

static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		gfx_v11_0_init_csb(adev);

		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
			gfx_v11_0_rlc_enable_srm(adev);
	} else {
		if (amdgpu_sriov_vf(adev)) {
			gfx_v11_0_init_csb(adev);
			return 0;
		}

		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v11_0_rlc_load_microcode(adev);
			if (r)
				return r;
		}

		gfx_v11_0_init_csb(adev);

		adev->gfx.rlc.funcs->start(adev);
	}
	return 0;
}

static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/* Program me ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/* Program pfp ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);

	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				       INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	/* Program mec1 ucode address into instruction cache address register */
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
		     lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
		     upper_32_bits(addr));

	return 0;
}

static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	unsigned i, pipe_id;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		lower_32_bits(addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		upper_32_bits(addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces invalidation of the ME L1 I$.
Wait for the 2121 * invalidation complete 2122 */ 2123 for (i = 0; i < usec_timeout; i++) { 2124 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2125 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2126 INVALIDATE_CACHE_COMPLETE)) 2127 break; 2128 udelay(1); 2129 } 2130 2131 if (i >= usec_timeout) { 2132 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2133 return -EINVAL; 2134 } 2135 2136 /* Prime the L1 instruction caches */ 2137 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2138 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2139 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2140 /* Waiting for cache primed*/ 2141 for (i = 0; i < usec_timeout; i++) { 2142 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2143 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2144 ICACHE_PRIMED)) 2145 break; 2146 udelay(1); 2147 } 2148 2149 if (i >= usec_timeout) { 2150 dev_err(adev->dev, "failed to prime instruction cache\n"); 2151 return -EINVAL; 2152 } 2153 2154 mutex_lock(&adev->srbm_mutex); 2155 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2156 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2157 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2158 (pfp_hdr->ucode_start_addr_hi << 30) | 2159 (pfp_hdr->ucode_start_addr_lo >> 2)); 2160 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2161 pfp_hdr->ucode_start_addr_hi >> 2); 2162 2163 /* 2164 * Program CP_ME_CNTL to reset given PIPE to take 2165 * effect of CP_PFP_PRGRM_CNTR_START. 2166 */ 2167 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2168 if (pipe_id == 0) 2169 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2170 PFP_PIPE0_RESET, 1); 2171 else 2172 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2173 PFP_PIPE1_RESET, 1); 2174 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2175 2176 /* Clear pfp pipe0 reset bit. 
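	 The same CP_ME_CNTL value is written back with the reset field
	 cleared so the pipe leaves reset and starts fetching from the
	 newly programmed start address.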
*/ 2177 if (pipe_id == 0) 2178 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2179 PFP_PIPE0_RESET, 0); 2180 else 2181 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2182 PFP_PIPE1_RESET, 0); 2183 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2184 2185 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2186 lower_32_bits(addr2)); 2187 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2188 upper_32_bits(addr2)); 2189 } 2190 soc21_grbm_select(adev, 0, 0, 0, 0); 2191 mutex_unlock(&adev->srbm_mutex); 2192 2193 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2194 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2195 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2196 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2197 2198 /* Invalidate the data caches */ 2199 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2200 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2201 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2202 2203 for (i = 0; i < usec_timeout; i++) { 2204 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2205 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2206 INVALIDATE_DCACHE_COMPLETE)) 2207 break; 2208 udelay(1); 2209 } 2210 2211 if (i >= usec_timeout) { 2212 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2213 return -EINVAL; 2214 } 2215 2216 return 0; 2217 } 2218 2219 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2220 { 2221 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2222 uint32_t tmp; 2223 unsigned i, pipe_id; 2224 const struct gfx_firmware_header_v2_0 *me_hdr; 2225 2226 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2227 adev->gfx.me_fw->data; 2228 2229 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2230 lower_32_bits(addr)); 2231 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2232 upper_32_bits(addr)); 2233 2234 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2235 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2236 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2237 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2238 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2239 2240 /* 2241 * Programming any of the CP_ME_IC_BASE registers 2242 * forces invalidation of the ME L1 I$. 
Wait for the 2243 * invalidation complete 2244 */ 2245 for (i = 0; i < usec_timeout; i++) { 2246 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2247 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2248 INVALIDATE_CACHE_COMPLETE)) 2249 break; 2250 udelay(1); 2251 } 2252 2253 if (i >= usec_timeout) { 2254 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2255 return -EINVAL; 2256 } 2257 2258 /* Prime the instruction caches */ 2259 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2260 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2261 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2262 2263 /* Waiting for instruction cache primed*/ 2264 for (i = 0; i < usec_timeout; i++) { 2265 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2266 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2267 ICACHE_PRIMED)) 2268 break; 2269 udelay(1); 2270 } 2271 2272 if (i >= usec_timeout) { 2273 dev_err(adev->dev, "failed to prime instruction cache\n"); 2274 return -EINVAL; 2275 } 2276 2277 mutex_lock(&adev->srbm_mutex); 2278 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2279 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2280 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2281 (me_hdr->ucode_start_addr_hi << 30) | 2282 (me_hdr->ucode_start_addr_lo >> 2) ); 2283 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2284 me_hdr->ucode_start_addr_hi>>2); 2285 2286 /* 2287 * Program CP_ME_CNTL to reset given PIPE to take 2288 * effect of CP_PFP_PRGRM_CNTR_START. 2289 */ 2290 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2291 if (pipe_id == 0) 2292 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2293 ME_PIPE0_RESET, 1); 2294 else 2295 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2296 ME_PIPE1_RESET, 1); 2297 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2298 2299 /* Clear pfp pipe0 reset bit. 
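	 (For the ME pipes this is actually the ME_PIPEx_RESET field.)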
*/ 2300 if (pipe_id == 0) 2301 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2302 ME_PIPE0_RESET, 0); 2303 else 2304 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2305 ME_PIPE1_RESET, 0); 2306 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2307 2308 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2309 lower_32_bits(addr2)); 2310 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2311 upper_32_bits(addr2)); 2312 } 2313 soc21_grbm_select(adev, 0, 0, 0, 0); 2314 mutex_unlock(&adev->srbm_mutex); 2315 2316 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2317 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2318 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2319 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2320 2321 /* Invalidate the data caches */ 2322 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2323 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2324 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2325 2326 for (i = 0; i < usec_timeout; i++) { 2327 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2328 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2329 INVALIDATE_DCACHE_COMPLETE)) 2330 break; 2331 udelay(1); 2332 } 2333 2334 if (i >= usec_timeout) { 2335 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2336 return -EINVAL; 2337 } 2338 2339 return 0; 2340 } 2341 2342 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2343 { 2344 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2345 uint32_t tmp; 2346 unsigned i; 2347 const struct gfx_firmware_header_v2_0 *mec_hdr; 2348 2349 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2350 adev->gfx.mec_fw->data; 2351 2352 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2353 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2354 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2355 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2356 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2357 2358 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2359 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2360 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2361 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2362 2363 mutex_lock(&adev->srbm_mutex); 2364 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2365 soc21_grbm_select(adev, 1, i, 0, 0); 2366 2367 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); 2368 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2369 upper_32_bits(addr2)); 2370 2371 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2372 mec_hdr->ucode_start_addr_lo >> 2 | 2373 mec_hdr->ucode_start_addr_hi << 30); 2374 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2375 mec_hdr->ucode_start_addr_hi >> 2); 2376 2377 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); 2378 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2379 upper_32_bits(addr)); 2380 } 2381 mutex_unlock(&adev->srbm_mutex); 2382 soc21_grbm_select(adev, 0, 0, 0, 0); 2383 2384 /* Trigger an invalidation of the L1 instruction caches */ 2385 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2386 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2387 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2388 2389 /* Wait for invalidation complete */ 2390 for (i = 0; i < usec_timeout; i++) { 2391 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2392 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2393 INVALIDATE_DCACHE_COMPLETE)) 2394 break; 2395 udelay(1); 2396 } 2397 2398 if (i >= 
usec_timeout) { 2399 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2400 return -EINVAL; 2401 } 2402 2403 /* Trigger an invalidation of the L1 instruction caches */ 2404 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2405 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2406 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2407 2408 /* Wait for invalidation complete */ 2409 for (i = 0; i < usec_timeout; i++) { 2410 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2411 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2412 INVALIDATE_CACHE_COMPLETE)) 2413 break; 2414 udelay(1); 2415 } 2416 2417 if (i >= usec_timeout) { 2418 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2419 return -EINVAL; 2420 } 2421 2422 return 0; 2423 } 2424 2425 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) 2426 { 2427 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2428 const struct gfx_firmware_header_v2_0 *me_hdr; 2429 const struct gfx_firmware_header_v2_0 *mec_hdr; 2430 uint32_t pipe_id, tmp; 2431 2432 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2433 adev->gfx.mec_fw->data; 2434 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2435 adev->gfx.me_fw->data; 2436 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2437 adev->gfx.pfp_fw->data; 2438 2439 /* config pfp program start addr */ 2440 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2441 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2442 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2443 (pfp_hdr->ucode_start_addr_hi << 30) | 2444 (pfp_hdr->ucode_start_addr_lo >> 2)); 2445 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2446 pfp_hdr->ucode_start_addr_hi >> 2); 2447 } 2448 soc21_grbm_select(adev, 0, 0, 0, 0); 2449 2450 /* reset pfp pipe */ 2451 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2452 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2453 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2454 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2455 2456 /* clear pfp pipe reset */ 2457 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2458 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2459 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2460 2461 /* config me program start addr */ 2462 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2463 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2464 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2465 (me_hdr->ucode_start_addr_hi << 30) | 2466 (me_hdr->ucode_start_addr_lo >> 2) ); 2467 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2468 me_hdr->ucode_start_addr_hi>>2); 2469 } 2470 soc21_grbm_select(adev, 0, 0, 0, 0); 2471 2472 /* reset me pipe */ 2473 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2474 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2475 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2476 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2477 2478 /* clear me pipe reset */ 2479 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2480 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2481 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2482 2483 /* config mec program start addr */ 2484 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2485 soc21_grbm_select(adev, 1, pipe_id, 0, 0); 2486 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2487 mec_hdr->ucode_start_addr_lo >> 2 | 2488 mec_hdr->ucode_start_addr_hi << 30); 2489 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2490 mec_hdr->ucode_start_addr_hi >> 2); 2491 } 2492 soc21_grbm_select(adev, 0, 0, 0, 0); 2493 2494 /* reset mec pipe */ 2495 tmp = 
RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 2496 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1); 2497 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1); 2498 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1); 2499 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1); 2500 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2501 2502 /* clear mec pipe reset */ 2503 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0); 2504 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0); 2505 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0); 2506 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0); 2507 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp); 2508 } 2509 2510 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 2511 { 2512 uint32_t cp_status; 2513 uint32_t bootload_status; 2514 int i, r; 2515 uint64_t addr, addr2; 2516 2517 for (i = 0; i < adev->usec_timeout; i++) { 2518 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2519 2520 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) || 2521 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4)) 2522 bootload_status = RREG32_SOC15(GC, 0, 2523 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 2524 else 2525 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 2526 2527 if ((cp_status == 0) && 2528 (REG_GET_FIELD(bootload_status, 2529 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 2530 break; 2531 } 2532 udelay(1); 2533 } 2534 2535 if (i >= adev->usec_timeout) { 2536 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 2537 return -ETIMEDOUT; 2538 } 2539 2540 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2541 if (adev->gfx.rs64_enable) { 2542 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2543 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; 2544 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2545 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; 2546 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); 2547 if (r) 2548 return r; 2549 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2550 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; 2551 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2552 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; 2553 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); 2554 if (r) 2555 return r; 2556 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2557 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; 2558 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2559 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; 2560 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); 2561 if (r) 2562 return r; 2563 } else { 2564 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2565 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; 2566 r = gfx_v11_0_config_me_cache(adev, addr); 2567 if (r) 2568 return r; 2569 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2570 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; 2571 r = gfx_v11_0_config_pfp_cache(adev, addr); 2572 if (r) 2573 return r; 2574 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2575 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; 2576 r = gfx_v11_0_config_mec_cache(adev, addr); 2577 if (r) 2578 return r; 2579 } 2580 } 2581 2582 return 0; 2583 } 2584 2585 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2586 { 2587 int i; 2588 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2589 2590 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 
0 : 1); 2591 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2592 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2593 2594 for (i = 0; i < adev->usec_timeout; i++) { 2595 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 2596 break; 2597 udelay(1); 2598 } 2599 2600 if (i >= adev->usec_timeout) 2601 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); 2602 2603 return 0; 2604 } 2605 2606 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 2607 { 2608 int r; 2609 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2610 const __le32 *fw_data; 2611 unsigned i, fw_size; 2612 2613 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2614 adev->gfx.pfp_fw->data; 2615 2616 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2617 2618 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2619 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2620 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 2621 2622 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 2623 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2624 &adev->gfx.pfp.pfp_fw_obj, 2625 &adev->gfx.pfp.pfp_fw_gpu_addr, 2626 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2627 if (r) { 2628 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 2629 gfx_v11_0_pfp_fini(adev); 2630 return r; 2631 } 2632 2633 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 2634 2635 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2636 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2637 2638 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); 2639 2640 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); 2641 2642 for (i = 0; i < pfp_hdr->jt_size; i++) 2643 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, 2644 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); 2645 2646 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2647 2648 return 0; 2649 } 2650 2651 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 2652 { 2653 int r; 2654 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2655 const __le32 *fw_ucode, *fw_data; 2656 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2657 uint32_t tmp; 2658 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2659 2660 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2661 adev->gfx.pfp_fw->data; 2662 2663 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2664 2665 /* instruction */ 2666 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 2667 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 2668 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 2669 /* data */ 2670 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2671 le32_to_cpu(pfp_hdr->data_offset_bytes)); 2672 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 2673 2674 /* 64kb align */ 2675 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2676 64 * 1024, 2677 AMDGPU_GEM_DOMAIN_VRAM | 2678 AMDGPU_GEM_DOMAIN_GTT, 2679 &adev->gfx.pfp.pfp_fw_obj, 2680 &adev->gfx.pfp.pfp_fw_gpu_addr, 2681 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2682 if (r) { 2683 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 2684 gfx_v11_0_pfp_fini(adev); 2685 return r; 2686 } 2687 2688 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2689 64 * 1024, 2690 AMDGPU_GEM_DOMAIN_VRAM | 2691 AMDGPU_GEM_DOMAIN_GTT, 2692 &adev->gfx.pfp.pfp_fw_data_obj, 2693 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 2694 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 2695 if (r) { 2696 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 2697 gfx_v11_0_pfp_fini(adev); 2698 return r; 2699 } 2700 2701 
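	/*
	 * RS64 firmware ships as separate instruction and data images, so
	 * two GPU buffers are staged here: one backing the instruction
	 * cache (CP_PFP_IC_BASE) and one backing the data cache
	 * (CP_GFX_RS64_DC_BASE0), both allocated 64KB-aligned.
	 */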
memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 2702 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 2703 2704 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2705 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 2706 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2707 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 2708 2709 if (amdgpu_emu_mode == 1) 2710 adev->hdp.funcs->flush_hdp(adev, NULL); 2711 2712 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2713 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2714 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2715 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2716 2717 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2718 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2719 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2720 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2721 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2722 2723 /* 2724 * Programming any of the CP_PFP_IC_BASE registers 2725 * forces invalidation of the ME L1 I$. Wait for the 2726 * invalidation complete 2727 */ 2728 for (i = 0; i < usec_timeout; i++) { 2729 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2730 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2731 INVALIDATE_CACHE_COMPLETE)) 2732 break; 2733 udelay(1); 2734 } 2735 2736 if (i >= usec_timeout) { 2737 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2738 return -EINVAL; 2739 } 2740 2741 /* Prime the L1 instruction caches */ 2742 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2743 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2744 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2745 /* Waiting for cache primed*/ 2746 for (i = 0; i < usec_timeout; i++) { 2747 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2748 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2749 ICACHE_PRIMED)) 2750 break; 2751 udelay(1); 2752 } 2753 2754 if (i >= usec_timeout) { 2755 dev_err(adev->dev, "failed to prime instruction cache\n"); 2756 return -EINVAL; 2757 } 2758 2759 mutex_lock(&adev->srbm_mutex); 2760 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2761 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2762 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2763 (pfp_hdr->ucode_start_addr_hi << 30) | 2764 (pfp_hdr->ucode_start_addr_lo >> 2) ); 2765 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2766 pfp_hdr->ucode_start_addr_hi>>2); 2767 2768 /* 2769 * Program CP_ME_CNTL to reset given PIPE to take 2770 * effect of CP_PFP_PRGRM_CNTR_START. 2771 */ 2772 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2773 if (pipe_id == 0) 2774 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2775 PFP_PIPE0_RESET, 1); 2776 else 2777 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2778 PFP_PIPE1_RESET, 1); 2779 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2780 2781 /* Clear pfp pipe0 reset bit. 
*/ 2782 if (pipe_id == 0) 2783 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2784 PFP_PIPE0_RESET, 0); 2785 else 2786 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2787 PFP_PIPE1_RESET, 0); 2788 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2789 2790 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2791 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 2792 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2793 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 2794 } 2795 soc21_grbm_select(adev, 0, 0, 0, 0); 2796 mutex_unlock(&adev->srbm_mutex); 2797 2798 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2799 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2800 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2801 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2802 2803 /* Invalidate the data caches */ 2804 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2805 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2806 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2807 2808 for (i = 0; i < usec_timeout; i++) { 2809 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2810 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2811 INVALIDATE_DCACHE_COMPLETE)) 2812 break; 2813 udelay(1); 2814 } 2815 2816 if (i >= usec_timeout) { 2817 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2818 return -EINVAL; 2819 } 2820 2821 return 0; 2822 } 2823 2824 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) 2825 { 2826 int r; 2827 const struct gfx_firmware_header_v1_0 *me_hdr; 2828 const __le32 *fw_data; 2829 unsigned i, fw_size; 2830 2831 me_hdr = (const struct gfx_firmware_header_v1_0 *) 2832 adev->gfx.me_fw->data; 2833 2834 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2835 2836 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 2837 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 2838 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); 2839 2840 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, 2841 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2842 &adev->gfx.me.me_fw_obj, 2843 &adev->gfx.me.me_fw_gpu_addr, 2844 (void **)&adev->gfx.me.me_fw_ptr); 2845 if (r) { 2846 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); 2847 gfx_v11_0_me_fini(adev); 2848 return r; 2849 } 2850 2851 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); 2852 2853 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 2854 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 2855 2856 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr); 2857 2858 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0); 2859 2860 for (i = 0; i < me_hdr->jt_size; i++) 2861 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA, 2862 le32_to_cpup(fw_data + me_hdr->jt_offset + i)); 2863 2864 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); 2865 2866 return 0; 2867 } 2868 2869 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 2870 { 2871 int r; 2872 const struct gfx_firmware_header_v2_0 *me_hdr; 2873 const __le32 *fw_ucode, *fw_data; 2874 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2875 uint32_t tmp; 2876 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2877 2878 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2879 adev->gfx.me_fw->data; 2880 2881 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2882 2883 /* instruction */ 2884 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 2885 le32_to_cpu(me_hdr->ucode_offset_bytes)); 2886 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 2887 /* data */ 2888 
fw_data = (const __le32 *)(adev->gfx.me_fw->data + 2889 le32_to_cpu(me_hdr->data_offset_bytes)); 2890 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 2891 2892 /* 64kb align*/ 2893 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2894 64 * 1024, 2895 AMDGPU_GEM_DOMAIN_VRAM | 2896 AMDGPU_GEM_DOMAIN_GTT, 2897 &adev->gfx.me.me_fw_obj, 2898 &adev->gfx.me.me_fw_gpu_addr, 2899 (void **)&adev->gfx.me.me_fw_ptr); 2900 if (r) { 2901 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 2902 gfx_v11_0_me_fini(adev); 2903 return r; 2904 } 2905 2906 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2907 64 * 1024, 2908 AMDGPU_GEM_DOMAIN_VRAM | 2909 AMDGPU_GEM_DOMAIN_GTT, 2910 &adev->gfx.me.me_fw_data_obj, 2911 &adev->gfx.me.me_fw_data_gpu_addr, 2912 (void **)&adev->gfx.me.me_fw_data_ptr); 2913 if (r) { 2914 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 2915 gfx_v11_0_pfp_fini(adev); 2916 return r; 2917 } 2918 2919 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 2920 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 2921 2922 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 2923 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 2924 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 2925 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 2926 2927 if (amdgpu_emu_mode == 1) 2928 adev->hdp.funcs->flush_hdp(adev, NULL); 2929 2930 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2931 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2932 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2933 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2934 2935 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2936 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2937 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2938 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2939 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2940 2941 /* 2942 * Programming any of the CP_ME_IC_BASE registers 2943 * forces invalidation of the ME L1 I$. Wait for the 2944 * invalidation complete 2945 */ 2946 for (i = 0; i < usec_timeout; i++) { 2947 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2948 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2949 INVALIDATE_CACHE_COMPLETE)) 2950 break; 2951 udelay(1); 2952 } 2953 2954 if (i >= usec_timeout) { 2955 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2956 return -EINVAL; 2957 } 2958 2959 /* Prime the instruction caches */ 2960 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2961 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2962 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2963 2964 /* Waiting for instruction cache primed*/ 2965 for (i = 0; i < usec_timeout; i++) { 2966 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2967 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2968 ICACHE_PRIMED)) 2969 break; 2970 udelay(1); 2971 } 2972 2973 if (i >= usec_timeout) { 2974 dev_err(adev->dev, "failed to prime instruction cache\n"); 2975 return -EINVAL; 2976 } 2977 2978 mutex_lock(&adev->srbm_mutex); 2979 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2980 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2981 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2982 (me_hdr->ucode_start_addr_hi << 30) | 2983 (me_hdr->ucode_start_addr_lo >> 2) ); 2984 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2985 me_hdr->ucode_start_addr_hi>>2); 2986 2987 /* 2988 * Program CP_ME_CNTL to reset given PIPE to take 2989 * effect of CP_PFP_PRGRM_CNTR_START. 
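	 * (CP_ME_PRGRM_CNTR_START in this ME path.)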
2990 */ 2991 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2992 if (pipe_id == 0) 2993 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2994 ME_PIPE0_RESET, 1); 2995 else 2996 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2997 ME_PIPE1_RESET, 1); 2998 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2999 3000 /* Clear pfp pipe0 reset bit. */ 3001 if (pipe_id == 0) 3002 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3003 ME_PIPE0_RESET, 0); 3004 else 3005 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3006 ME_PIPE1_RESET, 0); 3007 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3008 3009 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 3010 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3011 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 3012 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3013 } 3014 soc21_grbm_select(adev, 0, 0, 0, 0); 3015 mutex_unlock(&adev->srbm_mutex); 3016 3017 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3018 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3019 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3020 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3021 3022 /* Invalidate the data caches */ 3023 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3024 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3025 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3026 3027 for (i = 0; i < usec_timeout; i++) { 3028 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3029 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3030 INVALIDATE_DCACHE_COMPLETE)) 3031 break; 3032 udelay(1); 3033 } 3034 3035 if (i >= usec_timeout) { 3036 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3037 return -EINVAL; 3038 } 3039 3040 return 0; 3041 } 3042 3043 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 3044 { 3045 int r; 3046 3047 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 3048 return -EINVAL; 3049 3050 gfx_v11_0_cp_gfx_enable(adev, false); 3051 3052 if (adev->gfx.rs64_enable) 3053 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev); 3054 else 3055 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev); 3056 if (r) { 3057 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 3058 return r; 3059 } 3060 3061 if (adev->gfx.rs64_enable) 3062 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev); 3063 else 3064 r = gfx_v11_0_cp_gfx_load_me_microcode(adev); 3065 if (r) { 3066 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 3067 return r; 3068 } 3069 3070 return 0; 3071 } 3072 3073 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev) 3074 { 3075 struct amdgpu_ring *ring; 3076 const struct cs_section_def *sect = NULL; 3077 const struct cs_extent_def *ext = NULL; 3078 int r, i; 3079 int ctx_reg_offset; 3080 3081 /* init the CP */ 3082 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 3083 adev->gfx.config.max_hw_contexts - 1); 3084 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 3085 3086 if (!amdgpu_async_gfx_ring) 3087 gfx_v11_0_cp_gfx_enable(adev, true); 3088 3089 ring = &adev->gfx.gfx_ring[0]; 3090 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); 3091 if (r) { 3092 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3093 return r; 3094 } 3095 3096 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3097 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 3098 3099 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 3100 amdgpu_ring_write(ring, 0x80000000); 3101 amdgpu_ring_write(ring, 0x80000000); 3102 3103 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 3104 for (ext = sect->section; 
ext->extent != NULL; ++ext) { 3105 if (sect->id == SECT_CONTEXT) { 3106 amdgpu_ring_write(ring, 3107 PACKET3(PACKET3_SET_CONTEXT_REG, 3108 ext->reg_count)); 3109 amdgpu_ring_write(ring, ext->reg_index - 3110 PACKET3_SET_CONTEXT_REG_START); 3111 for (i = 0; i < ext->reg_count; i++) 3112 amdgpu_ring_write(ring, ext->extent[i]); 3113 } 3114 } 3115 } 3116 3117 ctx_reg_offset = 3118 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 3119 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 3120 amdgpu_ring_write(ring, ctx_reg_offset); 3121 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 3122 3123 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3124 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 3125 3126 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3127 amdgpu_ring_write(ring, 0); 3128 3129 amdgpu_ring_commit(ring); 3130 3131 /* submit cs packet to copy state 0 to next available state */ 3132 if (adev->gfx.num_gfx_rings > 1) { 3133 /* maximum supported gfx ring is 2 */ 3134 ring = &adev->gfx.gfx_ring[1]; 3135 r = amdgpu_ring_alloc(ring, 2); 3136 if (r) { 3137 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3138 return r; 3139 } 3140 3141 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3142 amdgpu_ring_write(ring, 0); 3143 3144 amdgpu_ring_commit(ring); 3145 } 3146 return 0; 3147 } 3148 3149 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 3150 CP_PIPE_ID pipe) 3151 { 3152 u32 tmp; 3153 3154 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 3155 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 3156 3157 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 3158 } 3159 3160 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 3161 struct amdgpu_ring *ring) 3162 { 3163 u32 tmp; 3164 3165 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3166 if (ring->use_doorbell) { 3167 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3168 DOORBELL_OFFSET, ring->doorbell_index); 3169 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3170 DOORBELL_EN, 1); 3171 } else { 3172 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3173 DOORBELL_EN, 0); 3174 } 3175 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 3176 3177 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 3178 DOORBELL_RANGE_LOWER, ring->doorbell_index); 3179 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 3180 3181 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3182 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 3183 } 3184 3185 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) 3186 { 3187 struct amdgpu_ring *ring; 3188 u32 tmp; 3189 u32 rb_bufsz; 3190 u64 rb_addr, rptr_addr, wptr_gpu_addr; 3191 u32 i; 3192 3193 /* Set the write pointer delay */ 3194 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 3195 3196 /* set the RB to use vmid 0 */ 3197 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 3198 3199 /* Init gfx ring 0 for pipe 0 */ 3200 mutex_lock(&adev->srbm_mutex); 3201 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3202 3203 /* Set ring buffer size */ 3204 ring = &adev->gfx.gfx_ring[0]; 3205 rb_bufsz = order_base_2(ring->ring_size / 8); 3206 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 3207 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 3208 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3209 3210 /* Initialize the ring buffer's write pointers */ 3211 ring->wptr = 0; 3212 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3213 
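	/* the write pointer is 64-bit; the high dword is written next */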
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);

	gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Init gfx ring 1 for pipe 1 */
	if (adev->gfx.num_gfx_rings > 1) {
		mutex_lock(&adev->srbm_mutex);
		gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
		/* maximum supported gfx ring is 2 */
		ring = &adev->gfx.gfx_ring[1];
		rb_bufsz = order_base_2(ring->ring_size / 8);
		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
		/* Initialize the ring buffer's write pointers */
		ring->wptr = 0;
		WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
		/* Set the wb address whether it's enabled or not */
		rptr_addr = ring->rptr_gpu_addr;
		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
		WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
		wptr_gpu_addr = ring->wptr_gpu_addr;
		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
			     lower_32_bits(wptr_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
			     upper_32_bits(wptr_gpu_addr));

		mdelay(1);
		WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);

		rb_addr = ring->gpu_addr >> 8;
		WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
		WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
		WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);

		gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
		mutex_unlock(&adev->srbm_mutex);
	}
	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v11_0_cp_gfx_start(adev);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}

	return 0;
}

static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	u32 data;

	if (adev->gfx.rs64_enable) {
		data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
				     enable ? 0 : 1);
		data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
				     enable ?
0 : 1); 3305 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 3306 enable ? 0 : 1); 3307 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 3308 enable ? 1 : 0); 3309 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 3310 enable ? 1 : 0); 3311 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 3312 enable ? 1 : 0); 3313 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 3314 enable ? 1 : 0); 3315 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 3316 enable ? 0 : 1); 3317 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 3318 } else { 3319 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); 3320 3321 if (enable) { 3322 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); 3323 if (!adev->enable_mes_kiq) 3324 data = REG_SET_FIELD(data, CP_MEC_CNTL, 3325 MEC_ME2_HALT, 0); 3326 } else { 3327 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); 3328 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); 3329 } 3330 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); 3331 } 3332 3333 adev->gfx.kiq.ring.sched.ready = enable; 3334 3335 udelay(50); 3336 } 3337 3338 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) 3339 { 3340 const struct gfx_firmware_header_v1_0 *mec_hdr; 3341 const __le32 *fw_data; 3342 unsigned i, fw_size; 3343 u32 *fw = NULL; 3344 int r; 3345 3346 if (!adev->gfx.mec_fw) 3347 return -EINVAL; 3348 3349 gfx_v11_0_cp_compute_enable(adev, false); 3350 3351 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3352 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3353 3354 fw_data = (const __le32 *) 3355 (adev->gfx.mec_fw->data + 3356 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 3357 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 3358 3359 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 3360 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3361 &adev->gfx.mec.mec_fw_obj, 3362 &adev->gfx.mec.mec_fw_gpu_addr, 3363 (void **)&fw); 3364 if (r) { 3365 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); 3366 gfx_v11_0_mec_fini(adev); 3367 return r; 3368 } 3369 3370 memcpy(fw, fw_data, fw_size); 3371 3372 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3373 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3374 3375 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); 3376 3377 /* MEC1 */ 3378 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); 3379 3380 for (i = 0; i < mec_hdr->jt_size; i++) 3381 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, 3382 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 3383 3384 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 3385 3386 return 0; 3387 } 3388 3389 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 3390 { 3391 const struct gfx_firmware_header_v2_0 *mec_hdr; 3392 const __le32 *fw_ucode, *fw_data; 3393 u32 tmp, fw_ucode_size, fw_data_size; 3394 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 3395 u32 *fw_ucode_ptr, *fw_data_ptr; 3396 int r; 3397 3398 if (!adev->gfx.mec_fw) 3399 return -EINVAL; 3400 3401 gfx_v11_0_cp_compute_enable(adev, false); 3402 3403 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 3404 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3405 3406 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 3407 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 3408 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 3409 3410 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 3411 le32_to_cpu(mec_hdr->data_offset_bytes)); 
3412 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 3413 3414 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3415 64 * 1024, 3416 AMDGPU_GEM_DOMAIN_VRAM | 3417 AMDGPU_GEM_DOMAIN_GTT, 3418 &adev->gfx.mec.mec_fw_obj, 3419 &adev->gfx.mec.mec_fw_gpu_addr, 3420 (void **)&fw_ucode_ptr); 3421 if (r) { 3422 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 3423 gfx_v11_0_mec_fini(adev); 3424 return r; 3425 } 3426 3427 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3428 64 * 1024, 3429 AMDGPU_GEM_DOMAIN_VRAM | 3430 AMDGPU_GEM_DOMAIN_GTT, 3431 &adev->gfx.mec.mec_fw_data_obj, 3432 &adev->gfx.mec.mec_fw_data_gpu_addr, 3433 (void **)&fw_data_ptr); 3434 if (r) { 3435 dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r); 3436 gfx_v11_0_mec_fini(adev); 3437 return r; 3438 } 3439 3440 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); 3441 memcpy(fw_data_ptr, fw_data, fw_data_size); 3442 3443 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3444 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); 3445 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3446 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); 3447 3448 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 3449 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 3450 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 3451 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 3452 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 3453 3454 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 3455 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 3456 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 3457 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 3458 3459 mutex_lock(&adev->srbm_mutex); 3460 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 3461 soc21_grbm_select(adev, 1, i, 0, 0); 3462 3463 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr); 3464 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 3465 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr)); 3466 3467 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 3468 mec_hdr->ucode_start_addr_lo >> 2 | 3469 mec_hdr->ucode_start_addr_hi << 30); 3470 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 3471 mec_hdr->ucode_start_addr_hi >> 2); 3472 3473 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr); 3474 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 3475 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 3476 } 3477 mutex_unlock(&adev->srbm_mutex); 3478 soc21_grbm_select(adev, 0, 0, 0, 0); 3479 3480 /* Trigger an invalidation of the L1 data cache */ 3481 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 3482 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3483 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 3484 3485 /* Wait for invalidation complete */ 3486 for (i = 0; i < usec_timeout; i++) { 3487 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 3488 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 3489 INVALIDATE_DCACHE_COMPLETE)) 3490 break; 3491 udelay(1); 3492 } 3493 3494 if (i >= usec_timeout) { 3495 dev_err(adev->dev, "failed to invalidate data cache\n"); 3496 return -EINVAL; 3497 } 3498 3499 /* Trigger an invalidation of the L1 instruction caches */ 3500 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 3501 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 3502 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 3503 3504 /* Wait for invalidation complete */ 3505 for (i = 0; i < usec_timeout; i++) { 3506 tmp = RREG32_SOC15(GC, 
0, regCP_CPC_IC_OP_CNTL); 3507 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 3508 INVALIDATE_CACHE_COMPLETE)) 3509 break; 3510 udelay(1); 3511 } 3512 3513 if (i >= usec_timeout) { 3514 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3515 return -EINVAL; 3516 } 3517 3518 return 0; 3519 } 3520 3521 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring) 3522 { 3523 uint32_t tmp; 3524 struct amdgpu_device *adev = ring->adev; 3525 3526 /* tell RLC which queue is the KIQ */ 3527 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 3528 tmp &= 0xffffff00; 3529 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 3530 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3531 tmp |= 0x80; 3532 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3533 } 3534 3535 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev) 3536 { 3537 /* set graphics engine doorbell range */ 3538 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 3539 (adev->doorbell_index.gfx_ring0 * 2) << 2); 3540 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3541 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 3542 3543 /* set compute engine doorbell range */ 3544 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3545 (adev->doorbell_index.kiq * 2) << 2); 3546 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3547 (adev->doorbell_index.userqueue_end * 2) << 2); 3548 } 3549 3550 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 3551 struct amdgpu_mqd_prop *prop) 3552 { 3553 struct v11_gfx_mqd *mqd = m; 3554 uint64_t hqd_gpu_addr, wb_gpu_addr; 3555 uint32_t tmp; 3556 uint32_t rb_bufsz; 3557 3558 /* set up gfx hqd wptr */ 3559 mqd->cp_gfx_hqd_wptr = 0; 3560 mqd->cp_gfx_hqd_wptr_hi = 0; 3561 3562 /* set the pointer to the MQD */ 3563 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 3564 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 3565 3566 /* set up mqd control */ 3567 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); 3568 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 3569 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 3570 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 3571 mqd->cp_gfx_mqd_control = tmp; 3572 3573 /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */ 3574 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); 3575 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 3576 mqd->cp_gfx_hqd_vmid = 0; 3577 3578 /* set up default queue priority level 3579 * 0x0 = low priority, 0x1 = high priority */ 3580 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); 3581 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); 3582 mqd->cp_gfx_hqd_queue_priority = tmp; 3583 3584 /* set up time quantum */ 3585 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); 3586 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 3587 mqd->cp_gfx_hqd_quantum = tmp; 3588 3589 /* set up gfx hqd base. 
this is similar to CP_RB_BASE */ 3590 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 3591 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 3592 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 3593 3594 /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */ 3595 wb_gpu_addr = prop->rptr_gpu_addr; 3596 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 3597 mqd->cp_gfx_hqd_rptr_addr_hi = 3598 upper_32_bits(wb_gpu_addr) & 0xffff; 3599 3600 /* set up rb_wptr_poll addr */ 3601 wb_gpu_addr = prop->wptr_gpu_addr; 3602 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3603 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3604 3605 /* set up the gfx_hqd_control, similar to CP_RB0_CNTL */ 3606 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 3607 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); 3608 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 3609 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 3610 #ifdef __BIG_ENDIAN 3611 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 3612 #endif 3613 mqd->cp_gfx_hqd_cntl = tmp; 3614 3615 /* set up cp_doorbell_control */ 3616 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3617 if (prop->use_doorbell) { 3618 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3619 DOORBELL_OFFSET, prop->doorbell_index); 3620 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3621 DOORBELL_EN, 1); 3622 } else 3623 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3624 DOORBELL_EN, 0); 3625 mqd->cp_rb_doorbell_control = tmp; 3626 3627 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3628 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); 3629 3630 /* activate the queue */ 3631 mqd->cp_gfx_hqd_active = 1; 3632 3633 return 0; 3634 } 3635 3636 #ifdef BRING_UP_DEBUG 3637 static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring) 3638 { 3639 struct amdgpu_device *adev = ring->adev; 3640 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 3641 3642 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */ 3643 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr); 3644 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi); 3645 3646 /* set GFX_MQD_BASE */ 3647 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr); 3648 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); 3649 3650 /* set GFX_MQD_CONTROL */ 3651 WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control); 3652 3653 /* set GFX_HQD_VMID to 0 */ 3654 WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid); 3655 3656 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY, 3657 mqd->cp_gfx_hqd_queue_priority); 3658 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum); 3659 3660 /* set GFX_HQD_BASE, similar to CP_RB_BASE */ 3661 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base); 3662 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi); 3663 3664 /* set GFX_HQD_RPTR_ADDR, similar to CP_RB_RPTR */ 3665 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr); 3666 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi); 3667 3668 /* set GFX_HQD_CNTL, similar to CP_RB_CNTL */ 3669 WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl); 3670 3671 /* set RB_WPTR_POLL_ADDR */ 3672 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo); 3673 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi); 3674 3675 /* set RB_DOORBELL_CONTROL */ 3676 WREG32_SOC15(GC, 0, 
regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control); 3677 3678 /* active the queue */ 3679 WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active); 3680 3681 return 0; 3682 } 3683 #endif 3684 3685 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) 3686 { 3687 struct amdgpu_device *adev = ring->adev; 3688 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 3689 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 3690 3691 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 3692 memset((void *)mqd, 0, sizeof(*mqd)); 3693 mutex_lock(&adev->srbm_mutex); 3694 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3695 amdgpu_ring_init_mqd(ring); 3696 #ifdef BRING_UP_DEBUG 3697 gfx_v11_0_gfx_queue_init_register(ring); 3698 #endif 3699 soc21_grbm_select(adev, 0, 0, 0, 0); 3700 mutex_unlock(&adev->srbm_mutex); 3701 if (adev->gfx.me.mqd_backup[mqd_idx]) 3702 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3703 } else if (amdgpu_in_reset(adev)) { 3704 /* reset mqd with the backup copy */ 3705 if (adev->gfx.me.mqd_backup[mqd_idx]) 3706 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 3707 /* reset the ring */ 3708 ring->wptr = 0; 3709 *ring->wptr_cpu_addr = 0; 3710 amdgpu_ring_clear_ring(ring); 3711 #ifdef BRING_UP_DEBUG 3712 mutex_lock(&adev->srbm_mutex); 3713 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3714 gfx_v11_0_gfx_queue_init_register(ring); 3715 soc21_grbm_select(adev, 0, 0, 0, 0); 3716 mutex_unlock(&adev->srbm_mutex); 3717 #endif 3718 } else { 3719 amdgpu_ring_clear_ring(ring); 3720 } 3721 3722 return 0; 3723 } 3724 3725 #ifndef BRING_UP_DEBUG 3726 static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) 3727 { 3728 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 3729 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 3730 int r, i; 3731 3732 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) 3733 return -EINVAL; 3734 3735 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * 3736 adev->gfx.num_gfx_rings); 3737 if (r) { 3738 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 3739 return r; 3740 } 3741 3742 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 3743 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); 3744 3745 return amdgpu_ring_test_helper(kiq_ring); 3746 } 3747 #endif 3748 3749 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 3750 { 3751 int r, i; 3752 struct amdgpu_ring *ring; 3753 3754 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3755 ring = &adev->gfx.gfx_ring[i]; 3756 3757 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3758 if (unlikely(r != 0)) 3759 goto done; 3760 3761 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3762 if (!r) { 3763 r = gfx_v11_0_gfx_init_queue(ring); 3764 amdgpu_bo_kunmap(ring->mqd_obj); 3765 ring->mqd_ptr = NULL; 3766 } 3767 amdgpu_bo_unreserve(ring->mqd_obj); 3768 if (r) 3769 goto done; 3770 } 3771 #ifndef BRING_UP_DEBUG 3772 r = gfx_v11_0_kiq_enable_kgq(adev); 3773 if (r) 3774 goto done; 3775 #endif 3776 r = gfx_v11_0_cp_gfx_start(adev); 3777 if (r) 3778 goto done; 3779 3780 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3781 ring = &adev->gfx.gfx_ring[i]; 3782 ring->sched.ready = true; 3783 } 3784 done: 3785 return r; 3786 } 3787 3788 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 3789 struct amdgpu_mqd_prop *prop) 3790 { 3791 struct v11_compute_mqd *mqd = m; 3792 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 3793 uint32_t tmp; 3794 3795 mqd->header = 0xC0310800; 3796 mqd->compute_pipelinestat_enable = 0x00000001; 3797 
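	/*
	 * Descriptive sketch: the static_thread_mgmt_se* words below are
	 * per-shader-engine CU enable bitmasks; the all-ones value leaves
	 * every compute unit eligible for waves from this queue. A purely
	 * hypothetical restriction of a queue to the first four CUs of SE0
	 * (not something this driver does for kernel queues) would look
	 * like:
	 *
	 *   mqd->compute_static_thread_mgmt_se0 = 0x0000000f;
	 */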
mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 3798 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 3799 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 3800 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 3801 mqd->compute_misc_reserved = 0x00000007; 3802 3803 eop_base_addr = prop->eop_gpu_addr >> 8; 3804 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 3805 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 3806 3807 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3808 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); 3809 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 3810 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1)); 3811 3812 mqd->cp_hqd_eop_control = tmp; 3813 3814 /* enable doorbell? */ 3815 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3816 3817 if (prop->use_doorbell) { 3818 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3819 DOORBELL_OFFSET, prop->doorbell_index); 3820 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3821 DOORBELL_EN, 1); 3822 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3823 DOORBELL_SOURCE, 0); 3824 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3825 DOORBELL_HIT, 0); 3826 } else { 3827 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3828 DOORBELL_EN, 0); 3829 } 3830 3831 mqd->cp_hqd_pq_doorbell_control = tmp; 3832 3833 /* disable the queue if it's active */ 3834 mqd->cp_hqd_dequeue_request = 0; 3835 mqd->cp_hqd_pq_rptr = 0; 3836 mqd->cp_hqd_pq_wptr_lo = 0; 3837 mqd->cp_hqd_pq_wptr_hi = 0; 3838 3839 /* set the pointer to the MQD */ 3840 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 3841 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 3842 3843 /* set MQD vmid to 0 */ 3844 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); 3845 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 3846 mqd->cp_mqd_control = tmp; 3847 3848 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3849 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 3850 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 3851 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 3852 3853 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3854 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); 3855 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 3856 (order_base_2(prop->queue_size / 4) - 1)); 3857 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 3858 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 3859 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 3860 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 3861 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 3862 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 3863 mqd->cp_hqd_pq_control = tmp; 3864 3865 /* set the wb address whether it's enabled or not */ 3866 wb_gpu_addr = prop->rptr_gpu_addr; 3867 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 3868 mqd->cp_hqd_pq_rptr_report_addr_hi = 3869 upper_32_bits(wb_gpu_addr) & 0xffff; 3870 3871 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3872 wb_gpu_addr = prop->wptr_gpu_addr; 3873 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3874 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3875 3876 tmp = 0; 3877 /* enable the doorbell if requested */ 3878 if (prop->use_doorbell) { 3879 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 3880 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3881 DOORBELL_OFFSET, prop->doorbell_index); 3882 3883 tmp = 
REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3884 DOORBELL_EN, 1); 3885 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3886 DOORBELL_SOURCE, 0); 3887 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3888 DOORBELL_HIT, 0); 3889 } 3890 3891 mqd->cp_hqd_pq_doorbell_control = tmp; 3892 3893 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3894 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); 3895 3896 /* set the vmid for the queue */ 3897 mqd->cp_hqd_vmid = 0; 3898 3899 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); 3900 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 3901 mqd->cp_hqd_persistent_state = tmp; 3902 3903 /* set MIN_IB_AVAIL_SIZE */ 3904 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); 3905 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 3906 mqd->cp_hqd_ib_control = tmp; 3907 3908 /* set static priority for a compute queue/ring */ 3909 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 3910 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 3911 3912 mqd->cp_hqd_active = prop->hqd_active; 3913 3914 return 0; 3915 } 3916 3917 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring) 3918 { 3919 struct amdgpu_device *adev = ring->adev; 3920 struct v11_compute_mqd *mqd = ring->mqd_ptr; 3921 int j; 3922 3923 /* inactivate the queue */ 3924 if (amdgpu_sriov_vf(adev)) 3925 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 3926 3927 /* disable wptr polling */ 3928 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3929 3930 /* write the EOP addr */ 3931 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 3932 mqd->cp_hqd_eop_base_addr_lo); 3933 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 3934 mqd->cp_hqd_eop_base_addr_hi); 3935 3936 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3937 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 3938 mqd->cp_hqd_eop_control); 3939 3940 /* enable doorbell? 
*/ 3941 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 3942 mqd->cp_hqd_pq_doorbell_control); 3943 3944 /* disable the queue if it's active */ 3945 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 3946 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 3947 for (j = 0; j < adev->usec_timeout; j++) { 3948 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 3949 break; 3950 udelay(1); 3951 } 3952 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 3953 mqd->cp_hqd_dequeue_request); 3954 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 3955 mqd->cp_hqd_pq_rptr); 3956 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 3957 mqd->cp_hqd_pq_wptr_lo); 3958 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 3959 mqd->cp_hqd_pq_wptr_hi); 3960 } 3961 3962 /* set the pointer to the MQD */ 3963 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 3964 mqd->cp_mqd_base_addr_lo); 3965 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 3966 mqd->cp_mqd_base_addr_hi); 3967 3968 /* set MQD vmid to 0 */ 3969 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 3970 mqd->cp_mqd_control); 3971 3972 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3973 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 3974 mqd->cp_hqd_pq_base_lo); 3975 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 3976 mqd->cp_hqd_pq_base_hi); 3977 3978 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3979 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 3980 mqd->cp_hqd_pq_control); 3981 3982 /* set the wb address whether it's enabled or not */ 3983 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 3984 mqd->cp_hqd_pq_rptr_report_addr_lo); 3985 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 3986 mqd->cp_hqd_pq_rptr_report_addr_hi); 3987 3988 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3989 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 3990 mqd->cp_hqd_pq_wptr_poll_addr_lo); 3991 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 3992 mqd->cp_hqd_pq_wptr_poll_addr_hi); 3993 3994 /* enable the doorbell if requested */ 3995 if (ring->use_doorbell) { 3996 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3997 (adev->doorbell_index.kiq * 2) << 2); 3998 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3999 (adev->doorbell_index.userqueue_end * 2) << 2); 4000 } 4001 4002 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4003 mqd->cp_hqd_pq_doorbell_control); 4004 4005 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4006 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4007 mqd->cp_hqd_pq_wptr_lo); 4008 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4009 mqd->cp_hqd_pq_wptr_hi); 4010 4011 /* set the vmid for the queue */ 4012 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 4013 4014 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 4015 mqd->cp_hqd_persistent_state); 4016 4017 /* activate the queue */ 4018 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 4019 mqd->cp_hqd_active); 4020 4021 if (ring->use_doorbell) 4022 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 4023 4024 return 0; 4025 } 4026 4027 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) 4028 { 4029 struct amdgpu_device *adev = ring->adev; 4030 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4031 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 4032 4033 gfx_v11_0_kiq_setting(ring); 4034 4035 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4036 /* reset MQD to a clean status */ 4037 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4038 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4039 4040 /* reset ring buffer */ 4041 ring->wptr = 0; 4042 amdgpu_ring_clear_ring(ring); 4043 4044 
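		/*
		 * Reset-path overview (descriptive): the MQD was just
		 * restored from the CPU-side backup taken at first init (see
		 * the else branch below) and the software ring state was
		 * cleared; the HQD registers are now reprogrammed from the
		 * restored MQD under srbm_mutex so the GRBM me/pipe/queue
		 * selection cannot race with other register accessors.
		 */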
mutex_lock(&adev->srbm_mutex); 4045 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4046 gfx_v11_0_kiq_init_register(ring); 4047 soc21_grbm_select(adev, 0, 0, 0, 0); 4048 mutex_unlock(&adev->srbm_mutex); 4049 } else { 4050 memset((void *)mqd, 0, sizeof(*mqd)); 4051 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 4052 amdgpu_ring_clear_ring(ring); 4053 mutex_lock(&adev->srbm_mutex); 4054 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4055 amdgpu_ring_init_mqd(ring); 4056 gfx_v11_0_kiq_init_register(ring); 4057 soc21_grbm_select(adev, 0, 0, 0, 0); 4058 mutex_unlock(&adev->srbm_mutex); 4059 4060 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4061 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4062 } 4063 4064 return 0; 4065 } 4066 4067 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) 4068 { 4069 struct amdgpu_device *adev = ring->adev; 4070 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4071 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 4072 4073 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 4074 memset((void *)mqd, 0, sizeof(*mqd)); 4075 mutex_lock(&adev->srbm_mutex); 4076 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4077 amdgpu_ring_init_mqd(ring); 4078 soc21_grbm_select(adev, 0, 0, 0, 0); 4079 mutex_unlock(&adev->srbm_mutex); 4080 4081 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4082 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4083 } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4084 /* reset MQD to a clean status */ 4085 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4086 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4087 4088 /* reset ring buffer */ 4089 ring->wptr = 0; 4090 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 4091 amdgpu_ring_clear_ring(ring); 4092 } else { 4093 amdgpu_ring_clear_ring(ring); 4094 } 4095 4096 return 0; 4097 } 4098 4099 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4100 { 4101 struct amdgpu_ring *ring; 4102 int r; 4103 4104 ring = &adev->gfx.kiq.ring; 4105 4106 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4107 if (unlikely(r != 0)) 4108 return r; 4109 4110 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4111 if (unlikely(r != 0)) { 4112 amdgpu_bo_unreserve(ring->mqd_obj); 4113 return r; 4114 } 4115 4116 gfx_v11_0_kiq_init_queue(ring); 4117 amdgpu_bo_kunmap(ring->mqd_obj); 4118 ring->mqd_ptr = NULL; 4119 amdgpu_bo_unreserve(ring->mqd_obj); 4120 ring->sched.ready = true; 4121 return 0; 4122 } 4123 4124 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4125 { 4126 struct amdgpu_ring *ring = NULL; 4127 int r = 0, i; 4128 4129 if (!amdgpu_async_gfx_ring) 4130 gfx_v11_0_cp_compute_enable(adev, true); 4131 4132 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4133 ring = &adev->gfx.compute_ring[i]; 4134 4135 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4136 if (unlikely(r != 0)) 4137 goto done; 4138 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4139 if (!r) { 4140 r = gfx_v11_0_kcq_init_queue(ring); 4141 amdgpu_bo_kunmap(ring->mqd_obj); 4142 ring->mqd_ptr = NULL; 4143 } 4144 amdgpu_bo_unreserve(ring->mqd_obj); 4145 if (r) 4146 goto done; 4147 } 4148 4149 r = amdgpu_gfx_enable_kcq(adev); 4150 done: 4151 return r; 4152 } 4153 4154 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) 4155 { 4156 int r, i; 4157 struct amdgpu_ring *ring; 4158 4159 if (!(adev->flags & AMD_IS_APU)) 4160 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4161 4162 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4163 
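		/*
		 * Descriptive note: with AMDGPU_FW_LOAD_DIRECT the host
		 * writes the CP microcode through MMIO itself, in contrast
		 * to the PSP path (images handed to the PSP) and the RLC
		 * backdoor path (the RLC autoloads them), so the explicit
		 * gfx/compute microcode loads only run in this branch.
		 */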
/* legacy firmware loading */ 4164 r = gfx_v11_0_cp_gfx_load_microcode(adev); 4165 if (r) 4166 return r; 4167 4168 if (adev->gfx.rs64_enable) 4169 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev); 4170 else 4171 r = gfx_v11_0_cp_compute_load_microcode(adev); 4172 if (r) 4173 return r; 4174 } 4175 4176 gfx_v11_0_cp_set_doorbell_range(adev); 4177 4178 if (amdgpu_async_gfx_ring) { 4179 gfx_v11_0_cp_compute_enable(adev, true); 4180 gfx_v11_0_cp_gfx_enable(adev, true); 4181 } 4182 4183 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 4184 r = amdgpu_mes_kiq_hw_init(adev); 4185 else 4186 r = gfx_v11_0_kiq_resume(adev); 4187 if (r) 4188 return r; 4189 4190 r = gfx_v11_0_kcq_resume(adev); 4191 if (r) 4192 return r; 4193 4194 if (!amdgpu_async_gfx_ring) { 4195 r = gfx_v11_0_cp_gfx_resume(adev); 4196 if (r) 4197 return r; 4198 } else { 4199 r = gfx_v11_0_cp_async_gfx_ring_resume(adev); 4200 if (r) 4201 return r; 4202 } 4203 4204 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4205 ring = &adev->gfx.gfx_ring[i]; 4206 r = amdgpu_ring_test_helper(ring); 4207 if (r) 4208 return r; 4209 } 4210 4211 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4212 ring = &adev->gfx.compute_ring[i]; 4213 r = amdgpu_ring_test_helper(ring); 4214 if (r) 4215 return r; 4216 } 4217 4218 return 0; 4219 } 4220 4221 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable) 4222 { 4223 gfx_v11_0_cp_gfx_enable(adev, enable); 4224 gfx_v11_0_cp_compute_enable(adev, enable); 4225 } 4226 4227 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) 4228 { 4229 int r; 4230 bool value; 4231 4232 r = adev->gfxhub.funcs->gart_enable(adev); 4233 if (r) 4234 return r; 4235 4236 adev->hdp.funcs->flush_hdp(adev, NULL); 4237 4238 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
4239 false : true; 4240 4241 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 4242 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); 4243 4244 return 0; 4245 } 4246 4247 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) 4248 { 4249 u32 tmp; 4250 4251 /* select RS64 */ 4252 if (adev->gfx.rs64_enable) { 4253 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); 4254 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); 4255 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); 4256 4257 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); 4258 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); 4259 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); 4260 } 4261 4262 if (amdgpu_emu_mode == 1) 4263 msleep(100); 4264 } 4265 4266 static int get_gb_addr_config(struct amdgpu_device * adev) 4267 { 4268 u32 gb_addr_config; 4269 4270 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 4271 if (gb_addr_config == 0) 4272 return -EINVAL; 4273 4274 adev->gfx.config.gb_addr_config_fields.num_pkrs = 4275 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 4276 4277 adev->gfx.config.gb_addr_config = gb_addr_config; 4278 4279 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 4280 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4281 GB_ADDR_CONFIG, NUM_PIPES); 4282 4283 adev->gfx.config.max_tile_pipes = 4284 adev->gfx.config.gb_addr_config_fields.num_pipes; 4285 4286 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 4287 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4288 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 4289 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 4290 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4291 GB_ADDR_CONFIG, NUM_RB_PER_SE); 4292 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 4293 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4294 GB_ADDR_CONFIG, NUM_SHADER_ENGINES); 4295 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 4296 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4297 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 4298 4299 return 0; 4300 } 4301 4302 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) 4303 { 4304 uint32_t data; 4305 4306 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 4307 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 4308 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 4309 4310 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 4311 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 4312 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 4313 } 4314 4315 static int gfx_v11_0_hw_init(void *handle) 4316 { 4317 int r; 4318 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4319 4320 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 4321 if (adev->gfx.imu.funcs) { 4322 /* RLC autoload sequence 1: Program rlc ram */ 4323 if (adev->gfx.imu.funcs->program_rlc_ram) 4324 adev->gfx.imu.funcs->program_rlc_ram(adev); 4325 } 4326 /* rlc autoload firmware */ 4327 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); 4328 if (r) 4329 return r; 4330 } else { 4331 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4332 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 4333 if (adev->gfx.imu.funcs->load_microcode) 4334 adev->gfx.imu.funcs->load_microcode(adev); 4335 if (adev->gfx.imu.funcs->setup_imu) 4336 adev->gfx.imu.funcs->setup_imu(adev); 4337 if (adev->gfx.imu.funcs->start_imu) 4338 adev->gfx.imu.funcs->start_imu(adev); 4339 } 4340 4341 /* disable gpa mode in backdoor loading */ 4342 gfx_v11_0_disable_gpa_mode(adev); 4343 } 4344 } 4345 4346 if ((adev->firmware.load_type == 
AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 4347 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 4348 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev); 4349 if (r) { 4350 dev_err(adev->dev, "(%d) failed to wait for rlc autoload to complete\n", r); 4351 return r; 4352 } 4353 } 4354 4355 adev->gfx.is_poweron = true; 4356 4357 if (get_gb_addr_config(adev)) 4358 DRM_WARN("Invalid gb_addr_config!\n"); 4359 4360 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && 4361 adev->gfx.rs64_enable) 4362 gfx_v11_0_config_gfx_rs64(adev); 4363 4364 r = gfx_v11_0_gfxhub_enable(adev); 4365 if (r) 4366 return r; 4367 4368 if (!amdgpu_emu_mode) 4369 gfx_v11_0_init_golden_registers(adev); 4370 4371 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || 4372 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 4373 /* 4374 * For gfx 11, RLC firmware loading relies on the SMU firmware 4375 * being loaded first, so in the direct loading path the SMC 4376 * ucode has to be loaded here before the RLC. 4377 */ 4378 if (!(adev->flags & AMD_IS_APU)) { 4379 r = amdgpu_pm_load_smu_firmware(adev, NULL); 4380 if (r) 4381 return r; 4382 } 4383 } 4384 4385 gfx_v11_0_constants_init(adev); 4386 4387 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 4388 gfx_v11_0_select_cp_fw_arch(adev); 4389 4390 if (adev->nbio.funcs->gc_doorbell_init) 4391 adev->nbio.funcs->gc_doorbell_init(adev); 4392 4393 r = gfx_v11_0_rlc_resume(adev); 4394 if (r) 4395 return r; 4396 4397 /* 4398 * golden register init and rlc resume may override some registers, 4399 * so reconfigure them here 4400 */ 4401 gfx_v11_0_tcp_harvest(adev); 4402 4403 r = gfx_v11_0_cp_resume(adev); 4404 if (r) 4405 return r; 4406 4407 return r; 4408 } 4409 4410 #ifndef BRING_UP_DEBUG 4411 static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) 4412 { 4413 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 4414 struct amdgpu_ring *kiq_ring = &kiq->ring; 4415 int i, r = 0; 4416 4417 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 4418 return -EINVAL; 4419 4420 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * 4421 adev->gfx.num_gfx_rings)) 4422 return -ENOMEM; 4423 4424 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4425 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], 4426 PREEMPT_QUEUES, 0, 0); 4427 4428 if (adev->gfx.kiq.ring.sched.ready) 4429 r = amdgpu_ring_test_helper(kiq_ring); 4430 4431 return r; 4432 } 4433 #endif 4434 4435 static int gfx_v11_0_hw_fini(void *handle) 4436 { 4437 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4438 int r; 4439 4440 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); 4441 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 4442 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 4443 4444 if (!adev->no_hw_access) { 4445 #ifndef BRING_UP_DEBUG 4446 if (amdgpu_async_gfx_ring) { 4447 r = gfx_v11_0_kiq_disable_kgq(adev); 4448 if (r) 4449 DRM_ERROR("KGQ disable failed\n"); 4450 } 4451 #endif 4452 if (amdgpu_gfx_disable_kcq(adev)) 4453 DRM_ERROR("KCQ disable failed\n"); 4454 4455 amdgpu_mes_kiq_hw_fini(adev); 4456 } 4457 4458 if (amdgpu_sriov_vf(adev)) 4459 /* Remove the steps disabling CPG and clearing KIQ position, 4460 * so that CP could perform IDLE-SAVE during switch. Those 4461 * steps are necessary to avoid a DMAR error in gfx9, which 4462 * is not reproduced on gfx11. 
4463 */ 4464 return 0; 4465 4466 gfx_v11_0_cp_enable(adev, false); 4467 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4468 4469 adev->gfxhub.funcs->gart_disable(adev); 4470 4471 adev->gfx.is_poweron = false; 4472 4473 return 0; 4474 } 4475 4476 static int gfx_v11_0_suspend(void *handle) 4477 { 4478 return gfx_v11_0_hw_fini(handle); 4479 } 4480 4481 static int gfx_v11_0_resume(void *handle) 4482 { 4483 return gfx_v11_0_hw_init(handle); 4484 } 4485 4486 static bool gfx_v11_0_is_idle(void *handle) 4487 { 4488 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4489 4490 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 4491 GRBM_STATUS, GUI_ACTIVE)) 4492 return false; 4493 else 4494 return true; 4495 } 4496 4497 static int gfx_v11_0_wait_for_idle(void *handle) 4498 { 4499 unsigned i; 4500 u32 tmp; 4501 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4502 4503 for (i = 0; i < adev->usec_timeout; i++) { 4504 /* read MC_STATUS */ 4505 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 4506 GRBM_STATUS__GUI_ACTIVE_MASK; 4507 4508 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 4509 return 0; 4510 udelay(1); 4511 } 4512 return -ETIMEDOUT; 4513 } 4514 4515 static int gfx_v11_0_soft_reset(void *handle) 4516 { 4517 u32 grbm_soft_reset = 0; 4518 u32 tmp; 4519 int i, j, k; 4520 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4521 4522 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4523 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0); 4524 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0); 4525 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0); 4526 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0); 4527 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 4528 4529 gfx_v11_0_set_safe_mode(adev); 4530 4531 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 4532 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 4533 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 4534 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 4535 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); 4536 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); 4537 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); 4538 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 4539 4540 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 4541 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); 4542 } 4543 } 4544 } 4545 for (i = 0; i < adev->gfx.me.num_me; ++i) { 4546 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 4547 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 4548 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 4549 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); 4550 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); 4551 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); 4552 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 4553 4554 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); 4555 } 4556 } 4557 } 4558 4559 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe); 4560 4561 // Read CP_VMID_RESET register three times. 
4562 // to get sufficient time for GFX_HQD_ACTIVE reach 0 4563 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 4564 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 4565 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 4566 4567 for (i = 0; i < adev->usec_timeout; i++) { 4568 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) && 4569 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE)) 4570 break; 4571 udelay(1); 4572 } 4573 if (i >= adev->usec_timeout) { 4574 printk("Failed to wait all pipes clean\n"); 4575 return -EINVAL; 4576 } 4577 4578 /********** trigger soft reset ***********/ 4579 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 4580 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4581 SOFT_RESET_CP, 1); 4582 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4583 SOFT_RESET_GFX, 1); 4584 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4585 SOFT_RESET_CPF, 1); 4586 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4587 SOFT_RESET_CPC, 1); 4588 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4589 SOFT_RESET_CPG, 1); 4590 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 4591 /********** exit soft reset ***********/ 4592 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 4593 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4594 SOFT_RESET_CP, 0); 4595 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4596 SOFT_RESET_GFX, 0); 4597 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4598 SOFT_RESET_CPF, 0); 4599 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4600 SOFT_RESET_CPC, 0); 4601 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4602 SOFT_RESET_CPG, 0); 4603 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 4604 4605 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL); 4606 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1); 4607 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp); 4608 4609 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0); 4610 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0); 4611 4612 for (i = 0; i < adev->usec_timeout; i++) { 4613 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET)) 4614 break; 4615 udelay(1); 4616 } 4617 if (i >= adev->usec_timeout) { 4618 printk("Failed to wait CP_VMID_RESET to 0\n"); 4619 return -EINVAL; 4620 } 4621 4622 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4623 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 4624 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 4625 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 4626 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 4627 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 4628 4629 gfx_v11_0_unset_safe_mode(adev); 4630 4631 return gfx_v11_0_cp_resume(adev); 4632 } 4633 4634 static bool gfx_v11_0_check_soft_reset(void *handle) 4635 { 4636 int i, r; 4637 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4638 struct amdgpu_ring *ring; 4639 long tmo = msecs_to_jiffies(1000); 4640 4641 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4642 ring = &adev->gfx.gfx_ring[i]; 4643 r = amdgpu_ring_test_ib(ring, tmo); 4644 if (r) 4645 return true; 4646 } 4647 4648 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4649 ring = &adev->gfx.compute_ring[i]; 4650 r = amdgpu_ring_test_ib(ring, tmo); 4651 if (r) 4652 return true; 4653 } 4654 4655 return false; 4656 } 4657 4658 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) 4659 { 4660 uint64_t clock; 4661 4662 
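	/*
	 * The 64-bit TSC is sampled below as two 32-bit halves while GFXOFF
	 * is disabled and gpu_clock_mutex is held, which keeps the two reads
	 * close together. A hedged sketch of a rollover-safe alternative
	 * (not what this function does) would re-read the upper half until
	 * it is stable:
	 *
	 *   do {
	 *           hi = RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
	 *           lo = RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
	 *   } while (hi != RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER));
	 */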
amdgpu_gfx_off_ctrl(adev, false); 4663 mutex_lock(&adev->gfx.gpu_clock_mutex); 4664 clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) | 4665 ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL); 4666 mutex_unlock(&adev->gfx.gpu_clock_mutex); 4667 amdgpu_gfx_off_ctrl(adev, true); 4668 return clock; 4669 } 4670 4671 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 4672 uint32_t vmid, 4673 uint32_t gds_base, uint32_t gds_size, 4674 uint32_t gws_base, uint32_t gws_size, 4675 uint32_t oa_base, uint32_t oa_size) 4676 { 4677 struct amdgpu_device *adev = ring->adev; 4678 4679 /* GDS Base */ 4680 gfx_v11_0_write_data_to_reg(ring, 0, false, 4681 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, 4682 gds_base); 4683 4684 /* GDS Size */ 4685 gfx_v11_0_write_data_to_reg(ring, 0, false, 4686 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, 4687 gds_size); 4688 4689 /* GWS */ 4690 gfx_v11_0_write_data_to_reg(ring, 0, false, 4691 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, 4692 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 4693 4694 /* OA */ 4695 gfx_v11_0_write_data_to_reg(ring, 0, false, 4696 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, 4697 (1 << (oa_size + oa_base)) - (1 << oa_base)); 4698 } 4699 4700 static int gfx_v11_0_early_init(void *handle) 4701 { 4702 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4703 4704 adev->gfx.funcs = &gfx_v11_0_gfx_funcs; 4705 4706 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; 4707 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 4708 AMDGPU_MAX_COMPUTE_RINGS); 4709 4710 gfx_v11_0_set_kiq_pm4_funcs(adev); 4711 gfx_v11_0_set_ring_funcs(adev); 4712 gfx_v11_0_set_irq_funcs(adev); 4713 gfx_v11_0_set_gds_init(adev); 4714 gfx_v11_0_set_rlc_funcs(adev); 4715 gfx_v11_0_set_mqd_funcs(adev); 4716 gfx_v11_0_set_imu_funcs(adev); 4717 4718 gfx_v11_0_init_rlcg_reg_access_ctrl(adev); 4719 4720 return gfx_v11_0_init_microcode(adev); 4721 } 4722 4723 static int gfx_v11_0_ras_late_init(void *handle) 4724 { 4725 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4726 struct ras_common_if *gfx_common_if; 4727 int ret; 4728 4729 gfx_common_if = kzalloc(sizeof(struct ras_common_if), GFP_KERNEL); 4730 if (!gfx_common_if) 4731 return -ENOMEM; 4732 4733 gfx_common_if->block = AMDGPU_RAS_BLOCK__GFX; 4734 4735 ret = amdgpu_ras_feature_enable(adev, gfx_common_if, true); 4736 if (ret) 4737 dev_warn(adev->dev, "Failed to enable gfx11 ras feature\n"); 4738 4739 kfree(gfx_common_if); 4740 return 0; 4741 } 4742 4743 static int gfx_v11_0_late_init(void *handle) 4744 { 4745 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4746 int r; 4747 4748 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 4749 if (r) 4750 return r; 4751 4752 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 4753 if (r) 4754 return r; 4755 4756 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) { 4757 r = gfx_v11_0_ras_late_init(handle); 4758 if (r) 4759 return r; 4760 } 4761 4762 return 0; 4763 } 4764 4765 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 4766 { 4767 uint32_t rlc_cntl; 4768 4769 /* if RLC is not enabled, do nothing */ 4770 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 4771 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? 
true : false; 4772 } 4773 4774 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev) 4775 { 4776 uint32_t data; 4777 unsigned i; 4778 4779 data = RLC_SAFE_MODE__CMD_MASK; 4780 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 4781 4782 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 4783 4784 /* wait for RLC_SAFE_MODE */ 4785 for (i = 0; i < adev->usec_timeout; i++) { 4786 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 4787 RLC_SAFE_MODE, CMD)) 4788 break; 4789 udelay(1); 4790 } 4791 } 4792 4793 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev) 4794 { 4795 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 4796 } 4797 4798 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 4799 bool enable) 4800 { 4801 uint32_t def, data; 4802 4803 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 4804 return; 4805 4806 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4807 4808 if (enable) 4809 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 4810 else 4811 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 4812 4813 if (def != data) 4814 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4815 } 4816 4817 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 4818 bool enable) 4819 { 4820 uint32_t def, data; 4821 4822 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 4823 return; 4824 4825 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4826 4827 if (enable) 4828 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 4829 else 4830 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 4831 4832 if (def != data) 4833 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4834 } 4835 4836 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 4837 bool enable) 4838 { 4839 uint32_t def, data; 4840 4841 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 4842 return; 4843 4844 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4845 4846 if (enable) 4847 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 4848 else 4849 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 4850 4851 if (def != data) 4852 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4853 } 4854 4855 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 4856 bool enable) 4857 { 4858 uint32_t data, def; 4859 4860 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 4861 return; 4862 4863 /* It is disabled by HW by default */ 4864 if (enable) { 4865 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 4866 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 4867 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4868 4869 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 4870 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 4871 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 4872 4873 if (def != data) 4874 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4875 } 4876 } else { 4877 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 4878 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4879 4880 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 4881 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 4882 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 4883 4884 if (def != data) 4885 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4886 } 4887 } 4888 } 4889 4890 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 4891 bool enable) 4892 
{ 4893 uint32_t def, data; 4894 4895 if (!(adev->cg_flags & 4896 (AMD_CG_SUPPORT_GFX_CGCG | 4897 AMD_CG_SUPPORT_GFX_CGLS | 4898 AMD_CG_SUPPORT_GFX_3D_CGCG | 4899 AMD_CG_SUPPORT_GFX_3D_CGLS))) 4900 return; 4901 4902 if (enable) { 4903 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 4904 4905 /* unset CGCG override */ 4906 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 4907 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 4908 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 4909 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 4910 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 4911 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 4912 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 4913 4914 /* update CGCG override bits */ 4915 if (def != data) 4916 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 4917 4918 /* enable cgcg FSM(0x0000363F) */ 4919 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 4920 4921 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 4922 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 4923 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 4924 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 4925 } 4926 4927 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 4928 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 4929 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 4930 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 4931 } 4932 4933 if (def != data) 4934 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 4935 4936 /* Program RLC_CGCG_CGLS_CTRL_3D */ 4937 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 4938 4939 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 4940 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 4941 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 4942 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 4943 } 4944 4945 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 4946 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 4947 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 4948 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 4949 } 4950 4951 if (def != data) 4952 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 4953 4954 /* set IDLE_POLL_COUNT(0x00900100) */ 4955 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 4956 4957 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 4958 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 4959 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 4960 4961 if (def != data) 4962 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 4963 4964 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4965 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 4966 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 4967 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 4968 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 4969 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 4970 4971 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 4972 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 4973 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 4974 4975 /* Some ASICs only have one SDMA instance, no need to configure SDMA1 */ 4976 if (adev->sdma.num_instances > 1) { 4977 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 4978 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 4979 WREG32_SOC15(GC, 0, 
regSDMA1_RLC_CGCG_CTRL, data); 4980 } 4981 } else { 4982 /* Program RLC_CGCG_CGLS_CTRL */ 4983 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 4984 4985 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 4986 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 4987 4988 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 4989 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 4990 4991 if (def != data) 4992 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 4993 4994 /* Program RLC_CGCG_CGLS_CTRL_3D */ 4995 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 4996 4997 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 4998 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 4999 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5000 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5001 5002 if (def != data) 5003 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5004 5005 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5006 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5007 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5008 5009 /* Some ASICs only have one SDMA instance, no need to configure SDMA1 */ 5010 if (adev->sdma.num_instances > 1) { 5011 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5012 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5013 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5014 } 5015 } 5016 } 5017 5018 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, 5019 bool enable) 5020 { 5021 amdgpu_gfx_rlc_enter_safe_mode(adev); 5022 5023 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); 5024 5025 gfx_v11_0_update_medium_grain_clock_gating(adev, enable); 5026 5027 gfx_v11_0_update_repeater_fgcg(adev, enable); 5028 5029 gfx_v11_0_update_sram_fgcg(adev, enable); 5030 5031 gfx_v11_0_update_perf_clk(adev, enable); 5032 5033 if (adev->cg_flags & 5034 (AMD_CG_SUPPORT_GFX_MGCG | 5035 AMD_CG_SUPPORT_GFX_CGLS | 5036 AMD_CG_SUPPORT_GFX_CGCG | 5037 AMD_CG_SUPPORT_GFX_3D_CGCG | 5038 AMD_CG_SUPPORT_GFX_3D_CGLS)) 5039 gfx_v11_0_enable_gui_idle_interrupt(adev, enable); 5040 5041 amdgpu_gfx_rlc_exit_safe_mode(adev); 5042 5043 return 0; 5044 } 5045 5046 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) 5047 { 5048 u32 reg, data; 5049 5050 amdgpu_gfx_off_ctrl(adev, false); 5051 5052 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 5053 if (amdgpu_sriov_is_pp_one_vf(adev)) 5054 data = RREG32_NO_KIQ(reg); 5055 else 5056 data = RREG32(reg); 5057 5058 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 5059 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 5060 5061 if (amdgpu_sriov_is_pp_one_vf(adev)) 5062 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 5063 else 5064 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 5065 5066 amdgpu_gfx_off_ctrl(adev, true); 5067 } 5068 5069 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { 5070 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled, 5071 .set_safe_mode = gfx_v11_0_set_safe_mode, 5072 .unset_safe_mode = gfx_v11_0_unset_safe_mode, 5073 .init = gfx_v11_0_rlc_init, 5074 .get_csb_size = gfx_v11_0_get_csb_size, 5075 .get_csb_buffer = gfx_v11_0_get_csb_buffer, 5076 .resume = gfx_v11_0_rlc_resume, 5077 .stop = gfx_v11_0_rlc_stop, 5078 .reset = gfx_v11_0_rlc_reset, 5079 .start = gfx_v11_0_rlc_start, 5080 .update_spm_vmid = gfx_v11_0_update_spm_vmid, 5081 }; 5082 5083 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) 5084 { 5085 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 5086 5087 if (enable && (adev->pg_flags & 
AMD_PG_SUPPORT_GFX_PG)) 5088 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5089 else 5090 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5091 5092 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data); 5093 5094 // Program RLC_PG_DELAY3 for CGPG hysteresis 5095 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 5096 switch (adev->ip_versions[GC_HWIP][0]) { 5097 case IP_VERSION(11, 0, 1): 5098 case IP_VERSION(11, 0, 4): 5099 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); 5100 break; 5101 default: 5102 break; 5103 } 5104 } 5105 } 5106 5107 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable) 5108 { 5109 amdgpu_gfx_rlc_enter_safe_mode(adev); 5110 5111 gfx_v11_cntl_power_gating(adev, enable); 5112 5113 amdgpu_gfx_rlc_exit_safe_mode(adev); 5114 } 5115 5116 static int gfx_v11_0_set_powergating_state(void *handle, 5117 enum amd_powergating_state state) 5118 { 5119 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5120 bool enable = (state == AMD_PG_STATE_GATE); 5121 5122 if (amdgpu_sriov_vf(adev)) 5123 return 0; 5124 5125 switch (adev->ip_versions[GC_HWIP][0]) { 5126 case IP_VERSION(11, 0, 0): 5127 case IP_VERSION(11, 0, 2): 5128 case IP_VERSION(11, 0, 3): 5129 amdgpu_gfx_off_ctrl(adev, enable); 5130 break; 5131 case IP_VERSION(11, 0, 1): 5132 case IP_VERSION(11, 0, 4): 5133 gfx_v11_cntl_pg(adev, enable); 5134 amdgpu_gfx_off_ctrl(adev, enable); 5135 break; 5136 default: 5137 break; 5138 } 5139 5140 return 0; 5141 } 5142 5143 static int gfx_v11_0_set_clockgating_state(void *handle, 5144 enum amd_clockgating_state state) 5145 { 5146 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5147 5148 if (amdgpu_sriov_vf(adev)) 5149 return 0; 5150 5151 switch (adev->ip_versions[GC_HWIP][0]) { 5152 case IP_VERSION(11, 0, 0): 5153 case IP_VERSION(11, 0, 1): 5154 case IP_VERSION(11, 0, 2): 5155 case IP_VERSION(11, 0, 3): 5156 case IP_VERSION(11, 0, 4): 5157 gfx_v11_0_update_gfx_clock_gating(adev, 5158 state == AMD_CG_STATE_GATE); 5159 break; 5160 default: 5161 break; 5162 } 5163 5164 return 0; 5165 } 5166 5167 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags) 5168 { 5169 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5170 int data; 5171 5172 /* AMD_CG_SUPPORT_GFX_MGCG */ 5173 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5174 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5175 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5176 5177 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5178 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5179 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5180 5181 /* AMD_CG_SUPPORT_GFX_FGCG */ 5182 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5183 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5184 5185 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5186 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5187 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5188 5189 /* AMD_CG_SUPPORT_GFX_CGCG */ 5190 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5191 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5192 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5193 5194 /* AMD_CG_SUPPORT_GFX_CGLS */ 5195 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5196 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5197 5198 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5199 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5200 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5201 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5202 5203 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5204 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5205 
static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_REPEATER_FGCG */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_REPEATER_FGCG;

	/* AMD_CG_SUPPORT_GFX_FGCG */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_FGCG;

	/* AMD_CG_SUPPORT_GFX_PERF_CLK */
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}

static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	/* gfx11 hardware is 32bit rptr */
	return *(uint32_t *)ring->rptr_cpu_addr;
}

static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	} else {
		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
	uint64_t wptr_tmp;

	if (ring->is_mes_queue) {
		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
		aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);

		wptr_tmp = ring->wptr & ring->buf_mask;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
		*wptr_saved = wptr_tmp;
		/* assume doorbell always being used by mes mapped queue */
		if (*is_queue_unmap)
			WDOORBELL64(aggregated_db_index, wptr_tmp);
		WDOORBELL64(ring->doorbell_index, wptr_tmp);
	} else {
		if (ring->use_doorbell) {
			/* XXX check if swapping is necessary on BE */
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr);
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		} else {
			WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
				     lower_32_bits(ring->wptr));
			WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
				     upper_32_bits(ring->wptr));
		}
	}
}

static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	/* gfx11 hardware is 32bit rptr */
	return *(uint32_t *)ring->rptr_cpu_addr;
}

static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}
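
/*
 * For MES-managed queues the two dwords directly after the MQD appear to be
 * used as a software shadow: the saved write pointer, then an
 * "is_queue_unmap" flag set while the queue is unmapped.  When the flag is
 * set, the update also rings the priority-level aggregated doorbell so MES
 * notices it; a mapped queue only needs its own doorbell.
 */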
static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
	uint64_t wptr_tmp;

	if (ring->is_mes_queue) {
		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
		aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);

		wptr_tmp = ring->wptr & ring->buf_mask;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
		*wptr_saved = wptr_tmp;
		/* assume doorbell always used by mes mapped queue */
		if (*is_queue_unmap)
			WDOORBELL64(aggregated_db_index, wptr_tmp);
		WDOORBELL64(ring->doorbell_index, wptr_tmp);
	} else {
		/* XXX check if swapping is necessary on BE */
		if (ring->use_doorbell) {
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr);
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		} else {
			BUG(); /* only DOORBELL method supported on gfx11 now */
		}
	}
}

static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				       struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);

	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (flags & AMDGPU_IB_PREEMPTED)
			control |= INDIRECT_BUFFER_PRE_RESUME(1);

		if (vmid)
			gfx_v11_0_ring_emit_de_meta(ring,
				!amdgpu_sriov_vf(ring->adev) &&
				(flags & AMDGPU_IB_PREEMPTED));
	}

	if (ring->is_mes_queue)
		/* inherit vmid from mqd */
		control |= 0x400000;

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}
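
/*
 * An INDIRECT_BUFFER packet is four dwords: the PACKET3 header, the IB GPU
 * address split low/high, and a control word carrying the IB size in dwords
 * with the VMID shifted into bits 24 and up (or, for MES queues, a flag
 * telling the CP to take the vmid from the MQD instead).
 */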
5425 */ 5426 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 5427 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 5428 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 5429 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 5430 } 5431 5432 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 5433 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5434 amdgpu_ring_write(ring, 5435 #ifdef __BIG_ENDIAN 5436 (2 << 0) | 5437 #endif 5438 lower_32_bits(ib->gpu_addr)); 5439 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5440 amdgpu_ring_write(ring, control); 5441 } 5442 5443 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 5444 u64 seq, unsigned flags) 5445 { 5446 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 5447 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 5448 5449 /* RELEASE_MEM - flush caches, send int */ 5450 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 5451 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 5452 PACKET3_RELEASE_MEM_GCR_GL2_WB | 5453 PACKET3_RELEASE_MEM_GCR_GL2_INV | 5454 PACKET3_RELEASE_MEM_GCR_GL2_US | 5455 PACKET3_RELEASE_MEM_GCR_GL1_INV | 5456 PACKET3_RELEASE_MEM_GCR_GLV_INV | 5457 PACKET3_RELEASE_MEM_GCR_GLM_INV | 5458 PACKET3_RELEASE_MEM_GCR_GLM_WB | 5459 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 5460 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 5461 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 5462 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 5463 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 5464 5465 /* 5466 * the address should be Qword aligned if 64bit write, Dword 5467 * aligned if only send 32bit data low (discard data high) 5468 */ 5469 if (write64bit) 5470 BUG_ON(addr & 0x7); 5471 else 5472 BUG_ON(addr & 0x3); 5473 amdgpu_ring_write(ring, lower_32_bits(addr)); 5474 amdgpu_ring_write(ring, upper_32_bits(addr)); 5475 amdgpu_ring_write(ring, lower_32_bits(seq)); 5476 amdgpu_ring_write(ring, upper_32_bits(seq)); 5477 amdgpu_ring_write(ring, ring->is_mes_queue ? 
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}

static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	if (ring->is_mes_queue)
		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
	else
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
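
/*
 * Conditional execution: COND_EXEC tells the CP to skip the next N dwords
 * when the qword at cond_exe_gpu_addr reads back as zero.  N is not known at
 * emit time, so a dummy count (0x55aa55aa) is written and the helper returns
 * its ring offset; patch_cond_exec() below fills in the real count once the
 * frame has been fully emitted.
 */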
static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */

	return ret;
}

static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
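
/*
 * Mid-command-stream preemption: clear the cond_exec flag so the CP skips
 * the remainder of the preemptible frame, then ask the KIQ to preempt the
 * queue without unmapping it (PREEMPT_QUEUES_NO_UNMAP) and poll the trailing
 * fence the CP writes once preemption completes.  The flag is re-asserted
 * afterwards.
 */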
static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* assert IB preemption, emit the trailing fence */
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
				   ring->trail_fence_gpu_addr,
				   ++ring->trail_seq);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
	}

	/* deassert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_de_ib_state de_payload = {0};
	uint64_t offset, gds_addr, de_payload_gpu_addr;
	void *de_payload_cpu_addr;
	int cnt;

	if (ring->is_mes_queue) {
		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  gfx[0].gfx_meta_data) +
			offsetof(struct v10_gfx_meta_data, de_payload);
		de_payload_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		de_payload_cpu_addr =
			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  gfx[0].gds_backup) +
			offsetof(struct v10_gfx_meta_data, de_payload);
		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		offset = offsetof(struct v10_gfx_meta_data, de_payload);
		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;

		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
				 PAGE_SIZE);
	}

	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));

	if (resume)
		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
					   sizeof(de_payload) >> 2);
	else
		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
					   sizeof(de_payload) >> 2);
}

static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
					   bool secure)
{
	uint32_t v = secure ? FRAME_TMZ : 0;

	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}

static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
			  (5 << 8) |	/* dst: memory */
			  (1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
			  reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
			  reg_val_offs * 4));
}

static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
			       ref, mask, 0x20);
}

static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
					 unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, regSQ_CMD, value);
}
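
/*
 * EOP (end-of-pipe) interrupt routing.  The GFX ME (me 0) exposes two pipes,
 * each with its own CP_INT_CNTL_RING* register; the helpers below flip the
 * timestamp/generic0 enable bits per pipe rather than globally.
 */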
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	if (!me) {
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
			break;
		case 1:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						      int me, int pipe,
						      enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

#define CP_ME1_PIPE_INST_ADDR_INTERVAL	0x1
#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
	do { \
		uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
		tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
		WREG32_SOC15_IP(GC, reg_addr, tmp); \
	} while (0)
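
/*
 * The CP_ME1_PIPE*_INT_CNTL registers sit at consecutive offsets, so the ECC
 * enable below is applied to every MEC1 pipe by stepping the pipe-0 address
 * by CP_ME1_PIPE_INST_ADDR_INTERVAL; the ring-0 CP_ECC_ERROR enable for the
 * gfx engine is set separately.
 */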
static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *source,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t ecc_irq_state = 0;
	uint32_t pipe0_int_cntl_addr = 0;
	int i = 0;

	ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;

	pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);

	WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);

	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
		SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
				      ecc_irq_state);

	return 0;
}

static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}
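
/*
 * The fault handlers below decode entry->ring_id the same way as the EOP
 * path above: me in bits [3:2], pipe in bits [1:0], queue in bits [6:4].
 * For example, a ring_id of 0x26 decodes to me 1, pipe 2, queue 2.
 */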
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_REG_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_INSTR_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			/* we only enabled 1 gfx queue per pipe for now */
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}

static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}

static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
		return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);

	return 0;
}
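
/*
 * Disabled KIQ interrupt plumbing, kept for reference: as the BUG() in its
 * default case notes, the KIQ currently only uses GENERIC2_INT.
 */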
#if 0
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}
#endif

static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
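
/*
 * Dispatch tables.  Each ring type's .emit_frame_size is a worst-case dword
 * count for everything a single submission may emit (flushes, fences,
 * switches, the mem-sync above); amdgpu uses it to reserve ring space, so it
 * must remain an upper bound on the emit_* helpers it lists.
 */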
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v11_0_set_cp_ecc_error_state,
	.process = amdgpu_gfx_cp_ecc_error_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
	.process = gfx_v11_0_rlc_gc_fed_irq,
};

static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;

	adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
	adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
}

static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}

static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}

static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
			    adev->gfx.config.max_sh_per_se *
			    adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}

static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}
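
/*
 * On gfx11 CUs are paired into WGPs, so the per-SH WGP count is
 * max_cu_per_sh / 2 and each active WGP contributes two adjacent bits to the
 * CU bitmap (e.g. WGP 3 active -> CU bits 6 and 7 set).
 */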
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/*
			 * GFX11 could support more than 4 SEs, while the bitmap
			 * in cu_info struct is 4x4 and the ioctl interface struct
			 * drm_amdgpu_info_device should stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the cu mask for SEs 4 to 7; the layout is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};