/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define XCC_REG_RANGE_0_LOW  0x2000	/* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400	/* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW  0xA000	/* XCC gfxdec1 lower Bound */
#define XCC_REG_RANGE_1_HIGH 0x10000	/* XCC gfxdec1 upper Bound */

#define NORMALIZE_XCC_REG_OFFSET(offset) \
	((offset) & 0xFFFF)

static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
	/* SE status registers */
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
	/* compute queue registers */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
	/* read repeatedly to capture the successive words of the CP header dump */
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	u64 shader_mc_addr;

	/* Cleaner shader MC address */
	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask:0, queue_type:0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
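
/*
 * MAP_QUEUES hands a queue to the KIQ scheduler: the packet carries the
 * doorbell offset, the MQD address that CP fetches the queue state from,
 * and the wptr polling address.
 */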
static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}
static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr,
					u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
					  uint32_t xcc_id, uint32_t vmid)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	unsigned i;

	/* enter safe mode */
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait till dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait on hqd deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}

	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* exit safe mode */
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
	}
}
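
/*
 * Registers in the gfxdec0/gfxdec1 ranges are instanced per XCC. Ring
 * packets execute on a specific XCC, so for those ranges only the
 * offset local to that XCC (the low 16 bits) should be emitted.
 */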
static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/* If it is an XCC reg, normalize the reg to keep
	 * lower 16 bits in local xcc
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	/* Only do the normalization on regspace */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |	 /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}
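
/*
 * IB test: submit an IB that writes 0xDEADBEEF to a writeback slot,
 * wait on its fence, then check that the value landed.
 */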
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	int err;

	if (amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_sjt_mec.bin", chip_name);

		if (err)
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_mec.bin", chip_name);
	} else
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}
static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}
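
/*
 * Wave state is accessed indirectly: SQ_IND_INDEX selects the wave,
 * SIMD and register index (optionally auto-incrementing), and the
 * value is read back through SQ_IND_DATA.
 */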
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all, fetch from first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}
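
/*
 * When a PSP is present it owns spatial partitioning; otherwise program
 * CP_HYP_XCP_CTL on each XCC directly with the XCP size and the XCC's
 * virtual id within its partition.
 */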
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
					    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};
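
/*
 * RAS/ACA: translate an error bank reported by the SMU into a logged
 * UE or CE count; the IPID INSTANCEIDLO field identifies the reporting
 * XCD, which replaces the die id for gfx.
 */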
static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank, enum aca_smu_type type,
				      void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with xcd id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	adev->gfx.config.max_hw_contexts = 8;
	adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
	adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
	adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
	adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
	adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}
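
/*
 * Compute rings are laid out per XCC: each XCC owns a contiguous block
 * of rings, a doorbell range of xcc_doorbell_range slots, and a slice
 * of the shared EOP buffer (GFX9_MEC_HPD_SIZE bytes per ring).
 */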
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
	uint32_t *ptr, num_xcc, inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
		adev->gfx.ip_dump_core = NULL;
	} else {
		adev->gfx.ip_dump_core = ptr;
	}

	/* Allocate memory for compute queue registers for all the instances */
	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
		adev->gfx.mec.num_queue_per_pipe;

	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
		adev->gfx.ip_dump_compute_queues = NULL;
	} else {
		adev->gfx.ip_dump_compute_queues = ptr;
	}
}
static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
	}

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Bad opcode Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
			      &adev->gfx.bad_op_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
									 ring_id,
									 xcc_id,
									 i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	adev->gfx.compute_supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		if ((adev->gfx.mec_fw_version >= 155) &&
		    !amdgpu_sriov_vf(adev)) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	case IP_VERSION(9, 5, 0):
		if ((adev->gfx.mec_fw_version >= 21) &&
		    !amdgpu_sriov_vf(adev)) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	default:
		break;
	}
	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	gfx_v9_4_3_alloc_ip_dump(adev);

	return 0;
}

static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int i, num_xcc;
	struct amdgpu_device *adev = ip_block->adev;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	amdgpu_gfx_cleaner_shader_sw_fini(adev);

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	kfree(adev->gfx.ip_dump_core);
	kfree(adev->gfx.ip_dump_compute_queues);

	return 0;
}
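
/*
 * Note: SH_MEM_BASES packs two 16-bit aperture bases (private in the low
 * half, shared in the high half); writing DEFAULT_SH_MEM_BASES into both
 * halves places the KFD apertures in the 0x6000'... range described in
 * gfx_v9_4_3_xcc_init_compute_vmid() below.
 */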
#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:		0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:	0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:	0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}
static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

/* For ASICs that need the xnack chain and whose MEC firmware supports it,
 * set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform
 * KFD to set the xnack_chain bit in SET_RESOURCES.
 */
static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
	data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	/* ToDo: GC 9.4.4 */
	case IP_VERSION(9, 4, 3):
		if (adev->gfx.mec_fw_version >= 184 &&
		    (amdgpu_sriov_reg_access_sq_config(adev) ||
		     !amdgpu_sriov_vf(adev)))
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	case IP_VERSION(9, 5, 0):
		if (adev->gfx.mec_fw_version >= 23)
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	default:
		break;
	}

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}
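
/*
 * RLC save/restore machine: once SRM is enabled the RLC can save and
 * restore registers from its save/restore list, which the PG init
 * below relies on.
 */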
static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * Rlc save restore list is workable since v2_1.
	 */
	gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}
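
/*
 * Poll every SE/SH combination until the RLC serdes CU master reports
 * idle, then wait for the non-CU (SE/GC/TC) masters as well.
 */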
static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 1);
	udelay(50);

	/* carrizo: enable the cp interrupt only after cp is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}
static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256) */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}
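
/*
 * RLC_SPM_MC_CNTL selects the VMID used for streaming performance
 * monitor memory traffic; the NO_KIQ accessors are used on a pp_one_vf
 * SRIOV configuration where the register can be accessed directly.
 */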
GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data); 1686 } 1687 } 1688 1689 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = { 1690 {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)}, 1691 {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)}, 1692 }; 1693 1694 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev, 1695 uint32_t offset, 1696 struct soc15_reg_rlcg *entries, int arr_size) 1697 { 1698 int i, inst; 1699 uint32_t reg; 1700 1701 if (!entries) 1702 return false; 1703 1704 for (i = 0; i < arr_size; i++) { 1705 const struct soc15_reg_rlcg *entry; 1706 1707 entry = &entries[i]; 1708 inst = adev->ip_map.logical_to_dev_inst ? 1709 adev->ip_map.logical_to_dev_inst( 1710 adev, entry->hwip, entry->instance) : 1711 entry->instance; 1712 reg = adev->reg_offset[entry->hwip][inst][entry->segment] + 1713 entry->reg; 1714 if (offset == reg) 1715 return true; 1716 } 1717 1718 return false; 1719 } 1720 1721 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) 1722 { 1723 return gfx_v9_4_3_check_rlcg_range(adev, offset, 1724 (void *)rlcg_access_gc_9_4_3, 1725 ARRAY_SIZE(rlcg_access_gc_9_4_3)); 1726 } 1727 1728 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, 1729 bool enable, int xcc_id) 1730 { 1731 if (enable) { 1732 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); 1733 } else { 1734 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 1735 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | 1736 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | 1737 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | 1738 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | 1739 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | 1740 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | 1741 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | 1742 CP_MEC_CNTL__MEC_ME1_HALT_MASK | 1743 CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 1744 adev->gfx.kiq[xcc_id].ring.sched.ready = false; 1745 } 1746 udelay(50); 1747 } 1748 1749 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev, 1750 int xcc_id) 1751 { 1752 const struct gfx_firmware_header_v1_0 *mec_hdr; 1753 const __le32 *fw_data; 1754 unsigned i; 1755 u32 tmp; 1756 u32 mec_ucode_addr_offset; 1757 u32 mec_ucode_data_offset; 1758 1759 if (!adev->gfx.mec_fw) 1760 return -EINVAL; 1761 1762 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); 1763 1764 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 1765 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 1766 1767 fw_data = (const __le32 *) 1768 (adev->gfx.mec_fw->data + 1769 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 1770 tmp = 0; 1771 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 1772 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 1773 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp); 1774 1775 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO, 1776 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); 1777 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI, 1778 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 1779 1780 mec_ucode_addr_offset = 1781 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR); 1782 mec_ucode_data_offset = 1783 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA); 1784 1785 /* MEC1 */ 1786 WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); 1787 for (i = 0; i < mec_hdr->jt_size; i++) 1788 WREG32(mec_ucode_data_offset, 1789 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 1790 1791 WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version); 1792 /* Todo : Loading MEC2 
firmware is only necessary if MEC2 should run different microcode than MEC1. */ 1793 1794 return 0; 1795 } 1796 1797 /* KIQ functions */ 1798 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id) 1799 { 1800 uint32_t tmp; 1801 struct amdgpu_device *adev = ring->adev; 1802 1803 /* tell RLC which is KIQ queue */ 1804 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); 1805 tmp &= 0xffffff00; 1806 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 1807 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80); 1808 } 1809 1810 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) 1811 { 1812 struct amdgpu_device *adev = ring->adev; 1813 1814 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 1815 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { 1816 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; 1817 mqd->cp_hqd_queue_priority = 1818 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; 1819 } 1820 } 1821 } 1822 1823 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) 1824 { 1825 struct amdgpu_device *adev = ring->adev; 1826 struct v9_mqd *mqd = ring->mqd_ptr; 1827 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 1828 uint32_t tmp; 1829 1830 mqd->header = 0xC0310800; 1831 mqd->compute_pipelinestat_enable = 0x00000001; 1832 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 1833 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 1834 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 1835 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 1836 mqd->compute_misc_reserved = 0x00000003; 1837 1838 mqd->dynamic_cu_mask_addr_lo = 1839 lower_32_bits(ring->mqd_gpu_addr 1840 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 1841 mqd->dynamic_cu_mask_addr_hi = 1842 upper_32_bits(ring->mqd_gpu_addr 1843 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 1844 1845 eop_base_addr = ring->eop_gpu_addr >> 8; 1846 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 1847 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 1848 1849 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 1850 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL); 1851 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 1852 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); 1853 1854 mqd->cp_hqd_eop_control = tmp; 1855 1856 /* enable doorbell? 
*/ 1857 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL); 1858 1859 if (ring->use_doorbell) { 1860 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1861 DOORBELL_OFFSET, ring->doorbell_index); 1862 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1863 DOORBELL_EN, 1); 1864 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1865 DOORBELL_SOURCE, 0); 1866 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1867 DOORBELL_HIT, 0); 1868 if (amdgpu_sriov_multi_vf_mode(adev)) 1869 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1870 DOORBELL_MODE, 1); 1871 } else { 1872 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1873 DOORBELL_EN, 0); 1874 } 1875 1876 mqd->cp_hqd_pq_doorbell_control = tmp; 1877 1878 /* disable the queue if it's active */ 1879 ring->wptr = 0; 1880 mqd->cp_hqd_dequeue_request = 0; 1881 mqd->cp_hqd_pq_rptr = 0; 1882 mqd->cp_hqd_pq_wptr_lo = 0; 1883 mqd->cp_hqd_pq_wptr_hi = 0; 1884 1885 /* set the pointer to the MQD */ 1886 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 1887 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 1888 1889 /* set MQD vmid to 0 */ 1890 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL); 1891 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 1892 mqd->cp_mqd_control = tmp; 1893 1894 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 1895 hqd_gpu_addr = ring->gpu_addr >> 8; 1896 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 1897 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 1898 1899 /* set up the HQD, this is similar to CP_RB0_CNTL */ 1900 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL); 1901 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 1902 (order_base_2(ring->ring_size / 4) - 1)); 1903 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 1904 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 1905 #ifdef __BIG_ENDIAN 1906 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); 1907 #endif 1908 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 1909 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); 1910 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 1911 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 1912 mqd->cp_hqd_pq_control = tmp; 1913 1914 /* set the wb address whether it's enabled or not */ 1915 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 1916 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 1917 mqd->cp_hqd_pq_rptr_report_addr_hi = 1918 upper_32_bits(wb_gpu_addr) & 0xffff; 1919 1920 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 1921 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 1922 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 1923 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 1924 1925 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 1926 ring->wptr = 0; 1927 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR); 1928 1929 /* set the vmid for the queue */ 1930 mqd->cp_hqd_vmid = 0; 1931 1932 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE); 1933 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 1934 mqd->cp_hqd_persistent_state = tmp; 1935 1936 /* set MIN_IB_AVAIL_SIZE */ 1937 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL); 1938 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 1939 mqd->cp_hqd_ib_control = tmp; 1940 1941 
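	/* Worked example for the EOP_SIZE encoding above (a sketch, assuming
	 * GFX9_MEC_HPD_SIZE is 4096 bytes): the EOP buffer then holds
	 * 4096 / 4 = 1024 dwords, order_base_2(1024) - 1 = 9 is programmed,
	 * and the HW decodes it as 2^(9+1) = 1024 dwords. QUEUE_SIZE in
	 * CP_HQD_PQ_CONTROL above is derived the same way from
	 * ring->ring_size.
	 */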
	/* set static priority for a queue/ring */
	gfx_v9_4_3_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);

	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd->cp_hqd_active = 1;

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
					    int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
			 mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
			 mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
			 mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
			 mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
				 mqd->cp_hqd_dequeue_request);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
				 mqd->cp_hqd_pq_rptr);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
				 mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
				 mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
			 mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
			 mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
			 mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
			 mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
			 mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
			 mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
			 mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
			 mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
			 mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
			 mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(
			GC, GET_INST(GC, xcc_id),
			regCP_MEC_DOORBELL_RANGE_LOWER,
			((adev->doorbell_index.kiq +
			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
			 2) << 2);
		WREG32_SOC15(
			GC, GET_INST(GC, xcc_id),
			regCP_MEC_DOORBELL_RANGE_UPPER,
			((adev->doorbell_index.userqueue_end +
			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
			 2) << 2);
	}

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
			 mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
			 mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
			 mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
			 mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
			 mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
					  int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	int j;

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {

		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);

		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}

		if (j == adev->usec_timeout) {
			DRM_DEBUG("%s dequeue request failed.\n", ring->name);

			/* Manual disable if dequeue request times out */
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
		}

		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
				 0);
	}

	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	struct v9_mqd *tmp_mqd;

	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);

	/* The GPU could be in a bad state during probe: the driver triggers
	 * a reset after loading the SMU, and in that case the MQD was not
	 * initialized, so the driver needs to re-init it.
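	 * (A reasonable reading of the check below: the backup MQD is only
	 * populated after a clean first init, so a non-zero cp_hqd_pq_control
	 * in the backup means it is safe to restore from.)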
	 * Check mqd->cp_hqd_pq_control since this value should not be 0.
	 */
	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
		/* for the GPU_RESET case, reset the MQD to a clean status */
		if (adev->gfx.kiq[xcc_id].mqd_backup)
			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.kiq[xcc_id].mqd_backup)
			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
	}

	return 0;
}

static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
					  bool restore)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];
	struct v9_mqd *tmp_mqd;

	/* Same as the KIQ init above: the driver needs to re-init the MQD if
	 * mqd->cp_hqd_pq_control was not initialized before.
	 */
	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];

	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
	} else {
		/* restore MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
		amdgpu_ring_clear_ring(ring);
	}
}

static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int j;

	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
			mutex_lock(&adev->srbm_mutex);
			soc15_grbm_select(adev, ring->me, ring->pipe,
					  ring->queue, 0, GET_INST(GC, xcc_id));
			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
			mutex_unlock(&adev->srbm_mutex);
		}
	}

	return 0;
}

static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
{
	gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
	return 0;
}

static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int i;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i + xcc_id *
			adev->gfx.num_compute_rings];

		gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
	}

	return amdgpu_gfx_enable_kcq(adev, xcc_id);
}

static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring;
	int r, j;

	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);

		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
		if (r)
			return r;
	} else {
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
	}

	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
	if (r)
		return r;

	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
	if (r)
		return r;

	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
		ring = &adev->gfx.compute_ring
				[j + xcc_id * adev->gfx.num_compute_rings];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);

	return 0;
}

static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
{
	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (amdgpu_sriov_vf(adev)) {
		enum amdgpu_gfx_partition mode;

		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
						       AMDGPU_XCP_FL_NONE);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
			return -EINVAL;
		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
		num_xcp = num_xcc / num_xcc_per_xcp;
		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);

	} else {
		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
						    AMDGPU_XCP_FL_NONE) ==
		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
			r = amdgpu_xcp_switch_partition_mode(
				adev->xcp_mgr, amdgpu_user_partt_mode);
	}
	if (r)
		return r;

	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
{
	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);

	if (amdgpu_sriov_vf(adev)) {
		/* Polling must be disabled for SRIOV once the hw is finished;
		 * otherwise the CPC engine may keep fetching a WB address
		 * that is already invalid after the sw teardown, triggering
		 * DMAR read errors on the hypervisor side.
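		 * (WB here is the write-back buffer in system memory that
		 * the CPC polls for wptr updates.)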
		 */
		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
		return;
	}

	/* Use the deinitialize sequence from CAIL when unbinding the device
	 * from the driver, otherwise the KIQ hangs when binding back.
	 */
	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
				  adev->gfx.kiq[xcc_id].ring.pipe,
				  adev->gfx.kiq[xcc_id].ring.queue, 0,
				  GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
					       xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
	}

	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}

static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
				       adev->gfx.cleaner_shader_ptr);

	if (!amdgpu_sriov_vf(adev))
		gfx_v9_4_3_init_golden_registers(adev);

	gfx_v9_4_3_constants_init(adev);

	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v9_4_3_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, num_xcc;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
{
	return gfx_v9_4_3_hw_fini(ip_block);
}

static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
{
	return gfx_v9_4_3_hw_init(ip_block);
}

static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
				  GRBM_STATUS, GUI_ACTIVE))
			return false;
	}
	return true;
}

static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v9_4_3_is_idle(ip_block))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (grbm_soft_reset) {
		/* stop the rlc */
		adev->gfx.rlc.funcs->stop(adev);

		/* Disable MEC parsing/prefetching */
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);

		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
					    uint32_t vmid,
					    uint32_t gds_base, uint32_t gds_size,
					    uint32_t gws_base, uint32_t gws_size,
					    uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
				     gds_base);

	/* GDS Size */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
				     gds_size);

	/* GWS */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
				     gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				     SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
				     (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);
	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
	gfx_v9_4_3_set_ring_funcs(adev);
	gfx_v9_4_3_set_irq_funcs(adev);
	gfx_v9_4_3_set_gds_init(adev);
	gfx_v9_4_3_set_rlc_funcs(adev);

	/* init rlcg reg access ctrl */
	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);

	return gfx_v9_4_3_init_microcode(adev);
}

static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
	if (r)
		return r;

	if (adev->gfx.ras &&
	    adev->gfx.ras->enable_watchdog_timer)
		adev->gfx.ras->enable_watchdog_timer(adev);

	return 0;
}

static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
					    bool enable, int xcc_id)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
		return;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
		return;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}

static void
gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);

		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);

		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
		}
	}
}

static void
gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						bool enable, int xcc_id)
{
	uint32_t def, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {

		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);

		/* CGCG Hysteresis: 400us */
		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);

		data = (0x2710
			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x33450100) */
		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
	}
}

static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
						  bool enable, int xcc_id)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);

	if (enable) {
		/* FGCG */
		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);

		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
								xcc_id);
		/* === CGCG + CGLS === */
		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
								xcc_id);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
								xcc_id);
		/* === MGCG + MGLS === */
		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
								xcc_id);

		/* FGCG */
		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
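	/* presumably the per-XCC pair that amdgpu_gfx_rlc_enter/exit_safe_mode()
	 * resolves to; it brackets the CG/PG register updates in this file
	 */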
	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
	.init = gfx_v9_4_3_rlc_init,
	.resume = gfx_v9_4_3_rlc_resume,
	.stop = gfx_v9_4_3_rlc_stop,
	.reset = gfx_v9_4_3_rlc_reset,
	.start = gfx_v9_4_3_rlc_start,
	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};

static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_powergating_state state)
{
	return 0;
}

static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_update_gfx_clock_gating(
			adev, state == AMD_CG_STATE_GATE, i);

	return 0;
}

static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}

static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
				adev->nbio.funcs->get_hdp_flush_req_offset(adev),
				adev->nbio.funcs->get_hdp_flush_done_offset(adev),
				ref_and_mask, ref_and_mask, 0x20);
}

static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
					    struct amdgpu_job *job,
					    struct amdgpu_ib *ib,
					    uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				       u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * The address should be Qword aligned for a 64-bit write, and Dword
	 * aligned if only the low 32 bits of data are sent (data high is
	 * discarded).
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
				lower_32_bits(addr), upper_32_bits(addr),
				seq, 0xffffffff, 4);
}

static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
					  unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					   u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
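	/* the remaining two dwords give the destination: the wb slot at
	 * reg_val_offs (dword-indexed), which the caller is expected to
	 * read back once the CP confirms the write
	 */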
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}

static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				      uint32_t val)
{
	uint32_t cmd = 0;

	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					  uint32_t val, uint32_t mask)
{
	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						    uint32_t reg0, uint32_t reg1,
						    uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
						   ref, mask);
}

static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
					  unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
}

static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
	struct amdgpu_device *adev, int me, int pipe,
	enum amdgpu_interrupt_state state, int xcc_id)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
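	 * (Consequently only me == 1 with pipes 0-3 is meaningful here;
	 * anything else falls through to the DRM_DEBUG paths below.)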
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
		break;
	default:
		break;
	}
}

static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
				       int xcc_id, int me, int pipe)
{
	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me != 1)
		return 0;

	switch (pipe) {
	case 0:
		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
	case 1:
		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
	case 2:
		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
	case 3:
		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
	default:
		return 0;
	}
}

static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl_reg, mec_int_cntl;
	int i, j, k, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < num_xcc; i++) {
			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
					      PRIV_REG_INT_ENABLE,
					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
					/* MECs start at 1 */
					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);

					if (mec_int_cntl_reg) {
						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
									     PRIV_REG_INT_ENABLE,
									     state == AMDGPU_IRQ_STATE_ENABLE ?
									     1 : 0);
						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
					}
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl_reg, mec_int_cntl;
	int i, j, k, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < num_xcc; i++) {
			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
					      OPCODE_ERROR_INT_ENABLE,
					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
					/* MECs start at 1 */
					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);

					if (mec_int_cntl_reg) {
						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
									     OPCODE_ERROR_INT_ENABLE,
									     state == AMDGPU_IRQ_STATE_ENABLE ?
									     1 : 0);
						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
					}
				}
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						unsigned type,
						enum amdgpu_interrupt_state state)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < num_xcc; i++)
			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
					      PRIV_INSTR_INT_ENABLE,
					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		switch (type) {
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 0, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 1, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 2, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 3, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 0, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 1, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 2, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 3, state, i);
			break;
		default:
			break;
		}
	}

	return 0;
}

static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	int i, xcc_id;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);

	if (xcc_id == -EINVAL)
		return -EINVAL;

	switch (me_id) {
	case 0:
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring
					[i +
					 xcc_id * adev->gfx.num_compute_rings];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
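			 * (Hence the match below still compares me/pipe/queue
			 * decoded from ring_id, even though enablement is
			 * per pipe.)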
3317 */ 3318 3319 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 3320 amdgpu_fence_process(ring); 3321 } 3322 break; 3323 } 3324 return 0; 3325 } 3326 3327 static void gfx_v9_4_3_fault(struct amdgpu_device *adev, 3328 struct amdgpu_iv_entry *entry) 3329 { 3330 u8 me_id, pipe_id, queue_id; 3331 struct amdgpu_ring *ring; 3332 int i, xcc_id; 3333 3334 me_id = (entry->ring_id & 0x0c) >> 2; 3335 pipe_id = (entry->ring_id & 0x03) >> 0; 3336 queue_id = (entry->ring_id & 0x70) >> 4; 3337 3338 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); 3339 3340 if (xcc_id == -EINVAL) 3341 return; 3342 3343 switch (me_id) { 3344 case 0: 3345 case 1: 3346 case 2: 3347 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3348 ring = &adev->gfx.compute_ring 3349 [i + 3350 xcc_id * adev->gfx.num_compute_rings]; 3351 if (ring->me == me_id && ring->pipe == pipe_id && 3352 ring->queue == queue_id) 3353 drm_sched_fault(&ring->sched); 3354 } 3355 break; 3356 } 3357 } 3358 3359 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev, 3360 struct amdgpu_irq_src *source, 3361 struct amdgpu_iv_entry *entry) 3362 { 3363 DRM_ERROR("Illegal register access in command stream\n"); 3364 gfx_v9_4_3_fault(adev, entry); 3365 return 0; 3366 } 3367 3368 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev, 3369 struct amdgpu_irq_src *source, 3370 struct amdgpu_iv_entry *entry) 3371 { 3372 DRM_ERROR("Illegal opcode in command stream\n"); 3373 gfx_v9_4_3_fault(adev, entry); 3374 return 0; 3375 } 3376 3377 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev, 3378 struct amdgpu_irq_src *source, 3379 struct amdgpu_iv_entry *entry) 3380 { 3381 DRM_ERROR("Illegal instruction in command stream\n"); 3382 gfx_v9_4_3_fault(adev, entry); 3383 return 0; 3384 } 3385 3386 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring) 3387 { 3388 const unsigned int cp_coher_cntl = 3389 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) | 3390 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) | 3391 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) | 3392 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) | 3393 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1); 3394 3395 /* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */ 3396 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); 3397 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */ 3398 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 3399 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 3400 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 3401 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 3402 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 3403 } 3404 3405 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring, 3406 uint32_t pipe, bool enable) 3407 { 3408 struct amdgpu_device *adev = ring->adev; 3409 uint32_t val; 3410 uint32_t wcl_cs_reg; 3411 3412 /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */ 3413 val = enable ? 
0x1 : 0x7f; 3414 3415 switch (pipe) { 3416 case 0: 3417 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0); 3418 break; 3419 case 1: 3420 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1); 3421 break; 3422 case 2: 3423 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2); 3424 break; 3425 case 3: 3426 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3); 3427 break; 3428 default: 3429 DRM_DEBUG("invalid pipe %d\n", pipe); 3430 return; 3431 } 3432 3433 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); 3434 3435 } 3436 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable) 3437 { 3438 struct amdgpu_device *adev = ring->adev; 3439 uint32_t val; 3440 int i; 3441 3442 /* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit 3443 * the number of gfx waves. Setting only the low 5 bits (0x1f) makes sure 3444 * gfx gets only around 25% of the GPU resources. 3445 */ 3446 val = enable ? 0x1f : 0x07ffffff; 3447 amdgpu_ring_emit_wreg(ring, 3448 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX), 3449 val); 3450 3451 /* Restrict waves for normal/low priority compute queues as well 3452 * to get the best QoS for high priority compute jobs. 3453 * 3454 * amdgpu controls only the 1st ME (CS pipes 0-3). 3455 */ 3456 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 3457 if (i != ring->pipe) 3458 gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable); 3459 3460 } 3461 } 3462 3463 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me, 3464 uint32_t pipe, uint32_t queue, 3465 uint32_t xcc_id) 3466 { 3467 int i, r; 3468 /* make sure dequeue is complete */ 3469 gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id); 3470 mutex_lock(&adev->srbm_mutex); 3471 soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id)); 3472 for (i = 0; i < adev->usec_timeout; i++) { 3473 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) 3474 break; 3475 udelay(1); 3476 } 3477 if (i >= adev->usec_timeout) 3478 r = -ETIMEDOUT; 3479 else 3480 r = 0; 3481 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 3482 mutex_unlock(&adev->srbm_mutex); 3483 gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id); 3484 3485 return r; 3486 3487 } 3488 3489 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev) 3490 { 3491 if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) 3492 return true; 3493 else 3494 dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n"); 3495 3496 return false; 3497 } 3498 3499 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring) 3500 { 3501 struct amdgpu_device *adev = ring->adev; 3502 uint32_t reset_pipe, clean_pipe; 3503 int r; 3504 3505 if (!gfx_v9_4_3_pipe_reset_support(adev)) 3506 return -EINVAL; 3507 3508 gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id); 3509 mutex_lock(&adev->srbm_mutex); 3510 3511 reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL); 3512 clean_pipe = reset_pipe; 3513 3514 if (ring->me == 1) { 3515 switch (ring->pipe) { 3516 case 0: 3517 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3518 MEC_ME1_PIPE0_RESET, 1); 3519 break; 3520 case 1: 3521 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3522 MEC_ME1_PIPE1_RESET, 1); 3523 break; 3524 case 2: 3525 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3526 MEC_ME1_PIPE2_RESET, 1); 3527 break; 3528 case 3: 3529 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3530 MEC_ME1_PIPE3_RESET,
1); 3531 break; 3532 default: 3533 break; 3534 } 3535 } else { 3536 if (ring->pipe) 3537 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3538 MEC_ME2_PIPE1_RESET, 1); 3539 else 3540 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3541 MEC_ME2_PIPE0_RESET, 1); 3542 } 3543 3544 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe); 3545 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe); 3546 mutex_unlock(&adev->srbm_mutex); 3547 gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id); 3548 3549 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id); 3550 return r; 3551 } 3552 3553 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, 3554 unsigned int vmid, 3555 struct amdgpu_fence *timedout_fence) 3556 { 3557 struct amdgpu_device *adev = ring->adev; 3558 struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; 3559 struct amdgpu_ring *kiq_ring = &kiq->ring; 3560 int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE; 3561 unsigned long flags; 3562 int r; 3563 3564 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 3565 return -EINVAL; 3566 3567 amdgpu_ring_reset_helper_begin(ring, timedout_fence); 3568 3569 spin_lock_irqsave(&kiq->ring_lock, flags); 3570 3571 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 3572 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3573 return -ENOMEM; 3574 } 3575 3576 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 3577 0, 0); 3578 amdgpu_ring_commit(kiq_ring); 3579 3580 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3581 3582 r = amdgpu_ring_test_ring(kiq_ring); 3583 if (r) { 3584 dev_err(adev->dev, "kiq ring test failed after ring %s queue reset\n", 3585 ring->name); 3586 goto pipe_reset; 3587 } 3588 3589 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id); 3590 if (r) 3591 dev_err(adev->dev, "failed to wait for HQD deactivation, will try pipe reset\n"); 3592 3593 pipe_reset: 3594 if (r) { 3595 if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) 3596 return -EOPNOTSUPP; 3597 r = gfx_v9_4_3_reset_hw_pipe(ring); 3598 reset_mode = AMDGPU_RESET_TYPE_PER_PIPE; 3599 dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name, 3600 r ?
"failed" : "successfully"); 3601 if (r) 3602 return r; 3603 } 3604 3605 gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3606 3607 spin_lock_irqsave(&kiq->ring_lock, flags); 3608 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); 3609 if (r) { 3610 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3611 return -ENOMEM; 3612 } 3613 kiq->pmf->kiq_map_queues(kiq_ring, ring); 3614 amdgpu_ring_commit(kiq_ring); 3615 r = amdgpu_ring_test_ring(kiq_ring); 3616 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3617 if (r) { 3618 if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) 3619 goto pipe_reset; 3620 3621 dev_err(adev->dev, "fail to remap queue\n"); 3622 return r; 3623 } 3624 3625 if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) { 3626 r = amdgpu_ring_test_ring(ring); 3627 if (r) 3628 goto pipe_reset; 3629 } 3630 3631 3632 return amdgpu_ring_reset_helper_end(ring, timedout_fence); 3633 } 3634 3635 enum amdgpu_gfx_cp_ras_mem_id { 3636 AMDGPU_GFX_CP_MEM1 = 1, 3637 AMDGPU_GFX_CP_MEM2, 3638 AMDGPU_GFX_CP_MEM3, 3639 AMDGPU_GFX_CP_MEM4, 3640 AMDGPU_GFX_CP_MEM5, 3641 }; 3642 3643 enum amdgpu_gfx_gcea_ras_mem_id { 3644 AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4, 3645 AMDGPU_GFX_GCEA_IORD_CMDMEM, 3646 AMDGPU_GFX_GCEA_GMIWR_CMDMEM, 3647 AMDGPU_GFX_GCEA_GMIRD_CMDMEM, 3648 AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, 3649 AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, 3650 AMDGPU_GFX_GCEA_MAM_DMEM0, 3651 AMDGPU_GFX_GCEA_MAM_DMEM1, 3652 AMDGPU_GFX_GCEA_MAM_DMEM2, 3653 AMDGPU_GFX_GCEA_MAM_DMEM3, 3654 AMDGPU_GFX_GCEA_MAM_AMEM0, 3655 AMDGPU_GFX_GCEA_MAM_AMEM1, 3656 AMDGPU_GFX_GCEA_MAM_AMEM2, 3657 AMDGPU_GFX_GCEA_MAM_AMEM3, 3658 AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, 3659 AMDGPU_GFX_GCEA_WRET_TAGMEM, 3660 AMDGPU_GFX_GCEA_RRET_TAGMEM, 3661 AMDGPU_GFX_GCEA_IOWR_DATAMEM, 3662 AMDGPU_GFX_GCEA_GMIWR_DATAMEM, 3663 AMDGPU_GFX_GCEA_DRAM_DATAMEM, 3664 }; 3665 3666 enum amdgpu_gfx_gc_cane_ras_mem_id { 3667 AMDGPU_GFX_GC_CANE_MEM0 = 0, 3668 }; 3669 3670 enum amdgpu_gfx_gcutcl2_ras_mem_id { 3671 AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160, 3672 }; 3673 3674 enum amdgpu_gfx_gds_ras_mem_id { 3675 AMDGPU_GFX_GDS_MEM0 = 0, 3676 }; 3677 3678 enum amdgpu_gfx_lds_ras_mem_id { 3679 AMDGPU_GFX_LDS_BANK0 = 0, 3680 AMDGPU_GFX_LDS_BANK1, 3681 AMDGPU_GFX_LDS_BANK2, 3682 AMDGPU_GFX_LDS_BANK3, 3683 AMDGPU_GFX_LDS_BANK4, 3684 AMDGPU_GFX_LDS_BANK5, 3685 AMDGPU_GFX_LDS_BANK6, 3686 AMDGPU_GFX_LDS_BANK7, 3687 AMDGPU_GFX_LDS_BANK8, 3688 AMDGPU_GFX_LDS_BANK9, 3689 AMDGPU_GFX_LDS_BANK10, 3690 AMDGPU_GFX_LDS_BANK11, 3691 AMDGPU_GFX_LDS_BANK12, 3692 AMDGPU_GFX_LDS_BANK13, 3693 AMDGPU_GFX_LDS_BANK14, 3694 AMDGPU_GFX_LDS_BANK15, 3695 AMDGPU_GFX_LDS_BANK16, 3696 AMDGPU_GFX_LDS_BANK17, 3697 AMDGPU_GFX_LDS_BANK18, 3698 AMDGPU_GFX_LDS_BANK19, 3699 AMDGPU_GFX_LDS_BANK20, 3700 AMDGPU_GFX_LDS_BANK21, 3701 AMDGPU_GFX_LDS_BANK22, 3702 AMDGPU_GFX_LDS_BANK23, 3703 AMDGPU_GFX_LDS_BANK24, 3704 AMDGPU_GFX_LDS_BANK25, 3705 AMDGPU_GFX_LDS_BANK26, 3706 AMDGPU_GFX_LDS_BANK27, 3707 AMDGPU_GFX_LDS_BANK28, 3708 AMDGPU_GFX_LDS_BANK29, 3709 AMDGPU_GFX_LDS_BANK30, 3710 AMDGPU_GFX_LDS_BANK31, 3711 AMDGPU_GFX_LDS_SP_BUFFER_A, 3712 AMDGPU_GFX_LDS_SP_BUFFER_B, 3713 }; 3714 3715 enum amdgpu_gfx_rlc_ras_mem_id { 3716 AMDGPU_GFX_RLC_GPMF32 = 1, 3717 AMDGPU_GFX_RLC_RLCVF32, 3718 AMDGPU_GFX_RLC_SCRATCH, 3719 AMDGPU_GFX_RLC_SRM_ARAM, 3720 AMDGPU_GFX_RLC_SRM_DRAM, 3721 AMDGPU_GFX_RLC_TCTAG, 3722 AMDGPU_GFX_RLC_SPM_SE, 3723 AMDGPU_GFX_RLC_SPM_GRBMT, 3724 }; 3725 3726 enum amdgpu_gfx_sp_ras_mem_id { 3727 AMDGPU_GFX_SP_SIMDID0 = 0, 3728 }; 3729 3730 enum amdgpu_gfx_spi_ras_mem_id { 3731 AMDGPU_GFX_SPI_MEM0 = 0, 3732 
AMDGPU_GFX_SPI_MEM1, 3733 AMDGPU_GFX_SPI_MEM2, 3734 AMDGPU_GFX_SPI_MEM3, 3735 }; 3736 3737 enum amdgpu_gfx_sqc_ras_mem_id { 3738 AMDGPU_GFX_SQC_INST_CACHE_A = 100, 3739 AMDGPU_GFX_SQC_INST_CACHE_B = 101, 3740 AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102, 3741 AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103, 3742 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104, 3743 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105, 3744 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106, 3745 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107, 3746 AMDGPU_GFX_SQC_DATA_CACHE_A = 200, 3747 AMDGPU_GFX_SQC_DATA_CACHE_B = 201, 3748 AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202, 3749 AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203, 3750 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204, 3751 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205, 3752 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206, 3753 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207, 3754 AMDGPU_GFX_SQC_DIRTY_BIT_A = 208, 3755 AMDGPU_GFX_SQC_DIRTY_BIT_B = 209, 3756 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210, 3757 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211, 3758 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212, 3759 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213, 3760 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108, 3761 }; 3762 3763 enum amdgpu_gfx_sq_ras_mem_id { 3764 AMDGPU_GFX_SQ_SGPR_MEM0 = 0, 3765 AMDGPU_GFX_SQ_SGPR_MEM1, 3766 AMDGPU_GFX_SQ_SGPR_MEM2, 3767 AMDGPU_GFX_SQ_SGPR_MEM3, 3768 }; 3769 3770 enum amdgpu_gfx_ta_ras_mem_id { 3771 AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1, 3772 AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, 3773 AMDGPU_GFX_TA_FS_CFIFO_RAM, 3774 AMDGPU_GFX_TA_FSX_LFIFO, 3775 AMDGPU_GFX_TA_FS_DFIFO_RAM, 3776 }; 3777 3778 enum amdgpu_gfx_tcc_ras_mem_id { 3779 AMDGPU_GFX_TCC_MEM1 = 1, 3780 }; 3781 3782 enum amdgpu_gfx_tca_ras_mem_id { 3783 AMDGPU_GFX_TCA_MEM1 = 1, 3784 }; 3785 3786 enum amdgpu_gfx_tci_ras_mem_id { 3787 AMDGPU_GFX_TCIW_MEM = 1, 3788 }; 3789 3790 enum amdgpu_gfx_tcp_ras_mem_id { 3791 AMDGPU_GFX_TCP_LFIFO0 = 1, 3792 AMDGPU_GFX_TCP_SET0BANK0_RAM, 3793 AMDGPU_GFX_TCP_SET0BANK1_RAM, 3794 AMDGPU_GFX_TCP_SET0BANK2_RAM, 3795 AMDGPU_GFX_TCP_SET0BANK3_RAM, 3796 AMDGPU_GFX_TCP_SET1BANK0_RAM, 3797 AMDGPU_GFX_TCP_SET1BANK1_RAM, 3798 AMDGPU_GFX_TCP_SET1BANK2_RAM, 3799 AMDGPU_GFX_TCP_SET1BANK3_RAM, 3800 AMDGPU_GFX_TCP_SET2BANK0_RAM, 3801 AMDGPU_GFX_TCP_SET2BANK1_RAM, 3802 AMDGPU_GFX_TCP_SET2BANK2_RAM, 3803 AMDGPU_GFX_TCP_SET2BANK3_RAM, 3804 AMDGPU_GFX_TCP_SET3BANK0_RAM, 3805 AMDGPU_GFX_TCP_SET3BANK1_RAM, 3806 AMDGPU_GFX_TCP_SET3BANK2_RAM, 3807 AMDGPU_GFX_TCP_SET3BANK3_RAM, 3808 AMDGPU_GFX_TCP_VM_FIFO, 3809 AMDGPU_GFX_TCP_DB_TAGRAM0, 3810 AMDGPU_GFX_TCP_DB_TAGRAM1, 3811 AMDGPU_GFX_TCP_DB_TAGRAM2, 3812 AMDGPU_GFX_TCP_DB_TAGRAM3, 3813 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, 3814 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, 3815 AMDGPU_GFX_TCP_CMD_FIFO, 3816 }; 3817 3818 enum amdgpu_gfx_td_ras_mem_id { 3819 AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1, 3820 AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, 3821 AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, 3822 }; 3823 3824 enum amdgpu_gfx_tcx_ras_mem_id { 3825 AMDGPU_GFX_TCX_FIFOD0 = 0, 3826 AMDGPU_GFX_TCX_FIFOD1, 3827 AMDGPU_GFX_TCX_FIFOD2, 3828 AMDGPU_GFX_TCX_FIFOD3, 3829 AMDGPU_GFX_TCX_FIFOD4, 3830 AMDGPU_GFX_TCX_FIFOD5, 3831 AMDGPU_GFX_TCX_FIFOD6, 3832 AMDGPU_GFX_TCX_FIFOD7, 3833 AMDGPU_GFX_TCX_FIFOB0, 3834 AMDGPU_GFX_TCX_FIFOB1, 3835 AMDGPU_GFX_TCX_FIFOB2, 3836 AMDGPU_GFX_TCX_FIFOB3, 3837 AMDGPU_GFX_TCX_FIFOB4, 3838 AMDGPU_GFX_TCX_FIFOB5, 3839 AMDGPU_GFX_TCX_FIFOB6, 3840 AMDGPU_GFX_TCX_FIFOB7, 3841 AMDGPU_GFX_TCX_FIFOA0, 3842 AMDGPU_GFX_TCX_FIFOA1, 3843 AMDGPU_GFX_TCX_FIFOA2, 3844 AMDGPU_GFX_TCX_FIFOA3, 3845 
AMDGPU_GFX_TCX_FIFOA4, 3846 AMDGPU_GFX_TCX_FIFOA5, 3847 AMDGPU_GFX_TCX_FIFOA6, 3848 AMDGPU_GFX_TCX_FIFOA7, 3849 AMDGPU_GFX_TCX_CFIFO0, 3850 AMDGPU_GFX_TCX_CFIFO1, 3851 AMDGPU_GFX_TCX_CFIFO2, 3852 AMDGPU_GFX_TCX_CFIFO3, 3853 AMDGPU_GFX_TCX_CFIFO4, 3854 AMDGPU_GFX_TCX_CFIFO5, 3855 AMDGPU_GFX_TCX_CFIFO6, 3856 AMDGPU_GFX_TCX_CFIFO7, 3857 AMDGPU_GFX_TCX_FIFO_ACKB0, 3858 AMDGPU_GFX_TCX_FIFO_ACKB1, 3859 AMDGPU_GFX_TCX_FIFO_ACKB2, 3860 AMDGPU_GFX_TCX_FIFO_ACKB3, 3861 AMDGPU_GFX_TCX_FIFO_ACKB4, 3862 AMDGPU_GFX_TCX_FIFO_ACKB5, 3863 AMDGPU_GFX_TCX_FIFO_ACKB6, 3864 AMDGPU_GFX_TCX_FIFO_ACKB7, 3865 AMDGPU_GFX_TCX_FIFO_ACKD0, 3866 AMDGPU_GFX_TCX_FIFO_ACKD1, 3867 AMDGPU_GFX_TCX_FIFO_ACKD2, 3868 AMDGPU_GFX_TCX_FIFO_ACKD3, 3869 AMDGPU_GFX_TCX_FIFO_ACKD4, 3870 AMDGPU_GFX_TCX_FIFO_ACKD5, 3871 AMDGPU_GFX_TCX_FIFO_ACKD6, 3872 AMDGPU_GFX_TCX_FIFO_ACKD7, 3873 AMDGPU_GFX_TCX_DST_FIFOA0, 3874 AMDGPU_GFX_TCX_DST_FIFOA1, 3875 AMDGPU_GFX_TCX_DST_FIFOA2, 3876 AMDGPU_GFX_TCX_DST_FIFOA3, 3877 AMDGPU_GFX_TCX_DST_FIFOA4, 3878 AMDGPU_GFX_TCX_DST_FIFOA5, 3879 AMDGPU_GFX_TCX_DST_FIFOA6, 3880 AMDGPU_GFX_TCX_DST_FIFOA7, 3881 AMDGPU_GFX_TCX_DST_FIFOB0, 3882 AMDGPU_GFX_TCX_DST_FIFOB1, 3883 AMDGPU_GFX_TCX_DST_FIFOB2, 3884 AMDGPU_GFX_TCX_DST_FIFOB3, 3885 AMDGPU_GFX_TCX_DST_FIFOB4, 3886 AMDGPU_GFX_TCX_DST_FIFOB5, 3887 AMDGPU_GFX_TCX_DST_FIFOB6, 3888 AMDGPU_GFX_TCX_DST_FIFOB7, 3889 AMDGPU_GFX_TCX_DST_FIFOD0, 3890 AMDGPU_GFX_TCX_DST_FIFOD1, 3891 AMDGPU_GFX_TCX_DST_FIFOD2, 3892 AMDGPU_GFX_TCX_DST_FIFOD3, 3893 AMDGPU_GFX_TCX_DST_FIFOD4, 3894 AMDGPU_GFX_TCX_DST_FIFOD5, 3895 AMDGPU_GFX_TCX_DST_FIFOD6, 3896 AMDGPU_GFX_TCX_DST_FIFOD7, 3897 AMDGPU_GFX_TCX_DST_FIFO_ACKB0, 3898 AMDGPU_GFX_TCX_DST_FIFO_ACKB1, 3899 AMDGPU_GFX_TCX_DST_FIFO_ACKB2, 3900 AMDGPU_GFX_TCX_DST_FIFO_ACKB3, 3901 AMDGPU_GFX_TCX_DST_FIFO_ACKB4, 3902 AMDGPU_GFX_TCX_DST_FIFO_ACKB5, 3903 AMDGPU_GFX_TCX_DST_FIFO_ACKB6, 3904 AMDGPU_GFX_TCX_DST_FIFO_ACKB7, 3905 AMDGPU_GFX_TCX_DST_FIFO_ACKD0, 3906 AMDGPU_GFX_TCX_DST_FIFO_ACKD1, 3907 AMDGPU_GFX_TCX_DST_FIFO_ACKD2, 3908 AMDGPU_GFX_TCX_DST_FIFO_ACKD3, 3909 AMDGPU_GFX_TCX_DST_FIFO_ACKD4, 3910 AMDGPU_GFX_TCX_DST_FIFO_ACKD5, 3911 AMDGPU_GFX_TCX_DST_FIFO_ACKD6, 3912 AMDGPU_GFX_TCX_DST_FIFO_ACKD7, 3913 }; 3914 3915 enum amdgpu_gfx_atc_l2_ras_mem_id { 3916 AMDGPU_GFX_ATC_L2_MEM0 = 0, 3917 }; 3918 3919 enum amdgpu_gfx_utcl2_ras_mem_id { 3920 AMDGPU_GFX_UTCL2_MEM0 = 0, 3921 }; 3922 3923 enum amdgpu_gfx_vml2_ras_mem_id { 3924 AMDGPU_GFX_VML2_MEM0 = 0, 3925 }; 3926 3927 enum amdgpu_gfx_vml2_walker_ras_mem_id { 3928 AMDGPU_GFX_VML2_WALKER_MEM0 = 0, 3929 }; 3930 3931 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = { 3932 {AMDGPU_GFX_CP_MEM1, "CP_MEM1"}, 3933 {AMDGPU_GFX_CP_MEM2, "CP_MEM2"}, 3934 {AMDGPU_GFX_CP_MEM3, "CP_MEM3"}, 3935 {AMDGPU_GFX_CP_MEM4, "CP_MEM4"}, 3936 {AMDGPU_GFX_CP_MEM5, "CP_MEM5"}, 3937 }; 3938 3939 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = { 3940 {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"}, 3941 {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"}, 3942 {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"}, 3943 {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"}, 3944 {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"}, 3945 {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"}, 3946 {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"}, 3947 {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"}, 3948 {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"}, 3949 {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"}, 3950 {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"}, 3951 
{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"}, 3952 {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"}, 3953 {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"}, 3954 {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"}, 3955 {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"}, 3956 {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"}, 3957 {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"}, 3958 {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"}, 3959 {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"}, 3960 }; 3961 3962 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = { 3963 {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"}, 3964 }; 3965 3966 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = { 3967 {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"}, 3968 }; 3969 3970 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = { 3971 {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"}, 3972 }; 3973 3974 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = { 3975 {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"}, 3976 {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"}, 3977 {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"}, 3978 {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"}, 3979 {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"}, 3980 {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"}, 3981 {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"}, 3982 {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"}, 3983 {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"}, 3984 {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"}, 3985 {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"}, 3986 {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"}, 3987 {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"}, 3988 {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"}, 3989 {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"}, 3990 {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"}, 3991 {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"}, 3992 {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"}, 3993 {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"}, 3994 {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"}, 3995 {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"}, 3996 {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"}, 3997 {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"}, 3998 {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"}, 3999 {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"}, 4000 {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"}, 4001 {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"}, 4002 {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"}, 4003 {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"}, 4004 {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"}, 4005 {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"}, 4006 {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"}, 4007 {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"}, 4008 {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"}, 4009 }; 4010 4011 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = { 4012 {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"}, 4013 {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"}, 4014 {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"}, 4015 {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"}, 4016 {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"}, 4017 {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"}, 4018 {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"}, 4019 {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"}, 4020 }; 4021 4022 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = { 4023 {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"}, 4024 }; 4025 4026 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = { 4027 {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"}, 4028 {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"}, 4029 {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"}, 4030 {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"}, 4031 }; 4032 4033 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = { 4034 
{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"}, 4035 {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"}, 4036 {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"}, 4037 {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"}, 4038 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"}, 4039 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"}, 4040 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"}, 4041 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"}, 4042 {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"}, 4043 {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"}, 4044 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"}, 4045 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"}, 4046 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"}, 4047 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"}, 4048 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"}, 4049 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"}, 4050 {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"}, 4051 {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"}, 4052 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"}, 4053 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"}, 4054 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"}, 4055 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"}, 4056 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"}, 4057 }; 4058 4059 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = { 4060 {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"}, 4061 {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"}, 4062 {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"}, 4063 {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"}, 4064 }; 4065 4066 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = { 4067 {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"}, 4068 {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"}, 4069 {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"}, 4070 {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"}, 4071 {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"}, 4072 }; 4073 4074 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = { 4075 {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"}, 4076 }; 4077 4078 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = { 4079 {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"}, 4080 }; 4081 4082 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = { 4083 {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"}, 4084 }; 4085 4086 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = { 4087 {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"}, 4088 {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"}, 4089 {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"}, 4090 {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"}, 4091 {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"}, 4092 {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"}, 4093 {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"}, 4094 {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"}, 4095 {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"}, 4096 {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"}, 4097 {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"}, 4098 {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"}, 4099 {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"}, 4100 
{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"}, 4101 {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"}, 4102 {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"}, 4103 {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"}, 4104 {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"}, 4105 {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"}, 4106 {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"}, 4107 {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"}, 4108 {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"}, 4109 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"}, 4110 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"}, 4111 {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"}, 4112 }; 4113 4114 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = { 4115 {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"}, 4116 {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"}, 4117 {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"}, 4118 }; 4119 4120 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = { 4121 {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"}, 4122 {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"}, 4123 {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"}, 4124 {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"}, 4125 {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"}, 4126 {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"}, 4127 {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"}, 4128 {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"}, 4129 {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"}, 4130 {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"}, 4131 {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"}, 4132 {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"}, 4133 {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"}, 4134 {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"}, 4135 {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"}, 4136 {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"}, 4137 {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"}, 4138 {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"}, 4139 {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"}, 4140 {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"}, 4141 {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"}, 4142 {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"}, 4143 {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"}, 4144 {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"}, 4145 {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"}, 4146 {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"}, 4147 {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"}, 4148 {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"}, 4149 {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"}, 4150 {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"}, 4151 {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"}, 4152 {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"}, 4153 {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"}, 4154 {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"}, 4155 {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"}, 4156 {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"}, 4157 {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"}, 4158 {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"}, 4159 {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"}, 4160 {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"}, 4161 {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"}, 4162 {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"}, 4163 {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"}, 4164 {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"}, 4165 {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"}, 4166 {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"}, 4167 {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"}, 4168 {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"}, 4169 {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"}, 4170 {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"}, 4171 {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"}, 4172 {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"}, 4173 {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"}, 
4174 {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"}, 4175 {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"}, 4176 {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"}, 4177 {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"}, 4178 {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"}, 4179 {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"}, 4180 {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"}, 4181 {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"}, 4182 {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"}, 4183 {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"}, 4184 {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"}, 4185 {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"}, 4186 {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"}, 4187 {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"}, 4188 {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"}, 4189 {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"}, 4190 {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"}, 4191 {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"}, 4192 {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"}, 4193 {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"}, 4194 {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"}, 4195 {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"}, 4196 {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"}, 4197 {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"}, 4198 {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"}, 4199 {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"}, 4200 {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"}, 4201 {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"}, 4202 {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"}, 4203 {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"}, 4204 {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"}, 4205 {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"}, 4206 {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"}, 4207 {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"}, 4208 {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"}, 4209 }; 4210 4211 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = { 4212 {AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"}, 4213 }; 4214 4215 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = { 4216 {AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"}, 4217 }; 4218 4219 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = { 4220 {AMDGPU_GFX_VML2_MEM0, "VML2_MEM"}, 4221 }; 4222 4223 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = { 4224 {AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"}, 4225 }; 4226 4227 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = { 4228 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list) 4229 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list) 4230 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list) 4231 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list) 4232 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list) 4233 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list) 4234 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list) 4235 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list) 4236 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list) 4237 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list) 4238 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list) 4239 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list) 4240 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list) 4241 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list) 4242 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list) 4243 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list) 4244
AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list) 4245 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list) 4246 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list) 4247 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list) 4248 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list) 4249 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list) 4250 }; 4251 4252 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { 4253 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH), 4254 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, 4255 AMDGPU_GFX_RLC_MEM, 1}, 4256 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI), 4257 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, 4258 AMDGPU_GFX_CP_MEM, 1}, 4259 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI), 4260 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, 4261 AMDGPU_GFX_CP_MEM, 1}, 4262 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI), 4263 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, 4264 AMDGPU_GFX_CP_MEM, 1}, 4265 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI), 4266 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, 4267 AMDGPU_GFX_GDS_MEM, 1}, 4268 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI), 4269 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, 4270 AMDGPU_GFX_GC_CANE_MEM, 1}, 4271 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), 4272 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, 4273 AMDGPU_GFX_SPI_MEM, 1}, 4274 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), 4275 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, 4276 AMDGPU_GFX_SP_MEM, 4}, 4277 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), 4278 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, 4279 AMDGPU_GFX_SP_MEM, 4}, 4280 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), 4281 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, 4282 AMDGPU_GFX_SQ_MEM, 4}, 4283 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), 4284 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, 4285 AMDGPU_GFX_SQC_MEM, 4}, 4286 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), 4287 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, 4288 AMDGPU_GFX_TCX_MEM, 1}, 4289 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI), 4290 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, 4291 AMDGPU_GFX_TCC_MEM, 1}, 4292 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), 4293 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, 4294 AMDGPU_GFX_TA_MEM, 4}, 4295 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG), 4296 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, 4297 AMDGPU_GFX_TCI_MEM, 1}, 4298 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), 4299 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, 4300 AMDGPU_GFX_TCP_MEM, 4}, 4301 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), 4302 10, (AMDGPU_RAS_ERR_INFO_VALID | 
AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, 4303 AMDGPU_GFX_TD_MEM, 4}, 4304 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), 4305 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, 4306 AMDGPU_GFX_GCEA_MEM, 1}, 4307 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), 4308 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, 4309 AMDGPU_GFX_LDS_MEM, 4}, 4310 }; 4311 4312 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { 4313 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH), 4314 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, 4315 AMDGPU_GFX_RLC_MEM, 1}, 4316 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI), 4317 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, 4318 AMDGPU_GFX_CP_MEM, 1}, 4319 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI), 4320 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, 4321 AMDGPU_GFX_CP_MEM, 1}, 4322 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI), 4323 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, 4324 AMDGPU_GFX_CP_MEM, 1}, 4325 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI), 4326 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, 4327 AMDGPU_GFX_GDS_MEM, 1}, 4328 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI), 4329 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, 4330 AMDGPU_GFX_GC_CANE_MEM, 1}, 4331 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), 4332 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, 4333 AMDGPU_GFX_SPI_MEM, 1}, 4334 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), 4335 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, 4336 AMDGPU_GFX_SP_MEM, 4}, 4337 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), 4338 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, 4339 AMDGPU_GFX_SP_MEM, 4}, 4340 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), 4341 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, 4342 AMDGPU_GFX_SQ_MEM, 4}, 4343 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), 4344 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, 4345 AMDGPU_GFX_SQC_MEM, 4}, 4346 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), 4347 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, 4348 AMDGPU_GFX_TCX_MEM, 1}, 4349 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI), 4350 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, 4351 AMDGPU_GFX_TCC_MEM, 1}, 4352 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), 4353 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, 4354 AMDGPU_GFX_TA_MEM, 4}, 4355 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG), 4356 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, 4357 AMDGPU_GFX_TCI_MEM, 1}, 4358 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), 4359 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, 4360 AMDGPU_GFX_TCP_MEM, 4}, 
4361 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), 4362 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, 4363 AMDGPU_GFX_TD_MEM, 4}, 4364 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), 4365 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"}, 4366 AMDGPU_GFX_TCA_MEM, 1}, 4367 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI), 4368 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, 4369 AMDGPU_GFX_GCEA_MEM, 1}, 4370 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), 4371 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, 4372 AMDGPU_GFX_LDS_MEM, 4}, 4373 }; 4374 4375 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, 4376 void *ras_error_status, int xcc_id) 4377 { 4378 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 4379 unsigned long ce_count = 0, ue_count = 0; 4380 uint32_t i, j, k; 4381 4382 /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */ 4383 struct amdgpu_smuio_mcm_config_info mcm_info = { 4384 .socket_id = adev->smuio.funcs->get_socket_id(adev), 4385 .die_id = xcc_id & 0x01 ? 1 : 0, 4386 }; 4387 4388 mutex_lock(&adev->grbm_idx_mutex); 4389 4390 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { 4391 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { 4392 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { 4393 /* no need to select if instance number is 1 */ 4394 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || 4395 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) 4396 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4397 4398 amdgpu_ras_inst_query_ras_error_count(adev, 4399 &(gfx_v9_4_3_ce_reg_list[i].reg_entry), 4400 1, 4401 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent, 4402 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size, 4403 GET_INST(GC, xcc_id), 4404 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 4405 &ce_count); 4406 4407 amdgpu_ras_inst_query_ras_error_count(adev, 4408 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4409 1, 4410 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, 4411 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, 4412 GET_INST(GC, xcc_id), 4413 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 4414 &ue_count); 4415 } 4416 } 4417 } 4418 4419 /* handle extra register entries of UE */ 4420 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { 4421 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { 4422 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { 4423 /* no need to select if instance number is 1 */ 4424 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || 4425 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) 4426 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4427 4428 amdgpu_ras_inst_query_ras_error_count(adev, 4429 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4430 1, 4431 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, 4432 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, 4433 GET_INST(GC, xcc_id), 4434 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 4435 &ue_count); 4436 } 4437 } 4438 } 4439 4440 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4441 xcc_id); 4442 mutex_unlock(&adev->grbm_idx_mutex); 4443 4444 /* the caller is expected to initialize 4445 * err_data->ue_count and
err_data->ce_count 4446 */ 4447 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); 4448 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); 4449 } 4450 4451 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, 4452 void *ras_error_status, int xcc_id) 4453 { 4454 uint32_t i, j, k; 4455 4456 mutex_lock(&adev->grbm_idx_mutex); 4457 4458 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { 4459 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { 4460 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { 4461 /* no need to select if instance number is 1 */ 4462 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || 4463 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) 4464 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4465 4466 amdgpu_ras_inst_reset_ras_error_count(adev, 4467 &(gfx_v9_4_3_ce_reg_list[i].reg_entry), 4468 1, 4469 GET_INST(GC, xcc_id)); 4470 4471 amdgpu_ras_inst_reset_ras_error_count(adev, 4472 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4473 1, 4474 GET_INST(GC, xcc_id)); 4475 } 4476 } 4477 } 4478 4479 /* handle extra register entries of UE */ 4480 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { 4481 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { 4482 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { 4483 /* no need to select if instance number is 1 */ 4484 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || 4485 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) 4486 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4487 4488 amdgpu_ras_inst_reset_ras_error_count(adev, 4489 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4490 1, 4491 GET_INST(GC, xcc_id)); 4492 } 4493 } 4494 } 4495 4496 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4497 xcc_id); 4498 mutex_unlock(&adev->grbm_idx_mutex); 4499 } 4500 4501 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, 4502 void *ras_error_status, int xcc_id) 4503 { 4504 uint32_t i; 4505 uint32_t data; 4506 4507 if (amdgpu_sriov_vf(adev)) 4508 return; 4509 4510 data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG); 4511 data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, 4512 amdgpu_watchdog_timer.timeout_fatal_disable ? 
1 : 0); 4513 4514 if (amdgpu_watchdog_timer.timeout_fatal_disable && 4515 (amdgpu_watchdog_timer.period < 1 || 4516 amdgpu_watchdog_timer.period > 0x23)) { 4517 dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n"); 4518 amdgpu_watchdog_timer.period = 0x23; 4519 } 4520 data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL, 4521 amdgpu_watchdog_timer.period); 4522 4523 mutex_lock(&adev->grbm_idx_mutex); 4524 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4525 gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id); 4526 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data); 4527 } 4528 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4529 xcc_id); 4530 mutex_unlock(&adev->grbm_idx_mutex); 4531 } 4532 4533 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, 4534 void *ras_error_status) 4535 { 4536 amdgpu_gfx_ras_error_func(adev, ras_error_status, 4537 gfx_v9_4_3_inst_query_ras_err_count); 4538 } 4539 4540 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) 4541 { 4542 amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count); 4543 } 4544 4545 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) 4546 { 4547 amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); 4548 } 4549 4550 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) 4551 { 4552 /* Header itself is a NOP packet */ 4553 if (num_nop == 1) { 4554 amdgpu_ring_write(ring, ring->funcs->nop); 4555 return; 4556 } 4557 4558 /* Max HW optimization: one NOP packet can cover up to 0x3ffe data words; the remainder is emitted one NOP at a time */ 4559 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); 4560 4561 /* Header is at index 0, followed by num_nop - 1 NOP packets */ 4562 amdgpu_ring_insert_nop(ring, num_nop - 1); 4563 } 4564 4565 static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) 4566 { 4567 struct amdgpu_device *adev = ip_block->adev; 4568 uint32_t i, j, k; 4569 uint32_t xcc_id, xcc_offset, inst_offset; 4570 uint32_t num_xcc, reg, num_inst; 4571 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); 4572 4573 if (!adev->gfx.ip_dump_core) 4574 return; 4575 4576 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4577 drm_printf(p, "Number of Instances:%d\n", num_xcc); 4578 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4579 xcc_offset = xcc_id * reg_count; 4580 drm_printf(p, "\nInstance id:%d\n", xcc_id); 4581 for (i = 0; i < reg_count; i++) 4582 drm_printf(p, "%-50s \t 0x%08x\n", 4583 gc_reg_list_9_4_3[i].reg_name, 4584 adev->gfx.ip_dump_core[xcc_offset + i]); 4585 } 4586 4587 /* print compute queue registers for all instances */ 4588 if (!adev->gfx.ip_dump_compute_queues) 4589 return; 4590 4591 num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 4592 adev->gfx.mec.num_queue_per_pipe; 4593 4594 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); 4595 drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n", 4596 num_xcc, 4597 adev->gfx.mec.num_mec, 4598 adev->gfx.mec.num_pipe_per_mec, 4599 adev->gfx.mec.num_queue_per_pipe); 4600 4601 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4602 xcc_offset = xcc_id * reg_count * num_inst; 4603 inst_offset = 0; 4604 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4605 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4606 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 4607 drm_printf(p, 4608 "\nxcc:%d mec:%d, pipe:%d, queue:%d\n", 4609 xcc_id, i, j, k); 4610
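/*
 * Layout note (describing the indexing used just below): the compute
 * queue dump buffer is one flat array. The block for an XCC starts at
 * xcc_id * reg_count * num_inst, and each mec/pipe/queue instance
 * contributes reg_count words, walked in the same nested order as the
 * loops above. The register loop that follows also prints MEC1's
 * header-dump offset under the regCP_MEC_ME2_HEADER_DUMP name for the
 * second MEC (i != 0), matching the substitution made at capture time
 * in gfx_v9_4_3_ip_dump().
 */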
for (reg = 0; reg < reg_count; reg++) { 4611 if (i && gc_cp_reg_list_9_4_3[reg].reg_offset == 4612 regCP_MEC_ME1_HEADER_DUMP) 4613 drm_printf(p, 4614 "%-50s \t 0x%08x\n", 4615 "regCP_MEC_ME2_HEADER_DUMP", 4616 adev->gfx.ip_dump_compute_queues 4617 [xcc_offset + inst_offset + 4618 reg]); 4619 else 4620 drm_printf(p, 4621 "%-50s \t 0x%08x\n", 4622 gc_cp_reg_list_9_4_3[reg].reg_name, 4623 adev->gfx.ip_dump_compute_queues 4624 [xcc_offset + inst_offset + 4625 reg]); 4626 } 4627 inst_offset += reg_count; 4628 } 4629 } 4630 } 4631 } 4632 } 4633 4634 static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block) 4635 { 4636 struct amdgpu_device *adev = ip_block->adev; 4637 uint32_t i, j, k; 4638 uint32_t num_xcc, reg, num_inst; 4639 uint32_t xcc_id, xcc_offset, inst_offset; 4640 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); 4641 4642 if (!adev->gfx.ip_dump_core) 4643 return; 4644 4645 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4646 4647 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4648 xcc_offset = xcc_id * reg_count; 4649 for (i = 0; i < reg_count; i++) 4650 adev->gfx.ip_dump_core[xcc_offset + i] = 4651 RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i], 4652 GET_INST(GC, xcc_id))); 4653 } 4654 4655 /* dump compute queue registers for all instances */ 4656 if (!adev->gfx.ip_dump_compute_queues) 4657 return; 4658 4659 num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 4660 adev->gfx.mec.num_queue_per_pipe; 4661 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); 4662 mutex_lock(&adev->srbm_mutex); 4663 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4664 xcc_offset = xcc_id * reg_count * num_inst; 4665 inst_offset = 0; 4666 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4667 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4668 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 4669 /* ME0 is for GFX so start from 1 for CP */ 4670 soc15_grbm_select(adev, 1 + i, j, k, 0, 4671 GET_INST(GC, xcc_id)); 4672 4673 for (reg = 0; reg < reg_count; reg++) { 4674 if (i && gc_cp_reg_list_9_4_3[reg].reg_offset == 4675 regCP_MEC_ME1_HEADER_DUMP) 4676 adev->gfx.ip_dump_compute_queues 4677 [xcc_offset + 4678 inst_offset + reg] = 4679 RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), 4680 regCP_MEC_ME2_HEADER_DUMP)); 4681 else 4682 adev->gfx.ip_dump_compute_queues 4683 [xcc_offset + 4684 inst_offset + reg] = 4685 RREG32(SOC15_REG_ENTRY_OFFSET_INST( 4686 gc_cp_reg_list_9_4_3[reg], 4687 GET_INST(GC, xcc_id))); 4688 } 4689 inst_offset += reg_count; 4690 } 4691 } 4692 } 4693 } 4694 soc15_grbm_select(adev, 0, 0, 0, 0, 0); 4695 mutex_unlock(&adev->srbm_mutex); 4696 } 4697 4698 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 4699 { 4700 /* Emit the cleaner shader */ 4701 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 4702 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 4703 } 4704 4705 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { 4706 .name = "gfx_v9_4_3", 4707 .early_init = gfx_v9_4_3_early_init, 4708 .late_init = gfx_v9_4_3_late_init, 4709 .sw_init = gfx_v9_4_3_sw_init, 4710 .sw_fini = gfx_v9_4_3_sw_fini, 4711 .hw_init = gfx_v9_4_3_hw_init, 4712 .hw_fini = gfx_v9_4_3_hw_fini, 4713 .suspend = gfx_v9_4_3_suspend, 4714 .resume = gfx_v9_4_3_resume, 4715 .is_idle = gfx_v9_4_3_is_idle, 4716 .wait_for_idle = gfx_v9_4_3_wait_for_idle, 4717 .soft_reset = gfx_v9_4_3_soft_reset, 4718 .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, 4719 .set_powergating_state = gfx_v9_4_3_set_powergating_state, 4720 
.get_clockgating_state = gfx_v9_4_3_get_clockgating_state, 4721 .dump_ip_state = gfx_v9_4_3_ip_dump, 4722 .print_ip_state = gfx_v9_4_3_ip_print, 4723 }; 4724 4725 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { 4726 .type = AMDGPU_RING_TYPE_COMPUTE, 4727 .align_mask = 0xff, 4728 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4729 .support_64bit_ptrs = true, 4730 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, 4731 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, 4732 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, 4733 .emit_frame_size = 4734 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ 4735 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ 4736 5 + /* hdp invalidate */ 4737 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ 4738 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4739 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4740 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ 4741 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ 4742 7 + /* gfx_v9_4_3_emit_mem_sync */ 4743 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ 4744 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ 4745 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */ 4746 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ 4747 .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, 4748 .emit_fence = gfx_v9_4_3_ring_emit_fence, 4749 .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync, 4750 .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush, 4751 .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch, 4752 .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, 4753 .test_ring = gfx_v9_4_3_ring_test_ring, 4754 .test_ib = gfx_v9_4_3_ring_test_ib, 4755 .insert_nop = gfx_v9_4_3_ring_insert_nop, 4756 .pad_ib = amdgpu_ring_generic_pad_ib, 4757 .emit_wreg = gfx_v9_4_3_ring_emit_wreg, 4758 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, 4759 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 4760 .soft_recovery = gfx_v9_4_3_ring_soft_recovery, 4761 .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, 4762 .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, 4763 .reset = gfx_v9_4_3_reset_kcq, 4764 .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader, 4765 .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, 4766 .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, 4767 }; 4768 4769 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { 4770 .type = AMDGPU_RING_TYPE_KIQ, 4771 .align_mask = 0xff, 4772 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4773 .support_64bit_ptrs = true, 4774 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, 4775 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, 4776 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, 4777 .emit_frame_size = 4778 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ 4779 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ 4780 5 + /* hdp invalidate */ 4781 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ 4782 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4783 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4784 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ 4785 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */ 4786 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ 4787 .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq, 4788 .test_ring = gfx_v9_4_3_ring_test_ring, 4789 .insert_nop = amdgpu_ring_insert_nop, 4790 .pad_ib = amdgpu_ring_generic_pad_ib, 4791 .emit_rreg = gfx_v9_4_3_ring_emit_rreg, 4792 .emit_wreg = gfx_v9_4_3_ring_emit_wreg, 4793 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, 4794 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 4795 }; 4796 4797 
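/*
 * Note on ring bookkeeping: compute rings of all XCCs share one flat
 * array, with ring j of XCC i stored at
 * adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].
 * gfx_v9_4_3_eop_irq() and gfx_v9_4_3_fault() rely on this same layout
 * when they look up the ring that an IV entry targets.
 */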
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) 4798 { 4799 int i, j, num_xcc; 4800 4801 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4802 for (i = 0; i < num_xcc; i++) { 4803 adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; 4804 4805 for (j = 0; j < adev->gfx.num_compute_rings; j++) 4806 adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs 4807 = &gfx_v9_4_3_ring_funcs_compute; 4808 } 4809 } 4810 4811 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = { 4812 .set = gfx_v9_4_3_set_eop_interrupt_state, 4813 .process = gfx_v9_4_3_eop_irq, 4814 }; 4815 4816 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = { 4817 .set = gfx_v9_4_3_set_priv_reg_fault_state, 4818 .process = gfx_v9_4_3_priv_reg_irq, 4819 }; 4820 4821 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = { 4822 .set = gfx_v9_4_3_set_bad_op_fault_state, 4823 .process = gfx_v9_4_3_bad_op_irq, 4824 }; 4825 4826 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = { 4827 .set = gfx_v9_4_3_set_priv_inst_fault_state, 4828 .process = gfx_v9_4_3_priv_inst_irq, 4829 }; 4830 4831 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev) 4832 { 4833 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 4834 adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs; 4835 4836 adev->gfx.priv_reg_irq.num_types = 1; 4837 adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs; 4838 4839 adev->gfx.bad_op_irq.num_types = 1; 4840 adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs; 4841 4842 adev->gfx.priv_inst_irq.num_types = 1; 4843 adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs; 4844 } 4845 4846 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev) 4847 { 4848 adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs; 4849 } 4850 4851 4852 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) 4853 { 4854 /* 9.4.3 variants removed all of the internal GDS memory; 4855 * only GWS opcodes used by the kernel, such as barrier and 4856 * semaphore, are supported. */ 4857 4858 /* init asic gds info */ 4859 adev->gds.gds_size = 0; 4860 adev->gds.gds_compute_max_wave_id = 0; 4861 adev->gds.gws_size = 64; 4862 adev->gds.oa_size = 16; 4863 } 4864 4865 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, 4866 u32 bitmap, int xcc_id) 4867 { 4868 u32 data; 4869 4870 if (!bitmap) 4871 return; 4872 4873 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4874 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4875 4876 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data); 4877 } 4878 4879 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id) 4880 { 4881 u32 data, mask; 4882 4883 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG); 4884 data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG); 4885 4886 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4887 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4888 4889 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); 4890 4891 return (~data) & mask; 4892 } 4893 4894 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, 4895 struct amdgpu_cu_info *cu_info) 4896 { 4897 int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0; 4898 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp; 4899 unsigned disable_masks[4 * 4]; 4900 bool is_symmetric_cus; 4901 4902 if (!adev || !cu_info) 4903 return -EINVAL; 4904 4905 /* 4906
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info)
{
	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
	unsigned disable_masks[4 * 4];
	bool is_symmetric_cus;

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
	    adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		is_symmetric_cus = true;
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
				ao_bitmap = 0;
				counter = 0;
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask) {
						if (counter < adev->gfx.config.max_cu_per_sh)
							ao_bitmap |= mask;
						counter++;
					}
					mask <<= 1;
				}
				active_cu_number += counter;
				if (i < 2 && j < 2)
					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
			if (i && is_symmetric_cus && prev_counter != counter)
				is_symmetric_cus = false;
			prev_counter = counter;
		}
		if (is_symmetric_cus) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 4,
	.rev = 3,
	.funcs = &gfx_v9_4_3_ip_funcs,
};

static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO : Initialize golden regs */
	/* gfx_v9_4_3_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v9_4_3_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
	.suspend = &gfx_v9_4_3_xcp_suspend,
	.resume = &gfx_v9_4_3_xcp_resume
};
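/*
 * Usage sketch (an assumption for illustration; the actual callers live
 * in the common XCP code, not in this file): the partition manager is
 * expected to drive the table above via its function pointers, with the
 * mask of XCC instances owned by one partition, e.g.
 *
 *	funcs->suspend(adev, inst_mask);
 *	...
 *	funcs->resume(adev, inst_mask);
 *
 * Both helpers walk the mask with for_each_inst(), so only the selected
 * XCC instances are suspended or resumed.
 */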
struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};

static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
	.ras_block = {
		.hw_ops = &gfx_v9_4_3_ras_ops,
		.ras_late_init = &gfx_v9_4_3_ras_late_init,
	},
	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};
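/*
 * Hookup sketch (an assumption mirroring how other amdgpu RAS blocks
 * are registered; the actual wiring lives elsewhere in this driver):
 *
 *	adev->gfx.ras = &gfx_v9_4_3_ras;
 *
 * Once registered, the common RAS framework calls .ras_late_init and
 * dispatches error-count queries and resets through the hw_ops table
 * above.
 */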