1 /* 2 * Copyright 2022 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/firmware.h> 24 25 #include "amdgpu.h" 26 #include "amdgpu_gfx.h" 27 #include "soc15.h" 28 #include "soc15d.h" 29 #include "soc15_common.h" 30 #include "vega10_enum.h" 31 32 #include "v9_structs.h" 33 34 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" 35 36 #include "gc/gc_9_4_3_offset.h" 37 #include "gc/gc_9_4_3_sh_mask.h" 38 39 #include "gfx_v9_4_3.h" 40 #include "gfx_v9_4_3_cleaner_shader.h" 41 #include "amdgpu_xcp.h" 42 #include "amdgpu_aca.h" 43 44 MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); 45 MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin"); 46 MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin"); 47 MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); 48 MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin"); 49 MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin"); 50 MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin"); 51 MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin"); 52 53 #define GFX9_MEC_HPD_SIZE 4096 54 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 55 56 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042 57 #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301 58 59 #define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */ 60 #define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */ 61 #define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */ 62 #define XCC_REG_RANGE_1_HIGH 0x10000 /* XCC gfxdec1 upper Bound */ 63 64 #define NORMALIZE_XCC_REG_OFFSET(offset) \ 65 (offset & 0xFFFF) 66 67 static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = { 68 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS), 69 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2), 70 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1), 71 SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2), 72 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1), 73 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1), 74 SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT), 75 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT), 76 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT), 77 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS), 78 SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR), 79 SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS), 80 SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS), 81 SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS), 82 SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT), 83 SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT), 84 SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS), 85 SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS), 86 SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS), 87 SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS), 88 SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS), 89 SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS), 90 SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS), 91 SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL), 92 SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS), 93 SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG), 94 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL), 95 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR), 96 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR), 97 SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS), 98 SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT), 99 SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND), 100 SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE), 101 SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1), 102 SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2), 103 SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE), 104 SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE), 105 SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE), 106 SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT), 107 SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6), 108 /* SE status registers */ 109 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0), 110 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1), 111 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2), 112 SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3) 113 }; 114 115 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = { 116 /* compute queue registers */ 117 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID), 118 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE), 119 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE), 120 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY), 121 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY), 122 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM), 123 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE), 124 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI), 125 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR), 126 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), 127 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), 128 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), 129 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL), 130 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR), 131 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI), 132 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR), 133 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL), 134 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST), 135 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR), 136 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI), 137 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL), 138 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR), 139 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR), 140 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS), 141 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO), 142 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI), 143 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL), 144 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET), 145 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE), 146 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET), 147 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE), 148 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE), 149 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR), 150 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM), 151 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO), 152 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI), 153 SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS), 154 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 155 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 156 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 157 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 158 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 159 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 160 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 161 SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP), 162 }; 163 164 struct amdgpu_gfx_ras gfx_v9_4_3_ras; 165 166 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev); 167 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev); 168 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev); 169 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev); 170 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, 171 struct amdgpu_cu_info *cu_info); 172 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id); 173 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id); 174 175 static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring, 176 uint64_t queue_mask) 177 { 178 struct amdgpu_device *adev = kiq_ring->adev; 179 u64 shader_mc_addr; 180 181 /* Cleaner shader MC address */ 182 shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8; 183 184 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 185 amdgpu_ring_write(kiq_ring, 186 PACKET3_SET_RESOURCES_VMID_MASK(0) | 187 /* vmid_mask:0* queue_type:0 (KIQ) */ 188 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); 189 amdgpu_ring_write(kiq_ring, 190 lower_32_bits(queue_mask)); /* queue mask lo */ 191 amdgpu_ring_write(kiq_ring, 192 upper_32_bits(queue_mask)); /* queue mask hi */ 193 amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */ 194 amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */ 195 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 196 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 197 } 198 199 static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring, 200 struct amdgpu_ring *ring) 201 { 202 struct amdgpu_device *adev = kiq_ring->adev; 203 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 204 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 205 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 206 207 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 208 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 209 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 210 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 211 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 212 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 213 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 214 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | 215 /*queue_type: normal compute queue */ 216 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | 217 /* alloc format: all_on_one_pipe */ 218 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | 219 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 220 /* num_queues: must be 1 */ 221 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); 222 amdgpu_ring_write(kiq_ring, 223 PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 224 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 225 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 226 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 227 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 228 } 229 230 static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 231 struct amdgpu_ring *ring, 232 enum amdgpu_unmap_queues_action action, 233 u64 gpu_addr, u64 seq) 234 { 235 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 236 237 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 238 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 239 PACKET3_UNMAP_QUEUES_ACTION(action) | 240 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 241 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | 242 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 243 amdgpu_ring_write(kiq_ring, 244 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 245 246 if (action == PREEMPT_QUEUES_NO_UNMAP) { 247 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); 248 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); 249 amdgpu_ring_write(kiq_ring, seq); 250 } else { 251 amdgpu_ring_write(kiq_ring, 0); 252 amdgpu_ring_write(kiq_ring, 0); 253 amdgpu_ring_write(kiq_ring, 0); 254 } 255 } 256 257 static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring, 258 struct amdgpu_ring *ring, 259 u64 addr, 260 u64 seq) 261 { 262 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 263 264 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); 265 amdgpu_ring_write(kiq_ring, 266 PACKET3_QUERY_STATUS_CONTEXT_ID(0) | 267 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | 268 PACKET3_QUERY_STATUS_COMMAND(2)); 269 /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 270 amdgpu_ring_write(kiq_ring, 271 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | 272 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); 273 amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); 274 amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); 275 amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); 276 amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); 277 } 278 279 static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, 280 uint16_t pasid, uint32_t flush_type, 281 bool all_hub) 282 { 283 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 284 amdgpu_ring_write(kiq_ring, 285 PACKET3_INVALIDATE_TLBS_DST_SEL(1) | 286 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 287 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 288 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 289 } 290 291 static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type, 292 uint32_t me_id, uint32_t pipe_id, uint32_t queue_id, 293 uint32_t xcc_id, uint32_t vmid) 294 { 295 struct amdgpu_device *adev = kiq_ring->adev; 296 unsigned i; 297 298 /* enter save mode */ 299 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); 300 mutex_lock(&adev->srbm_mutex); 301 soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id); 302 303 if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { 304 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2); 305 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1); 306 /* wait till dequeue take effects */ 307 for (i = 0; i < adev->usec_timeout; i++) { 308 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) 309 break; 310 udelay(1); 311 } 312 if (i >= adev->usec_timeout) 313 dev_err(adev->dev, "fail to wait on hqd deactive\n"); 314 } else { 315 dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type); 316 } 317 318 soc15_grbm_select(adev, 0, 0, 0, 0, 0); 319 mutex_unlock(&adev->srbm_mutex); 320 /* exit safe mode */ 321 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); 322 } 323 324 static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { 325 .kiq_set_resources = gfx_v9_4_3_kiq_set_resources, 326 .kiq_map_queues = gfx_v9_4_3_kiq_map_queues, 327 .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues, 328 .kiq_query_status = gfx_v9_4_3_kiq_query_status, 329 .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs, 330 .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue, 331 .set_resources_size = 8, 332 .map_queues_size = 7, 333 .unmap_queues_size = 6, 334 .query_status_size = 7, 335 .invalidate_tlbs_size = 2, 336 }; 337 338 static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) 339 { 340 int i, num_xcc; 341 342 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 343 for (i = 0; i < num_xcc; i++) 344 adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs; 345 } 346 347 static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) 348 { 349 int i, num_xcc, dev_inst; 350 351 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 352 for (i = 0; i < num_xcc; i++) { 353 dev_inst = GET_INST(GC, i); 354 355 WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG, 356 GOLDEN_GB_ADDR_CONFIG); 357 WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1); 358 } 359 } 360 361 static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg) 362 { 363 uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg); 364 365 /* If it is an XCC reg, normalize the reg to keep 366 lower 16 bits in local xcc */ 367 368 if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) || 369 ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH))) 370 return normalized_reg; 371 else 372 return reg; 373 } 374 375 static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, 376 bool wc, uint32_t reg, uint32_t val) 377 { 378 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); 379 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 380 amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | 381 WRITE_DATA_DST_SEL(0) | 382 (wc ? WR_CONFIRM : 0)); 383 amdgpu_ring_write(ring, reg); 384 amdgpu_ring_write(ring, 0); 385 amdgpu_ring_write(ring, val); 386 } 387 388 static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 389 int mem_space, int opt, uint32_t addr0, 390 uint32_t addr1, uint32_t ref, uint32_t mask, 391 uint32_t inv) 392 { 393 /* Only do the normalization on regspace */ 394 if (mem_space == 0) { 395 addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0); 396 addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1); 397 } 398 399 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 400 amdgpu_ring_write(ring, 401 /* memory (1) or register (0) */ 402 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 403 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 404 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 405 WAIT_REG_MEM_ENGINE(eng_sel))); 406 407 if (mem_space) 408 BUG_ON(addr0 & 0x3); /* Dword align */ 409 amdgpu_ring_write(ring, addr0); 410 amdgpu_ring_write(ring, addr1); 411 amdgpu_ring_write(ring, ref); 412 amdgpu_ring_write(ring, mask); 413 amdgpu_ring_write(ring, inv); /* poll interval */ 414 } 415 416 static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) 417 { 418 uint32_t scratch_reg0_offset, xcc_offset; 419 struct amdgpu_device *adev = ring->adev; 420 uint32_t tmp = 0; 421 unsigned i; 422 int r; 423 424 /* Use register offset which is local to XCC in the packet */ 425 xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 426 scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); 427 WREG32(scratch_reg0_offset, 0xCAFEDEAD); 428 tmp = RREG32(scratch_reg0_offset); 429 430 r = amdgpu_ring_alloc(ring, 3); 431 if (r) 432 return r; 433 434 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 435 amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START); 436 amdgpu_ring_write(ring, 0xDEADBEEF); 437 amdgpu_ring_commit(ring); 438 439 for (i = 0; i < adev->usec_timeout; i++) { 440 tmp = RREG32(scratch_reg0_offset); 441 if (tmp == 0xDEADBEEF) 442 break; 443 udelay(1); 444 } 445 446 if (i >= adev->usec_timeout) 447 r = -ETIMEDOUT; 448 return r; 449 } 450 451 static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout) 452 { 453 struct amdgpu_device *adev = ring->adev; 454 struct amdgpu_ib ib; 455 struct dma_fence *f = NULL; 456 457 unsigned index; 458 uint64_t gpu_addr; 459 uint32_t tmp; 460 long r; 461 462 r = amdgpu_device_wb_get(adev, &index); 463 if (r) 464 return r; 465 466 gpu_addr = adev->wb.gpu_addr + (index * 4); 467 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 468 memset(&ib, 0, sizeof(ib)); 469 470 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); 471 if (r) 472 goto err1; 473 474 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 475 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 476 ib.ptr[2] = lower_32_bits(gpu_addr); 477 ib.ptr[3] = upper_32_bits(gpu_addr); 478 ib.ptr[4] = 0xDEADBEEF; 479 ib.length_dw = 5; 480 481 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 482 if (r) 483 goto err2; 484 485 r = dma_fence_wait_timeout(f, false, timeout); 486 if (r == 0) { 487 r = -ETIMEDOUT; 488 goto err2; 489 } else if (r < 0) { 490 goto err2; 491 } 492 493 tmp = adev->wb.wb[index]; 494 if (tmp == 0xDEADBEEF) 495 r = 0; 496 else 497 r = -EINVAL; 498 499 err2: 500 amdgpu_ib_free(&ib, NULL); 501 dma_fence_put(f); 502 err1: 503 amdgpu_device_wb_free(adev, index); 504 return r; 505 } 506 507 508 /* This value might differs per partition */ 509 static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev) 510 { 511 uint64_t clock; 512 513 mutex_lock(&adev->gfx.gpu_clock_mutex); 514 WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1); 515 clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) | 516 ((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 517 mutex_unlock(&adev->gfx.gpu_clock_mutex); 518 519 return clock; 520 } 521 522 static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev) 523 { 524 amdgpu_ucode_release(&adev->gfx.pfp_fw); 525 amdgpu_ucode_release(&adev->gfx.me_fw); 526 amdgpu_ucode_release(&adev->gfx.ce_fw); 527 amdgpu_ucode_release(&adev->gfx.rlc_fw); 528 amdgpu_ucode_release(&adev->gfx.mec_fw); 529 amdgpu_ucode_release(&adev->gfx.mec2_fw); 530 531 kfree(adev->gfx.rlc.register_list_format); 532 } 533 534 static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev, 535 const char *chip_name) 536 { 537 int err; 538 const struct rlc_firmware_header_v2_0 *rlc_hdr; 539 uint16_t version_major; 540 uint16_t version_minor; 541 542 543 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 544 AMDGPU_UCODE_REQUIRED, 545 "amdgpu/%s_rlc.bin", chip_name); 546 if (err) 547 goto out; 548 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 549 550 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 551 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 552 err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor); 553 out: 554 if (err) 555 amdgpu_ucode_release(&adev->gfx.rlc_fw); 556 557 return err; 558 } 559 560 static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev, 561 const char *chip_name) 562 { 563 int err; 564 565 if (amdgpu_sriov_vf(adev)) { 566 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 567 AMDGPU_UCODE_REQUIRED, 568 "amdgpu/%s_sjt_mec.bin", chip_name); 569 570 if (err) 571 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 572 AMDGPU_UCODE_REQUIRED, 573 "amdgpu/%s_mec.bin", chip_name); 574 } else 575 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 576 AMDGPU_UCODE_REQUIRED, 577 "amdgpu/%s_mec.bin", chip_name); 578 if (err) 579 goto out; 580 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1); 581 amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT); 582 583 adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version; 584 adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version; 585 586 out: 587 if (err) 588 amdgpu_ucode_release(&adev->gfx.mec_fw); 589 return err; 590 } 591 592 static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev) 593 { 594 char ucode_prefix[15]; 595 int r; 596 597 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 598 599 r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix); 600 if (r) 601 return r; 602 603 r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix); 604 if (r) 605 return r; 606 607 return r; 608 } 609 610 static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev) 611 { 612 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 613 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 614 } 615 616 static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) 617 { 618 int r, i, num_xcc; 619 u32 *hpd; 620 const __le32 *fw_data; 621 unsigned fw_size; 622 u32 *fw; 623 size_t mec_hpd_size; 624 625 const struct gfx_firmware_header_v1_0 *mec_hdr; 626 627 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 628 for (i = 0; i < num_xcc; i++) 629 bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap, 630 AMDGPU_MAX_COMPUTE_QUEUES); 631 632 /* take ownership of the relevant compute queues */ 633 amdgpu_gfx_compute_queue_acquire(adev); 634 mec_hpd_size = 635 adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE; 636 if (mec_hpd_size) { 637 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 638 AMDGPU_GEM_DOMAIN_VRAM | 639 AMDGPU_GEM_DOMAIN_GTT, 640 &adev->gfx.mec.hpd_eop_obj, 641 &adev->gfx.mec.hpd_eop_gpu_addr, 642 (void **)&hpd); 643 if (r) { 644 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 645 gfx_v9_4_3_mec_fini(adev); 646 return r; 647 } 648 649 if (amdgpu_emu_mode == 1) { 650 for (i = 0; i < mec_hpd_size / 4; i++) { 651 memset((void *)(hpd + i), 0, 4); 652 if (i % 50 == 0) 653 msleep(1); 654 } 655 } else { 656 memset(hpd, 0, mec_hpd_size); 657 } 658 659 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 660 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 661 } 662 663 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 664 665 fw_data = (const __le32 *) 666 (adev->gfx.mec_fw->data + 667 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 668 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 669 670 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 671 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 672 &adev->gfx.mec.mec_fw_obj, 673 &adev->gfx.mec.mec_fw_gpu_addr, 674 (void **)&fw); 675 if (r) { 676 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); 677 gfx_v9_4_3_mec_fini(adev); 678 return r; 679 } 680 681 memcpy(fw, fw_data, fw_size); 682 683 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 684 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 685 686 return 0; 687 } 688 689 static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num, 690 u32 sh_num, u32 instance, int xcc_id) 691 { 692 u32 data; 693 694 if (instance == 0xffffffff) 695 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 696 INSTANCE_BROADCAST_WRITES, 1); 697 else 698 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 699 INSTANCE_INDEX, instance); 700 701 if (se_num == 0xffffffff) 702 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 703 SE_BROADCAST_WRITES, 1); 704 else 705 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 706 707 if (sh_num == 0xffffffff) 708 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 709 SH_BROADCAST_WRITES, 1); 710 else 711 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 712 713 WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data); 714 } 715 716 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address) 717 { 718 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, 719 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 720 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | 721 (address << SQ_IND_INDEX__INDEX__SHIFT) | 722 (SQ_IND_INDEX__FORCE_READ_MASK)); 723 return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); 724 } 725 726 static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 727 uint32_t wave, uint32_t thread, 728 uint32_t regno, uint32_t num, uint32_t *out) 729 { 730 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, 731 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 732 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | 733 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 734 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) | 735 (SQ_IND_INDEX__FORCE_READ_MASK) | 736 (SQ_IND_INDEX__AUTO_INCR_MASK)); 737 while (num--) 738 *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); 739 } 740 741 static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev, 742 uint32_t xcc_id, uint32_t simd, uint32_t wave, 743 uint32_t *dst, int *no_fields) 744 { 745 /* type 1 wave data */ 746 dst[(*no_fields)++] = 1; 747 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); 748 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); 749 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); 750 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); 751 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); 752 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID); 753 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); 754 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); 755 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC); 756 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC); 757 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS); 758 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); 759 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0); 760 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0); 761 dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE); 762 } 763 764 static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 765 uint32_t wave, uint32_t start, 766 uint32_t size, uint32_t *dst) 767 { 768 wave_read_regs(adev, xcc_id, simd, wave, 0, 769 start + SQIND_WAVE_SGPRS_OFFSET, size, dst); 770 } 771 772 static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 773 uint32_t wave, uint32_t thread, 774 uint32_t start, uint32_t size, 775 uint32_t *dst) 776 { 777 wave_read_regs(adev, xcc_id, simd, wave, thread, 778 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 779 } 780 781 static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, 782 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 783 { 784 soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); 785 } 786 787 static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev) 788 { 789 u32 xcp_ctl; 790 791 /* Value is expected to be the same on all, fetch from first instance */ 792 xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL); 793 794 return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP); 795 } 796 797 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, 798 int num_xccs_per_xcp) 799 { 800 int ret, i, num_xcc; 801 u32 tmp = 0; 802 803 if (adev->psp.funcs) { 804 ret = psp_spatial_partition(&adev->psp, 805 NUM_XCC(adev->gfx.xcc_mask) / 806 num_xccs_per_xcp); 807 if (ret) 808 return ret; 809 } else { 810 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 811 812 for (i = 0; i < num_xcc; i++) { 813 tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, 814 num_xccs_per_xcp); 815 tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, 816 i % num_xccs_per_xcp); 817 WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, 818 tmp); 819 } 820 ret = 0; 821 } 822 823 adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; 824 825 return ret; 826 } 827 828 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) 829 { 830 int xcc; 831 832 xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0)); 833 if (!xcc) { 834 dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); 835 return -EINVAL; 836 } 837 838 return xcc - 1; 839 } 840 841 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { 842 .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, 843 .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh, 844 .read_wave_data = &gfx_v9_4_3_read_wave_data, 845 .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, 846 .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, 847 .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, 848 .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, 849 .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, 850 .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp, 851 }; 852 853 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle, 854 struct aca_bank *bank, enum aca_smu_type type, 855 void *data) 856 { 857 struct aca_bank_info info; 858 u64 misc0; 859 u32 instlo; 860 int ret; 861 862 ret = aca_bank_info_decode(bank, &info); 863 if (ret) 864 return ret; 865 866 /* NOTE: overwrite info.die_id with xcd id for gfx */ 867 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); 868 instlo &= GENMASK(31, 1); 869 info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; 870 871 misc0 = bank->regs[ACA_REG_IDX_MISC0]; 872 873 switch (type) { 874 case ACA_SMU_TYPE_UE: 875 bank->aca_err_type = ACA_ERROR_TYPE_UE; 876 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL); 877 break; 878 case ACA_SMU_TYPE_CE: 879 bank->aca_err_type = ACA_ERROR_TYPE_CE; 880 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 881 ACA_REG__MISC0__ERRCNT(misc0)); 882 break; 883 default: 884 return -EINVAL; 885 } 886 887 return ret; 888 } 889 890 static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, 891 enum aca_smu_type type, void *data) 892 { 893 u32 instlo; 894 895 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); 896 instlo &= GENMASK(31, 1); 897 switch (instlo) { 898 case mmSMNAID_XCD0_MCA_SMU: 899 case mmSMNAID_XCD1_MCA_SMU: 900 case mmSMNXCD_XCD0_MCA_SMU: 901 return true; 902 default: 903 break; 904 } 905 906 return false; 907 } 908 909 static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = { 910 .aca_bank_parser = gfx_v9_4_3_aca_bank_parser, 911 .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid, 912 }; 913 914 static const struct aca_info gfx_v9_4_3_aca_info = { 915 .hwip = ACA_HWIP_TYPE_SMU, 916 .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, 917 .bank_ops = &gfx_v9_4_3_aca_bank_ops, 918 }; 919 920 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) 921 { 922 adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs; 923 adev->gfx.ras = &gfx_v9_4_3_ras; 924 925 adev->gfx.config.max_hw_contexts = 8; 926 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 927 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 928 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 929 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 930 adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG; 931 932 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 933 REG_GET_FIELD( 934 adev->gfx.config.gb_addr_config, 935 GB_ADDR_CONFIG, 936 NUM_PIPES); 937 938 adev->gfx.config.max_tile_pipes = 939 adev->gfx.config.gb_addr_config_fields.num_pipes; 940 941 adev->gfx.config.gb_addr_config_fields.num_banks = 1 << 942 REG_GET_FIELD( 943 adev->gfx.config.gb_addr_config, 944 GB_ADDR_CONFIG, 945 NUM_BANKS); 946 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 947 REG_GET_FIELD( 948 adev->gfx.config.gb_addr_config, 949 GB_ADDR_CONFIG, 950 MAX_COMPRESSED_FRAGS); 951 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 952 REG_GET_FIELD( 953 adev->gfx.config.gb_addr_config, 954 GB_ADDR_CONFIG, 955 NUM_RB_PER_SE); 956 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 957 REG_GET_FIELD( 958 adev->gfx.config.gb_addr_config, 959 GB_ADDR_CONFIG, 960 NUM_SHADER_ENGINES); 961 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 962 REG_GET_FIELD( 963 adev->gfx.config.gb_addr_config, 964 GB_ADDR_CONFIG, 965 PIPE_INTERLEAVE_SIZE)); 966 967 return 0; 968 } 969 970 static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, 971 int xcc_id, int mec, int pipe, int queue) 972 { 973 unsigned irq_type; 974 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 975 unsigned int hw_prio; 976 uint32_t xcc_doorbell_start; 977 978 ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings + 979 ring_id]; 980 981 /* mec0 is me1 */ 982 ring->xcc_id = xcc_id; 983 ring->me = mec + 1; 984 ring->pipe = pipe; 985 ring->queue = queue; 986 987 ring->ring_obj = NULL; 988 ring->use_doorbell = true; 989 xcc_doorbell_start = adev->doorbell_index.mec_ring0 + 990 xcc_id * adev->doorbell_index.xcc_doorbell_range; 991 ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1; 992 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + 993 (ring_id + xcc_id * adev->gfx.num_compute_rings) * 994 GFX9_MEC_HPD_SIZE; 995 ring->vm_hub = AMDGPU_GFXHUB(xcc_id); 996 sprintf(ring->name, "comp_%d.%d.%d.%d", 997 ring->xcc_id, ring->me, ring->pipe, ring->queue); 998 999 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 1000 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 1001 + ring->pipe; 1002 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 1003 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 1004 /* type-2 packets are deprecated on MEC, use type-3 instead */ 1005 return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1006 hw_prio, NULL); 1007 } 1008 1009 static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev) 1010 { 1011 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); 1012 uint32_t *ptr, num_xcc, inst; 1013 1014 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1015 1016 ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL); 1017 if (!ptr) { 1018 DRM_ERROR("Failed to allocate memory for GFX IP Dump\n"); 1019 adev->gfx.ip_dump_core = NULL; 1020 } else { 1021 adev->gfx.ip_dump_core = ptr; 1022 } 1023 1024 /* Allocate memory for compute queue registers for all the instances */ 1025 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); 1026 inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 1027 adev->gfx.mec.num_queue_per_pipe; 1028 1029 ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL); 1030 if (!ptr) { 1031 DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n"); 1032 adev->gfx.ip_dump_compute_queues = NULL; 1033 } else { 1034 adev->gfx.ip_dump_compute_queues = ptr; 1035 } 1036 } 1037 1038 static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) 1039 { 1040 int i, j, k, r, ring_id, xcc_id, num_xcc; 1041 struct amdgpu_device *adev = ip_block->adev; 1042 1043 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1044 case IP_VERSION(9, 4, 3): 1045 case IP_VERSION(9, 4, 4): 1046 adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex; 1047 adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex); 1048 if (adev->gfx.mec_fw_version >= 153) { 1049 adev->gfx.enable_cleaner_shader = true; 1050 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1051 if (r) { 1052 adev->gfx.enable_cleaner_shader = false; 1053 dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1054 } 1055 } 1056 break; 1057 default: 1058 adev->gfx.enable_cleaner_shader = false; 1059 break; 1060 } 1061 1062 adev->gfx.mec.num_mec = 2; 1063 adev->gfx.mec.num_pipe_per_mec = 4; 1064 adev->gfx.mec.num_queue_per_pipe = 8; 1065 1066 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1067 1068 /* EOP Event */ 1069 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); 1070 if (r) 1071 return r; 1072 1073 /* Bad opcode Event */ 1074 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 1075 GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR, 1076 &adev->gfx.bad_op_irq); 1077 if (r) 1078 return r; 1079 1080 /* Privileged reg */ 1081 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT, 1082 &adev->gfx.priv_reg_irq); 1083 if (r) 1084 return r; 1085 1086 /* Privileged inst */ 1087 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT, 1088 &adev->gfx.priv_inst_irq); 1089 if (r) 1090 return r; 1091 1092 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1093 1094 r = adev->gfx.rlc.funcs->init(adev); 1095 if (r) { 1096 DRM_ERROR("Failed to init rlc BOs!\n"); 1097 return r; 1098 } 1099 1100 r = gfx_v9_4_3_mec_init(adev); 1101 if (r) { 1102 DRM_ERROR("Failed to init MEC BOs!\n"); 1103 return r; 1104 } 1105 1106 /* set up the compute queues - allocate horizontally across pipes */ 1107 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 1108 ring_id = 0; 1109 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1110 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1111 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; 1112 k++) { 1113 if (!amdgpu_gfx_is_mec_queue_enabled( 1114 adev, xcc_id, i, k, j)) 1115 continue; 1116 1117 r = gfx_v9_4_3_compute_ring_init(adev, 1118 ring_id, 1119 xcc_id, 1120 i, k, j); 1121 if (r) 1122 return r; 1123 1124 ring_id++; 1125 } 1126 } 1127 } 1128 1129 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id); 1130 if (r) { 1131 DRM_ERROR("Failed to init KIQ BOs!\n"); 1132 return r; 1133 } 1134 1135 r = amdgpu_gfx_kiq_init_ring(adev, xcc_id); 1136 if (r) 1137 return r; 1138 1139 /* create MQD for all compute queues as wel as KIQ for SRIOV case */ 1140 r = amdgpu_gfx_mqd_sw_init(adev, 1141 sizeof(struct v9_mqd_allocation), xcc_id); 1142 if (r) 1143 return r; 1144 } 1145 1146 adev->gfx.compute_supported_reset = 1147 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 1148 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1149 case IP_VERSION(9, 4, 3): 1150 case IP_VERSION(9, 4, 4): 1151 if ((adev->gfx.mec_fw_version >= 155) && 1152 !amdgpu_sriov_vf(adev) && 1153 !adev->debug_disable_gpu_ring_reset) { 1154 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1155 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; 1156 } 1157 break; 1158 case IP_VERSION(9, 5, 0): 1159 if ((adev->gfx.mec_fw_version >= 21) && 1160 !amdgpu_sriov_vf(adev) && 1161 !adev->debug_disable_gpu_ring_reset) { 1162 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1163 adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; 1164 } 1165 break; 1166 default: 1167 break; 1168 } 1169 r = gfx_v9_4_3_gpu_early_init(adev); 1170 if (r) 1171 return r; 1172 1173 r = amdgpu_gfx_ras_sw_init(adev); 1174 if (r) 1175 return r; 1176 1177 r = amdgpu_gfx_sysfs_init(adev); 1178 if (r) 1179 return r; 1180 1181 gfx_v9_4_3_alloc_ip_dump(adev); 1182 1183 return 0; 1184 } 1185 1186 static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block) 1187 { 1188 int i, num_xcc; 1189 struct amdgpu_device *adev = ip_block->adev; 1190 1191 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1192 for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) 1193 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1194 1195 for (i = 0; i < num_xcc; i++) { 1196 amdgpu_gfx_mqd_sw_fini(adev, i); 1197 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); 1198 amdgpu_gfx_kiq_fini(adev, i); 1199 } 1200 1201 amdgpu_gfx_cleaner_shader_sw_fini(adev); 1202 1203 gfx_v9_4_3_mec_fini(adev); 1204 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); 1205 gfx_v9_4_3_free_microcode(adev); 1206 amdgpu_gfx_sysfs_fini(adev); 1207 1208 kfree(adev->gfx.ip_dump_core); 1209 kfree(adev->gfx.ip_dump_compute_queues); 1210 1211 return 0; 1212 } 1213 1214 #define DEFAULT_SH_MEM_BASES (0x6000) 1215 static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev, 1216 int xcc_id) 1217 { 1218 int i; 1219 uint32_t sh_mem_config; 1220 uint32_t sh_mem_bases; 1221 uint32_t data; 1222 1223 /* 1224 * Configure apertures: 1225 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1226 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1227 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1228 */ 1229 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); 1230 1231 sh_mem_config = SH_MEM_ADDRESS_MODE_64 | 1232 SH_MEM_ALIGNMENT_MODE_UNALIGNED << 1233 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; 1234 1235 mutex_lock(&adev->srbm_mutex); 1236 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1237 soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id)); 1238 /* CP and shaders */ 1239 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config); 1240 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases); 1241 1242 /* Enable trap for each kfd vmid. */ 1243 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL); 1244 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 1245 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data); 1246 } 1247 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 1248 mutex_unlock(&adev->srbm_mutex); 1249 1250 /* 1251 * Initialize all compute VMIDs to have no GDS, GWS, or OA 1252 * access. These should be enabled by FW for target VMIDs. 1253 */ 1254 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1255 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0); 1256 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0); 1257 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0); 1258 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0); 1259 } 1260 } 1261 1262 static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) 1263 { 1264 int vmid; 1265 1266 /* 1267 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 1268 * access. Compute VMIDs should be enabled by FW for target VMIDs, 1269 * the driver can enable them for graphics. VMID0 should maintain 1270 * access so that HWS firmware can save/restore entries. 1271 */ 1272 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { 1273 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0); 1274 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0); 1275 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0); 1276 WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0); 1277 } 1278 } 1279 1280 /* For ASICs that needs xnack chain and MEC version supports, set SG_CONFIG1 1281 * DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform KFD to set xnack_chain 1282 * bit in SET_RESOURCES 1283 */ 1284 static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id) 1285 { 1286 uint32_t data; 1287 1288 if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)) 1289 return; 1290 1291 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1); 1292 data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1); 1293 WREG32_SOC15(GC, xcc_id, regSQ_CONFIG1, data); 1294 } 1295 1296 static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev, 1297 int xcc_id) 1298 { 1299 u32 tmp; 1300 int i; 1301 1302 /* XXX SH_MEM regs */ 1303 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1304 mutex_lock(&adev->srbm_mutex); 1305 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { 1306 soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id)); 1307 /* CP and shaders */ 1308 if (i == 0) { 1309 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1310 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1311 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, 1312 !!adev->gmc.noretry); 1313 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), 1314 regSH_MEM_CONFIG, tmp); 1315 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), 1316 regSH_MEM_BASES, 0); 1317 } else { 1318 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1319 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1320 tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE, 1321 !!adev->gmc.noretry); 1322 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), 1323 regSH_MEM_CONFIG, tmp); 1324 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1325 (adev->gmc.private_aperture_start >> 1326 48)); 1327 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1328 (adev->gmc.shared_aperture_start >> 1329 48)); 1330 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), 1331 regSH_MEM_BASES, tmp); 1332 } 1333 } 1334 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0)); 1335 1336 mutex_unlock(&adev->srbm_mutex); 1337 1338 gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id); 1339 gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id); 1340 gfx_v9_4_3_xcc_init_sq(adev, xcc_id); 1341 } 1342 1343 static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) 1344 { 1345 int i, num_xcc; 1346 1347 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1348 1349 gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info); 1350 adev->gfx.config.db_debug2 = 1351 RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2); 1352 1353 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1354 /* ToDo: GC 9.4.4 */ 1355 case IP_VERSION(9, 4, 3): 1356 if (adev->gfx.mec_fw_version >= 184 && 1357 (amdgpu_sriov_reg_access_sq_config(adev) || 1358 !amdgpu_sriov_vf(adev))) 1359 adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; 1360 break; 1361 case IP_VERSION(9, 5, 0): 1362 if (adev->gfx.mec_fw_version >= 23) 1363 adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; 1364 break; 1365 default: 1366 break; 1367 } 1368 1369 for (i = 0; i < num_xcc; i++) 1370 gfx_v9_4_3_xcc_constants_init(adev, i); 1371 } 1372 1373 static void 1374 gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev, 1375 int xcc_id) 1376 { 1377 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1); 1378 } 1379 1380 static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id) 1381 { 1382 /* 1383 * Rlc save restore list is workable since v2_1. 1384 */ 1385 gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id); 1386 } 1387 1388 static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) 1389 { 1390 uint32_t data; 1391 1392 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG); 1393 data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK; 1394 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data); 1395 } 1396 1397 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev) 1398 { 1399 uint32_t rlc_setting; 1400 1401 /* if RLC is not enabled, do nothing */ 1402 rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL); 1403 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) 1404 return false; 1405 1406 return true; 1407 } 1408 1409 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 1410 { 1411 uint32_t data; 1412 unsigned i; 1413 1414 data = RLC_SAFE_MODE__CMD_MASK; 1415 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 1416 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data); 1417 1418 /* wait for RLC_SAFE_MODE */ 1419 for (i = 0; i < adev->usec_timeout; i++) { 1420 if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) 1421 break; 1422 udelay(1); 1423 } 1424 } 1425 1426 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, 1427 int xcc_id) 1428 { 1429 uint32_t data; 1430 1431 data = RLC_SAFE_MODE__CMD_MASK; 1432 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data); 1433 } 1434 1435 static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 1436 { 1437 int xcc_id, num_xcc; 1438 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 1439 1440 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1441 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 1442 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)]; 1443 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0); 1444 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1); 1445 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2); 1446 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3); 1447 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL); 1448 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX); 1449 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT); 1450 } 1451 adev->gfx.rlc.rlcg_reg_access_supported = true; 1452 } 1453 1454 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev) 1455 { 1456 /* init spm vmid with 0xf */ 1457 if (adev->gfx.rlc.funcs->update_spm_vmid) 1458 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); 1459 1460 return 0; 1461 } 1462 1463 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev, 1464 int xcc_id) 1465 { 1466 u32 i, j, k; 1467 u32 mask; 1468 1469 mutex_lock(&adev->grbm_idx_mutex); 1470 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1471 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1472 gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 1473 xcc_id); 1474 for (k = 0; k < adev->usec_timeout; k++) { 1475 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0) 1476 break; 1477 udelay(1); 1478 } 1479 if (k == adev->usec_timeout) { 1480 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 1481 0xffffffff, 1482 0xffffffff, xcc_id); 1483 mutex_unlock(&adev->grbm_idx_mutex); 1484 DRM_INFO("Timeout wait for RLC serdes %u,%u\n", 1485 i, j); 1486 return; 1487 } 1488 } 1489 } 1490 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 1491 xcc_id); 1492 mutex_unlock(&adev->grbm_idx_mutex); 1493 1494 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 1495 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | 1496 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | 1497 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; 1498 for (k = 0; k < adev->usec_timeout; k++) { 1499 if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 1500 break; 1501 udelay(1); 1502 } 1503 } 1504 1505 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev, 1506 bool enable, int xcc_id) 1507 { 1508 u32 tmp; 1509 1510 /* These interrupts should be enabled to drive DS clock */ 1511 1512 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0); 1513 1514 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); 1515 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); 1516 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); 1517 1518 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp); 1519 } 1520 1521 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id) 1522 { 1523 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, 1524 RLC_ENABLE_F32, 0); 1525 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); 1526 gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id); 1527 } 1528 1529 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) 1530 { 1531 int i, num_xcc; 1532 1533 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1534 for (i = 0; i < num_xcc; i++) 1535 gfx_v9_4_3_xcc_rlc_stop(adev, i); 1536 } 1537 1538 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id) 1539 { 1540 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET, 1541 SOFT_RESET_RLC, 1); 1542 udelay(50); 1543 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET, 1544 SOFT_RESET_RLC, 0); 1545 udelay(50); 1546 } 1547 1548 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) 1549 { 1550 int i, num_xcc; 1551 1552 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1553 for (i = 0; i < num_xcc; i++) 1554 gfx_v9_4_3_xcc_rlc_reset(adev, i); 1555 } 1556 1557 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id) 1558 { 1559 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL, 1560 RLC_ENABLE_F32, 1); 1561 udelay(50); 1562 1563 /* carrizo do enable cp interrupt after cp inited */ 1564 if (!(adev->flags & AMD_IS_APU)) { 1565 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id); 1566 udelay(50); 1567 } 1568 } 1569 1570 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) 1571 { 1572 #ifdef AMDGPU_RLC_DEBUG_RETRY 1573 u32 rlc_ucode_ver; 1574 #endif 1575 int i, num_xcc; 1576 1577 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1578 for (i = 0; i < num_xcc; i++) { 1579 gfx_v9_4_3_xcc_rlc_start(adev, i); 1580 #ifdef AMDGPU_RLC_DEBUG_RETRY 1581 /* RLC_GPM_GENERAL_6 : RLC Ucode version */ 1582 rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6); 1583 if (rlc_ucode_ver == 0x108) { 1584 dev_info(adev->dev, 1585 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", 1586 rlc_ucode_ver, adev->gfx.rlc_fw_version); 1587 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, 1588 * default is 0x9C4 to create a 100us interval */ 1589 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4); 1590 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr 1591 * to disable the page fault retry interrupts, default is 1592 * 0x100 (256) */ 1593 WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100); 1594 } 1595 #endif 1596 } 1597 } 1598 1599 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev, 1600 int xcc_id) 1601 { 1602 const struct rlc_firmware_header_v2_0 *hdr; 1603 const __le32 *fw_data; 1604 unsigned i, fw_size; 1605 1606 if (!adev->gfx.rlc_fw) 1607 return -EINVAL; 1608 1609 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 1610 amdgpu_ucode_print_rlc_hdr(&hdr->header); 1611 1612 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1613 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 1614 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 1615 1616 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, 1617 RLCG_UCODE_LOADING_START_ADDRESS); 1618 for (i = 0; i < fw_size; i++) { 1619 if (amdgpu_emu_mode == 1 && i % 100 == 0) { 1620 dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i); 1621 msleep(1); 1622 } 1623 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 1624 } 1625 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 1626 1627 return 0; 1628 } 1629 1630 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id) 1631 { 1632 int r; 1633 1634 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1635 gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id); 1636 /* legacy rlc firmware loading */ 1637 r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id); 1638 if (r) 1639 return r; 1640 gfx_v9_4_3_xcc_rlc_start(adev, xcc_id); 1641 } 1642 1643 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); 1644 /* disable CG */ 1645 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0); 1646 gfx_v9_4_3_xcc_init_pg(adev, xcc_id); 1647 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); 1648 1649 return 0; 1650 } 1651 1652 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) 1653 { 1654 int r, i, num_xcc; 1655 1656 if (amdgpu_sriov_vf(adev)) 1657 return 0; 1658 1659 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1660 for (i = 0; i < num_xcc; i++) { 1661 r = gfx_v9_4_3_xcc_rlc_resume(adev, i); 1662 if (r) 1663 return r; 1664 } 1665 1666 return 0; 1667 } 1668 1669 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, 1670 unsigned vmid) 1671 { 1672 u32 reg, pre_data, data; 1673 1674 reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL); 1675 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) 1676 pre_data = RREG32_NO_KIQ(reg); 1677 else 1678 pre_data = RREG32(reg); 1679 1680 data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK); 1681 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 1682 1683 if (pre_data != data) { 1684 if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) { 1685 WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data); 1686 } else 1687 WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data); 1688 } 1689 } 1690 1691 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = { 1692 {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)}, 1693 {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)}, 1694 }; 1695 1696 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev, 1697 uint32_t offset, 1698 struct soc15_reg_rlcg *entries, int arr_size) 1699 { 1700 int i, inst; 1701 uint32_t reg; 1702 1703 if (!entries) 1704 return false; 1705 1706 for (i = 0; i < arr_size; i++) { 1707 const struct soc15_reg_rlcg *entry; 1708 1709 entry = &entries[i]; 1710 inst = adev->ip_map.logical_to_dev_inst ? 1711 adev->ip_map.logical_to_dev_inst( 1712 adev, entry->hwip, entry->instance) : 1713 entry->instance; 1714 reg = adev->reg_offset[entry->hwip][inst][entry->segment] + 1715 entry->reg; 1716 if (offset == reg) 1717 return true; 1718 } 1719 1720 return false; 1721 } 1722 1723 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) 1724 { 1725 return gfx_v9_4_3_check_rlcg_range(adev, offset, 1726 (void *)rlcg_access_gc_9_4_3, 1727 ARRAY_SIZE(rlcg_access_gc_9_4_3)); 1728 } 1729 1730 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, 1731 bool enable, int xcc_id) 1732 { 1733 if (enable) { 1734 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); 1735 } else { 1736 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 1737 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | 1738 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | 1739 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | 1740 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | 1741 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | 1742 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | 1743 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | 1744 CP_MEC_CNTL__MEC_ME1_HALT_MASK | 1745 CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 1746 adev->gfx.kiq[xcc_id].ring.sched.ready = false; 1747 } 1748 udelay(50); 1749 } 1750 1751 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev, 1752 int xcc_id) 1753 { 1754 const struct gfx_firmware_header_v1_0 *mec_hdr; 1755 const __le32 *fw_data; 1756 unsigned i; 1757 u32 tmp; 1758 u32 mec_ucode_addr_offset; 1759 u32 mec_ucode_data_offset; 1760 1761 if (!adev->gfx.mec_fw) 1762 return -EINVAL; 1763 1764 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); 1765 1766 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 1767 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 1768 1769 fw_data = (const __le32 *) 1770 (adev->gfx.mec_fw->data + 1771 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 1772 tmp = 0; 1773 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 1774 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 1775 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp); 1776 1777 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO, 1778 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); 1779 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI, 1780 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 1781 1782 mec_ucode_addr_offset = 1783 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR); 1784 mec_ucode_data_offset = 1785 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA); 1786 1787 /* MEC1 */ 1788 WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); 1789 for (i = 0; i < mec_hdr->jt_size; i++) 1790 WREG32(mec_ucode_data_offset, 1791 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 1792 1793 WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version); 1794 /* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ 1795 1796 return 0; 1797 } 1798 1799 /* KIQ functions */ 1800 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id) 1801 { 1802 uint32_t tmp; 1803 struct amdgpu_device *adev = ring->adev; 1804 1805 /* tell RLC which is KIQ queue */ 1806 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); 1807 tmp &= 0xffffff00; 1808 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 1809 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80); 1810 } 1811 1812 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) 1813 { 1814 struct amdgpu_device *adev = ring->adev; 1815 1816 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 1817 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { 1818 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; 1819 mqd->cp_hqd_queue_priority = 1820 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; 1821 } 1822 } 1823 } 1824 1825 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) 1826 { 1827 struct amdgpu_device *adev = ring->adev; 1828 struct v9_mqd *mqd = ring->mqd_ptr; 1829 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 1830 uint32_t tmp; 1831 1832 mqd->header = 0xC0310800; 1833 mqd->compute_pipelinestat_enable = 0x00000001; 1834 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 1835 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 1836 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 1837 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 1838 mqd->compute_misc_reserved = 0x00000003; 1839 1840 mqd->dynamic_cu_mask_addr_lo = 1841 lower_32_bits(ring->mqd_gpu_addr 1842 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 1843 mqd->dynamic_cu_mask_addr_hi = 1844 upper_32_bits(ring->mqd_gpu_addr 1845 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 1846 1847 eop_base_addr = ring->eop_gpu_addr >> 8; 1848 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 1849 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 1850 1851 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 1852 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL); 1853 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 1854 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); 1855 1856 mqd->cp_hqd_eop_control = tmp; 1857 1858 /* enable doorbell? */ 1859 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL); 1860 1861 if (ring->use_doorbell) { 1862 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1863 DOORBELL_OFFSET, ring->doorbell_index); 1864 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1865 DOORBELL_EN, 1); 1866 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1867 DOORBELL_SOURCE, 0); 1868 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1869 DOORBELL_HIT, 0); 1870 if (amdgpu_sriov_multi_vf_mode(adev)) 1871 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1872 DOORBELL_MODE, 1); 1873 } else { 1874 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1875 DOORBELL_EN, 0); 1876 } 1877 1878 mqd->cp_hqd_pq_doorbell_control = tmp; 1879 1880 /* disable the queue if it's active */ 1881 ring->wptr = 0; 1882 mqd->cp_hqd_dequeue_request = 0; 1883 mqd->cp_hqd_pq_rptr = 0; 1884 mqd->cp_hqd_pq_wptr_lo = 0; 1885 mqd->cp_hqd_pq_wptr_hi = 0; 1886 1887 /* set the pointer to the MQD */ 1888 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 1889 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 1890 1891 /* set MQD vmid to 0 */ 1892 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL); 1893 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 1894 mqd->cp_mqd_control = tmp; 1895 1896 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 1897 hqd_gpu_addr = ring->gpu_addr >> 8; 1898 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 1899 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 1900 1901 /* set up the HQD, this is similar to CP_RB0_CNTL */ 1902 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL); 1903 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 1904 (order_base_2(ring->ring_size / 4) - 1)); 1905 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 1906 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 1907 #ifdef __BIG_ENDIAN 1908 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); 1909 #endif 1910 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 1911 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); 1912 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 1913 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 1914 mqd->cp_hqd_pq_control = tmp; 1915 1916 /* set the wb address whether it's enabled or not */ 1917 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 1918 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 1919 mqd->cp_hqd_pq_rptr_report_addr_hi = 1920 upper_32_bits(wb_gpu_addr) & 0xffff; 1921 1922 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 1923 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 1924 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 1925 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 1926 1927 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 1928 ring->wptr = 0; 1929 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR); 1930 1931 /* set the vmid for the queue */ 1932 mqd->cp_hqd_vmid = 0; 1933 1934 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE); 1935 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 1936 mqd->cp_hqd_persistent_state = tmp; 1937 1938 /* set MIN_IB_AVAIL_SIZE */ 1939 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL); 1940 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 1941 mqd->cp_hqd_ib_control = tmp; 1942 1943 /* set static priority for a queue/ring */ 1944 gfx_v9_4_3_mqd_set_priority(ring, mqd); 1945 mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM); 1946 1947 /* map_queues packet doesn't need activate the queue, 1948 * so only kiq need set this field. 1949 */ 1950 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 1951 mqd->cp_hqd_active = 1; 1952 1953 return 0; 1954 } 1955 1956 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, 1957 int xcc_id) 1958 { 1959 struct amdgpu_device *adev = ring->adev; 1960 struct v9_mqd *mqd = ring->mqd_ptr; 1961 int j; 1962 1963 /* disable wptr polling */ 1964 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); 1965 1966 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR, 1967 mqd->cp_hqd_eop_base_addr_lo); 1968 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI, 1969 mqd->cp_hqd_eop_base_addr_hi); 1970 1971 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 1972 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL, 1973 mqd->cp_hqd_eop_control); 1974 1975 /* enable doorbell? */ 1976 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 1977 mqd->cp_hqd_pq_doorbell_control); 1978 1979 /* disable the queue if it's active */ 1980 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { 1981 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); 1982 for (j = 0; j < adev->usec_timeout; j++) { 1983 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) 1984 break; 1985 udelay(1); 1986 } 1987 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1988 mqd->cp_hqd_dequeue_request); 1989 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 1990 mqd->cp_hqd_pq_rptr); 1991 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 1992 mqd->cp_hqd_pq_wptr_lo); 1993 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 1994 mqd->cp_hqd_pq_wptr_hi); 1995 } 1996 1997 /* set the pointer to the MQD */ 1998 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR, 1999 mqd->cp_mqd_base_addr_lo); 2000 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI, 2001 mqd->cp_mqd_base_addr_hi); 2002 2003 /* set MQD vmid to 0 */ 2004 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL, 2005 mqd->cp_mqd_control); 2006 2007 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2008 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE, 2009 mqd->cp_hqd_pq_base_lo); 2010 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI, 2011 mqd->cp_hqd_pq_base_hi); 2012 2013 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2014 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL, 2015 mqd->cp_hqd_pq_control); 2016 2017 /* set the wb address whether it's enabled or not */ 2018 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR, 2019 mqd->cp_hqd_pq_rptr_report_addr_lo); 2020 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 2021 mqd->cp_hqd_pq_rptr_report_addr_hi); 2022 2023 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2024 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR, 2025 mqd->cp_hqd_pq_wptr_poll_addr_lo); 2026 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 2027 mqd->cp_hqd_pq_wptr_poll_addr_hi); 2028 2029 /* enable the doorbell if requested */ 2030 if (ring->use_doorbell) { 2031 WREG32_SOC15( 2032 GC, GET_INST(GC, xcc_id), 2033 regCP_MEC_DOORBELL_RANGE_LOWER, 2034 ((adev->doorbell_index.kiq + 2035 xcc_id * adev->doorbell_index.xcc_doorbell_range) * 2036 2) << 2); 2037 WREG32_SOC15( 2038 GC, GET_INST(GC, xcc_id), 2039 regCP_MEC_DOORBELL_RANGE_UPPER, 2040 ((adev->doorbell_index.userqueue_end + 2041 xcc_id * adev->doorbell_index.xcc_doorbell_range) * 2042 2) << 2); 2043 } 2044 2045 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 2046 mqd->cp_hqd_pq_doorbell_control); 2047 2048 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2049 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 2050 mqd->cp_hqd_pq_wptr_lo); 2051 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 2052 mqd->cp_hqd_pq_wptr_hi); 2053 2054 /* set the vmid for the queue */ 2055 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid); 2056 2057 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 2058 mqd->cp_hqd_persistent_state); 2059 2060 /* activate the queue */ 2061 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 2062 mqd->cp_hqd_active); 2063 2064 if (ring->use_doorbell) 2065 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1); 2066 2067 return 0; 2068 } 2069 2070 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring, 2071 int xcc_id) 2072 { 2073 struct amdgpu_device *adev = ring->adev; 2074 int j; 2075 2076 /* disable the queue if it's active */ 2077 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { 2078 2079 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); 2080 2081 for (j = 0; j < adev->usec_timeout; j++) { 2082 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) 2083 break; 2084 udelay(1); 2085 } 2086 2087 if (j == AMDGPU_MAX_USEC_TIMEOUT) { 2088 DRM_DEBUG("%s dequeue request failed.\n", ring->name); 2089 2090 /* Manual disable if dequeue request times out */ 2091 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0); 2092 } 2093 2094 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 2095 0); 2096 } 2097 2098 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0); 2099 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); 2100 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT); 2101 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); 2102 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0); 2103 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0); 2104 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0); 2105 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0); 2106 2107 return 0; 2108 } 2109 2110 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) 2111 { 2112 struct amdgpu_device *adev = ring->adev; 2113 struct v9_mqd *mqd = ring->mqd_ptr; 2114 struct v9_mqd *tmp_mqd; 2115 2116 gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id); 2117 2118 /* GPU could be in bad state during probe, driver trigger the reset 2119 * after load the SMU, in this case , the mqd is not be initialized. 2120 * driver need to re-init the mqd. 2121 * check mqd->cp_hqd_pq_control since this value should not be 0 2122 */ 2123 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup; 2124 if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) { 2125 /* for GPU_RESET case , reset MQD to a clean status */ 2126 if (adev->gfx.kiq[xcc_id].mqd_backup) 2127 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation)); 2128 2129 /* reset ring buffer */ 2130 ring->wptr = 0; 2131 amdgpu_ring_clear_ring(ring); 2132 mutex_lock(&adev->srbm_mutex); 2133 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 2134 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); 2135 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2136 mutex_unlock(&adev->srbm_mutex); 2137 } else { 2138 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 2139 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 2140 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 2141 mutex_lock(&adev->srbm_mutex); 2142 if (amdgpu_sriov_vf(adev) && adev->in_suspend) 2143 amdgpu_ring_clear_ring(ring); 2144 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 2145 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); 2146 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id); 2147 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2148 mutex_unlock(&adev->srbm_mutex); 2149 2150 if (adev->gfx.kiq[xcc_id].mqd_backup) 2151 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); 2152 } 2153 2154 return 0; 2155 } 2156 2157 static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, 2158 bool restore) 2159 { 2160 struct amdgpu_device *adev = ring->adev; 2161 struct v9_mqd *mqd = ring->mqd_ptr; 2162 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 2163 struct v9_mqd *tmp_mqd; 2164 2165 /* Same as above kiq init, driver need to re-init the mqd if mqd->cp_hqd_pq_control 2166 * is not be initialized before 2167 */ 2168 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; 2169 2170 if (!restore && (!tmp_mqd->cp_hqd_pq_control || 2171 (!amdgpu_in_reset(adev) && !adev->in_suspend))) { 2172 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 2173 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 2174 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 2175 mutex_lock(&adev->srbm_mutex); 2176 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id)); 2177 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id); 2178 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2179 mutex_unlock(&adev->srbm_mutex); 2180 2181 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2182 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); 2183 } else { 2184 /* restore MQD to a clean status */ 2185 if (adev->gfx.mec.mqd_backup[mqd_idx]) 2186 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); 2187 /* reset ring buffer */ 2188 ring->wptr = 0; 2189 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); 2190 amdgpu_ring_clear_ring(ring); 2191 } 2192 } 2193 2194 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) 2195 { 2196 struct amdgpu_ring *ring; 2197 int j; 2198 2199 for (j = 0; j < adev->gfx.num_compute_rings; j++) { 2200 ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings]; 2201 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2202 mutex_lock(&adev->srbm_mutex); 2203 soc15_grbm_select(adev, ring->me, 2204 ring->pipe, 2205 ring->queue, 0, GET_INST(GC, xcc_id)); 2206 gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id); 2207 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2208 mutex_unlock(&adev->srbm_mutex); 2209 } 2210 } 2211 2212 return 0; 2213 } 2214 2215 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) 2216 { 2217 gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id); 2218 return 0; 2219 } 2220 2221 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) 2222 { 2223 struct amdgpu_ring *ring; 2224 int i; 2225 2226 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); 2227 2228 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2229 ring = &adev->gfx.compute_ring[i + xcc_id * 2230 adev->gfx.num_compute_rings]; 2231 2232 gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); 2233 } 2234 2235 return amdgpu_gfx_enable_kcq(adev, xcc_id); 2236 } 2237 2238 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) 2239 { 2240 struct amdgpu_ring *ring; 2241 int r, j; 2242 2243 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); 2244 2245 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2246 gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id); 2247 2248 r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); 2249 if (r) 2250 return r; 2251 } else { 2252 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); 2253 } 2254 2255 r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); 2256 if (r) 2257 return r; 2258 2259 r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id); 2260 if (r) 2261 return r; 2262 2263 for (j = 0; j < adev->gfx.num_compute_rings; j++) { 2264 ring = &adev->gfx.compute_ring 2265 [j + xcc_id * adev->gfx.num_compute_rings]; 2266 r = amdgpu_ring_test_helper(ring); 2267 if (r) 2268 return r; 2269 } 2270 2271 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id); 2272 2273 return 0; 2274 } 2275 2276 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) 2277 { 2278 int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp; 2279 2280 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2281 if (amdgpu_sriov_vf(adev)) { 2282 enum amdgpu_gfx_partition mode; 2283 2284 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2285 AMDGPU_XCP_FL_NONE); 2286 if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2287 return -EINVAL; 2288 num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev); 2289 adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp; 2290 num_xcp = num_xcc / num_xcc_per_xcp; 2291 r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode); 2292 2293 } else { 2294 if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2295 AMDGPU_XCP_FL_NONE) == 2296 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2297 r = amdgpu_xcp_switch_partition_mode( 2298 adev->xcp_mgr, amdgpu_user_partt_mode); 2299 } 2300 if (r) 2301 return r; 2302 2303 for (i = 0; i < num_xcc; i++) { 2304 r = gfx_v9_4_3_xcc_cp_resume(adev, i); 2305 if (r) 2306 return r; 2307 } 2308 2309 return 0; 2310 } 2311 2312 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) 2313 { 2314 if (amdgpu_gfx_disable_kcq(adev, xcc_id)) 2315 DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id); 2316 2317 if (amdgpu_sriov_vf(adev)) { 2318 /* must disable polling for SRIOV when hw finished, otherwise 2319 * CPC engine may still keep fetching WB address which is already 2320 * invalid after sw finished and trigger DMAR reading error in 2321 * hypervisor side. 2322 */ 2323 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); 2324 return; 2325 } 2326 2327 /* Use deinitialize sequence from CAIL when unbinding device 2328 * from driver, otherwise KIQ is hanging when binding back 2329 */ 2330 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2331 mutex_lock(&adev->srbm_mutex); 2332 soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me, 2333 adev->gfx.kiq[xcc_id].ring.pipe, 2334 adev->gfx.kiq[xcc_id].ring.queue, 0, 2335 GET_INST(GC, xcc_id)); 2336 gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring, 2337 xcc_id); 2338 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2339 mutex_unlock(&adev->srbm_mutex); 2340 } 2341 2342 gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); 2343 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); 2344 } 2345 2346 static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block) 2347 { 2348 int r; 2349 struct amdgpu_device *adev = ip_block->adev; 2350 2351 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, 2352 adev->gfx.cleaner_shader_ptr); 2353 2354 if (!amdgpu_sriov_vf(adev)) 2355 gfx_v9_4_3_init_golden_registers(adev); 2356 2357 gfx_v9_4_3_constants_init(adev); 2358 2359 r = adev->gfx.rlc.funcs->resume(adev); 2360 if (r) 2361 return r; 2362 2363 r = gfx_v9_4_3_cp_resume(adev); 2364 if (r) 2365 return r; 2366 2367 return r; 2368 } 2369 2370 static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block) 2371 { 2372 struct amdgpu_device *adev = ip_block->adev; 2373 int i, num_xcc; 2374 2375 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 2376 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 2377 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); 2378 2379 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2380 for (i = 0; i < num_xcc; i++) { 2381 gfx_v9_4_3_xcc_fini(adev, i); 2382 } 2383 2384 return 0; 2385 } 2386 2387 static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block) 2388 { 2389 return gfx_v9_4_3_hw_fini(ip_block); 2390 } 2391 2392 static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block) 2393 { 2394 return gfx_v9_4_3_hw_init(ip_block); 2395 } 2396 2397 static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block) 2398 { 2399 struct amdgpu_device *adev = ip_block->adev; 2400 int i, num_xcc; 2401 2402 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2403 for (i = 0; i < num_xcc; i++) { 2404 if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS), 2405 GRBM_STATUS, GUI_ACTIVE)) 2406 return false; 2407 } 2408 return true; 2409 } 2410 2411 static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block) 2412 { 2413 unsigned i; 2414 struct amdgpu_device *adev = ip_block->adev; 2415 2416 for (i = 0; i < adev->usec_timeout; i++) { 2417 if (gfx_v9_4_3_is_idle(ip_block)) 2418 return 0; 2419 udelay(1); 2420 } 2421 return -ETIMEDOUT; 2422 } 2423 2424 static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block) 2425 { 2426 u32 grbm_soft_reset = 0; 2427 u32 tmp; 2428 struct amdgpu_device *adev = ip_block->adev; 2429 2430 /* GRBM_STATUS */ 2431 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); 2432 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 2433 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 2434 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 2435 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 2436 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 2437 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { 2438 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2439 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 2440 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2441 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); 2442 } 2443 2444 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { 2445 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2446 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 2447 } 2448 2449 /* GRBM_STATUS2 */ 2450 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2); 2451 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) 2452 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2453 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2454 2455 2456 if (grbm_soft_reset) { 2457 /* stop the rlc */ 2458 adev->gfx.rlc.funcs->stop(adev); 2459 2460 /* Disable MEC parsing/prefetching */ 2461 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0); 2462 2463 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); 2464 tmp |= grbm_soft_reset; 2465 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 2466 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); 2467 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); 2468 2469 udelay(50); 2470 2471 tmp &= ~grbm_soft_reset; 2472 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); 2473 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); 2474 2475 /* Wait a little for things to settle down */ 2476 udelay(50); 2477 } 2478 return 0; 2479 } 2480 2481 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, 2482 uint32_t vmid, 2483 uint32_t gds_base, uint32_t gds_size, 2484 uint32_t gws_base, uint32_t gws_size, 2485 uint32_t oa_base, uint32_t oa_size) 2486 { 2487 struct amdgpu_device *adev = ring->adev; 2488 2489 /* GDS Base */ 2490 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2491 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid, 2492 gds_base); 2493 2494 /* GDS Size */ 2495 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2496 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid, 2497 gds_size); 2498 2499 /* GWS */ 2500 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2501 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid, 2502 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 2503 2504 /* OA */ 2505 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2506 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid, 2507 (1 << (oa_size + oa_base)) - (1 << oa_base)); 2508 } 2509 2510 static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block) 2511 { 2512 struct amdgpu_device *adev = ip_block->adev; 2513 2514 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 2515 AMDGPU_MAX_COMPUTE_RINGS); 2516 gfx_v9_4_3_set_kiq_pm4_funcs(adev); 2517 gfx_v9_4_3_set_ring_funcs(adev); 2518 gfx_v9_4_3_set_irq_funcs(adev); 2519 gfx_v9_4_3_set_gds_init(adev); 2520 gfx_v9_4_3_set_rlc_funcs(adev); 2521 2522 /* init rlcg reg access ctrl */ 2523 gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev); 2524 2525 return gfx_v9_4_3_init_microcode(adev); 2526 } 2527 2528 static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block) 2529 { 2530 struct amdgpu_device *adev = ip_block->adev; 2531 int r; 2532 2533 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 2534 if (r) 2535 return r; 2536 2537 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 2538 if (r) 2539 return r; 2540 2541 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 2542 if (r) 2543 return r; 2544 2545 if (adev->gfx.ras && 2546 adev->gfx.ras->enable_watchdog_timer) 2547 adev->gfx.ras->enable_watchdog_timer(adev); 2548 2549 return 0; 2550 } 2551 2552 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev, 2553 bool enable, int xcc_id) 2554 { 2555 uint32_t def, data; 2556 2557 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 2558 return; 2559 2560 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), 2561 regRLC_CGTT_MGCG_OVERRIDE); 2562 2563 if (enable) 2564 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 2565 else 2566 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 2567 2568 if (def != data) 2569 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2570 regRLC_CGTT_MGCG_OVERRIDE, data); 2571 2572 } 2573 2574 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev, 2575 bool enable, int xcc_id) 2576 { 2577 uint32_t def, data; 2578 2579 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 2580 return; 2581 2582 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), 2583 regRLC_CGTT_MGCG_OVERRIDE); 2584 2585 if (enable) 2586 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; 2587 else 2588 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; 2589 2590 if (def != data) 2591 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2592 regRLC_CGTT_MGCG_OVERRIDE, data); 2593 } 2594 2595 static void 2596 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, 2597 bool enable, int xcc_id) 2598 { 2599 uint32_t data, def; 2600 2601 /* It is disabled by HW by default */ 2602 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 2603 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 2604 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 2605 2606 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 2607 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 2608 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 2609 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 2610 2611 if (def != data) 2612 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 2613 2614 /* MGLS is a global flag to control all MGLS in GFX */ 2615 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 2616 /* 2 - RLC memory Light sleep */ 2617 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { 2618 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); 2619 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 2620 if (def != data) 2621 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); 2622 } 2623 /* 3 - CP memory Light sleep */ 2624 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 2625 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); 2626 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 2627 if (def != data) 2628 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); 2629 } 2630 } 2631 } else { 2632 /* 1 - MGCG_OVERRIDE */ 2633 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 2634 2635 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 2636 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 2637 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 2638 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 2639 2640 if (def != data) 2641 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 2642 2643 /* 2 - disable MGLS in RLC */ 2644 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); 2645 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 2646 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 2647 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); 2648 } 2649 2650 /* 3 - disable MGLS in CP */ 2651 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); 2652 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 2653 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 2654 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); 2655 } 2656 } 2657 2658 } 2659 2660 static void 2661 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 2662 bool enable, int xcc_id) 2663 { 2664 uint32_t def, data; 2665 2666 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 2667 2668 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 2669 /* unset CGCG override */ 2670 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 2671 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 2672 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 2673 else 2674 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 2675 /* update CGCG and CGLS override bits */ 2676 if (def != data) 2677 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 2678 2679 /* CGCG Hysteresis: 400us */ 2680 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); 2681 2682 data = (0x2710 2683 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 2684 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 2685 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 2686 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 2687 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 2688 if (def != data) 2689 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); 2690 2691 /* set IDLE_POLL_COUNT(0x33450100)*/ 2692 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); 2693 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 2694 (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 2695 if (def != data) 2696 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); 2697 } else { 2698 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); 2699 /* reset CGCG/CGLS bits */ 2700 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 2701 /* disable cgcg and cgls in FSM */ 2702 if (def != data) 2703 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); 2704 } 2705 2706 } 2707 2708 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, 2709 bool enable, int xcc_id) 2710 { 2711 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); 2712 2713 if (enable) { 2714 /* FGCG */ 2715 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); 2716 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); 2717 2718 /* CGCG/CGLS should be enabled after MGCG/MGLS 2719 * === MGCG + MGLS === 2720 */ 2721 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, 2722 xcc_id); 2723 /* === CGCG + CGLS === */ 2724 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, 2725 xcc_id); 2726 } else { 2727 /* CGCG/CGLS should be disabled before MGCG/MGLS 2728 * === CGCG + CGLS === 2729 */ 2730 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, 2731 xcc_id); 2732 /* === MGCG + MGLS === */ 2733 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, 2734 xcc_id); 2735 2736 /* FGCG */ 2737 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); 2738 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); 2739 } 2740 2741 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); 2742 2743 return 0; 2744 } 2745 2746 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { 2747 .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, 2748 .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode, 2749 .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode, 2750 .init = gfx_v9_4_3_rlc_init, 2751 .resume = gfx_v9_4_3_rlc_resume, 2752 .stop = gfx_v9_4_3_rlc_stop, 2753 .reset = gfx_v9_4_3_rlc_reset, 2754 .start = gfx_v9_4_3_rlc_start, 2755 .update_spm_vmid = gfx_v9_4_3_update_spm_vmid, 2756 .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range, 2757 }; 2758 2759 static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block, 2760 enum amd_powergating_state state) 2761 { 2762 return 0; 2763 } 2764 2765 static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block, 2766 enum amd_clockgating_state state) 2767 { 2768 struct amdgpu_device *adev = ip_block->adev; 2769 int i, num_xcc; 2770 2771 if (amdgpu_sriov_vf(adev)) 2772 return 0; 2773 2774 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2775 for (i = 0; i < num_xcc; i++) 2776 gfx_v9_4_3_xcc_update_gfx_clock_gating( 2777 adev, state == AMD_CG_STATE_GATE, i); 2778 2779 return 0; 2780 } 2781 2782 static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) 2783 { 2784 struct amdgpu_device *adev = ip_block->adev; 2785 int data; 2786 2787 if (amdgpu_sriov_vf(adev)) 2788 *flags = 0; 2789 2790 /* AMD_CG_SUPPORT_GFX_MGCG */ 2791 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE)); 2792 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 2793 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 2794 2795 /* AMD_CG_SUPPORT_GFX_CGCG */ 2796 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL)); 2797 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 2798 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 2799 2800 /* AMD_CG_SUPPORT_GFX_CGLS */ 2801 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 2802 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 2803 2804 /* AMD_CG_SUPPORT_GFX_RLC_LS */ 2805 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL)); 2806 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 2807 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 2808 2809 /* AMD_CG_SUPPORT_GFX_CP_LS */ 2810 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL)); 2811 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 2812 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 2813 } 2814 2815 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring) 2816 { 2817 struct amdgpu_device *adev = ring->adev; 2818 u32 ref_and_mask, reg_mem_engine; 2819 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 2820 2821 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 2822 switch (ring->me) { 2823 case 1: 2824 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 2825 break; 2826 case 2: 2827 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 2828 break; 2829 default: 2830 return; 2831 } 2832 reg_mem_engine = 0; 2833 } else { 2834 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 2835 reg_mem_engine = 1; /* pfp */ 2836 } 2837 2838 gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1, 2839 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 2840 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 2841 ref_and_mask, ref_and_mask, 0x20); 2842 } 2843 2844 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring, 2845 struct amdgpu_job *job, 2846 struct amdgpu_ib *ib, 2847 uint32_t flags) 2848 { 2849 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2850 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 2851 2852 /* Currently, there is a high possibility to get wave ID mismatch 2853 * between ME and GDS, leading to a hw deadlock, because ME generates 2854 * different wave IDs than the GDS expects. This situation happens 2855 * randomly when at least 5 compute pipes use GDS ordered append. 2856 * The wave IDs generated by ME are also wrong after suspend/resume. 2857 * Those are probably bugs somewhere else in the kernel driver. 2858 * 2859 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 2860 * GDS to 0 for this ring (me/pipe). 2861 */ 2862 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 2863 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2864 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 2865 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 2866 } 2867 2868 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2869 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 2870 amdgpu_ring_write(ring, 2871 #ifdef __BIG_ENDIAN 2872 (2 << 0) | 2873 #endif 2874 lower_32_bits(ib->gpu_addr)); 2875 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 2876 amdgpu_ring_write(ring, control); 2877 } 2878 2879 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 2880 u64 seq, unsigned flags) 2881 { 2882 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 2883 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 2884 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; 2885 2886 /* RELEASE_MEM - flush caches, send int */ 2887 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 2888 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | 2889 EOP_TC_NC_ACTION_EN) : 2890 (EOP_TCL1_ACTION_EN | 2891 EOP_TC_ACTION_EN | 2892 EOP_TC_WB_ACTION_EN | 2893 EOP_TC_MD_ACTION_EN)) | 2894 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2895 EVENT_INDEX(5))); 2896 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 2897 2898 /* 2899 * the address should be Qword aligned if 64bit write, Dword 2900 * aligned if only send 32bit data low (discard data high) 2901 */ 2902 if (write64bit) 2903 BUG_ON(addr & 0x7); 2904 else 2905 BUG_ON(addr & 0x3); 2906 amdgpu_ring_write(ring, lower_32_bits(addr)); 2907 amdgpu_ring_write(ring, upper_32_bits(addr)); 2908 amdgpu_ring_write(ring, lower_32_bits(seq)); 2909 amdgpu_ring_write(ring, upper_32_bits(seq)); 2910 amdgpu_ring_write(ring, 0); 2911 } 2912 2913 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 2914 { 2915 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 2916 uint32_t seq = ring->fence_drv.sync_seq; 2917 uint64_t addr = ring->fence_drv.gpu_addr; 2918 2919 gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0, 2920 lower_32_bits(addr), upper_32_bits(addr), 2921 seq, 0xffffffff, 4); 2922 } 2923 2924 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring, 2925 unsigned vmid, uint64_t pd_addr) 2926 { 2927 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 2928 } 2929 2930 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring) 2931 { 2932 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ 2933 } 2934 2935 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring) 2936 { 2937 u64 wptr; 2938 2939 /* XXX check if swapping is necessary on BE */ 2940 if (ring->use_doorbell) 2941 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); 2942 else 2943 BUG(); 2944 return wptr; 2945 } 2946 2947 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring) 2948 { 2949 struct amdgpu_device *adev = ring->adev; 2950 2951 /* XXX check if swapping is necessary on BE */ 2952 if (ring->use_doorbell) { 2953 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); 2954 WDOORBELL64(ring->doorbell_index, ring->wptr); 2955 } else { 2956 BUG(); /* only DOORBELL method supported on gfx9 now */ 2957 } 2958 } 2959 2960 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 2961 u64 seq, unsigned int flags) 2962 { 2963 struct amdgpu_device *adev = ring->adev; 2964 2965 /* we only allocate 32bit for each seq wb address */ 2966 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 2967 2968 /* write fence seq to the "addr" */ 2969 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 2970 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2971 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 2972 amdgpu_ring_write(ring, lower_32_bits(addr)); 2973 amdgpu_ring_write(ring, upper_32_bits(addr)); 2974 amdgpu_ring_write(ring, lower_32_bits(seq)); 2975 2976 if (flags & AMDGPU_FENCE_FLAG_INT) { 2977 /* set register to trigger INT */ 2978 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 2979 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2980 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 2981 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS)); 2982 amdgpu_ring_write(ring, 0); 2983 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 2984 } 2985 } 2986 2987 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 2988 uint32_t reg_val_offs) 2989 { 2990 struct amdgpu_device *adev = ring->adev; 2991 2992 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); 2993 2994 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 2995 amdgpu_ring_write(ring, 0 | /* src: register*/ 2996 (5 << 8) | /* dst: memory */ 2997 (1 << 20)); /* write confirm */ 2998 amdgpu_ring_write(ring, reg); 2999 amdgpu_ring_write(ring, 0); 3000 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 3001 reg_val_offs * 4)); 3002 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 3003 reg_val_offs * 4)); 3004 } 3005 3006 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 3007 uint32_t val) 3008 { 3009 uint32_t cmd = 0; 3010 3011 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); 3012 3013 switch (ring->funcs->type) { 3014 case AMDGPU_RING_TYPE_GFX: 3015 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 3016 break; 3017 case AMDGPU_RING_TYPE_KIQ: 3018 cmd = (1 << 16); /* no inc addr */ 3019 break; 3020 default: 3021 cmd = WR_CONFIRM; 3022 break; 3023 } 3024 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3025 amdgpu_ring_write(ring, cmd); 3026 amdgpu_ring_write(ring, reg); 3027 amdgpu_ring_write(ring, 0); 3028 amdgpu_ring_write(ring, val); 3029 } 3030 3031 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 3032 uint32_t val, uint32_t mask) 3033 { 3034 gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 3035 } 3036 3037 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 3038 uint32_t reg0, uint32_t reg1, 3039 uint32_t ref, uint32_t mask) 3040 { 3041 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, 3042 ref, mask); 3043 } 3044 3045 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring, 3046 unsigned vmid) 3047 { 3048 struct amdgpu_device *adev = ring->adev; 3049 uint32_t value = 0; 3050 3051 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 3052 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 3053 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 3054 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 3055 amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id); 3056 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value); 3057 amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id); 3058 } 3059 3060 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3061 struct amdgpu_device *adev, int me, int pipe, 3062 enum amdgpu_interrupt_state state, int xcc_id) 3063 { 3064 u32 mec_int_cntl, mec_int_cntl_reg; 3065 3066 /* 3067 * amdgpu controls only the first MEC. That's why this function only 3068 * handles the setting of interrupts for this specific MEC. All other 3069 * pipes' interrupts are set by amdkfd. 3070 */ 3071 3072 if (me == 1) { 3073 switch (pipe) { 3074 case 0: 3075 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); 3076 break; 3077 case 1: 3078 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); 3079 break; 3080 case 2: 3081 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); 3082 break; 3083 case 3: 3084 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); 3085 break; 3086 default: 3087 DRM_DEBUG("invalid pipe %d\n", pipe); 3088 return; 3089 } 3090 } else { 3091 DRM_DEBUG("invalid me %d\n", me); 3092 return; 3093 } 3094 3095 switch (state) { 3096 case AMDGPU_IRQ_STATE_DISABLE: 3097 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); 3098 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3099 TIME_STAMP_INT_ENABLE, 0); 3100 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); 3101 break; 3102 case AMDGPU_IRQ_STATE_ENABLE: 3103 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); 3104 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3105 TIME_STAMP_INT_ENABLE, 1); 3106 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); 3107 break; 3108 default: 3109 break; 3110 } 3111 } 3112 3113 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev, 3114 int xcc_id, int me, int pipe) 3115 { 3116 /* 3117 * amdgpu controls only the first MEC. That's why this function only 3118 * handles the setting of interrupts for this specific MEC. All other 3119 * pipes' interrupts are set by amdkfd. 3120 */ 3121 if (me != 1) 3122 return 0; 3123 3124 switch (pipe) { 3125 case 0: 3126 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); 3127 case 1: 3128 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); 3129 case 2: 3130 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); 3131 case 3: 3132 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); 3133 default: 3134 return 0; 3135 } 3136 } 3137 3138 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, 3139 struct amdgpu_irq_src *source, 3140 unsigned type, 3141 enum amdgpu_interrupt_state state) 3142 { 3143 u32 mec_int_cntl_reg, mec_int_cntl; 3144 int i, j, k, num_xcc; 3145 3146 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3147 switch (state) { 3148 case AMDGPU_IRQ_STATE_DISABLE: 3149 case AMDGPU_IRQ_STATE_ENABLE: 3150 for (i = 0; i < num_xcc; i++) { 3151 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, 3152 PRIV_REG_INT_ENABLE, 3153 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 3154 for (j = 0; j < adev->gfx.mec.num_mec; j++) { 3155 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 3156 /* MECs start at 1 */ 3157 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k); 3158 3159 if (mec_int_cntl_reg) { 3160 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i); 3161 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3162 PRIV_REG_INT_ENABLE, 3163 state == AMDGPU_IRQ_STATE_ENABLE ? 3164 1 : 0); 3165 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i); 3166 } 3167 } 3168 } 3169 } 3170 break; 3171 default: 3172 break; 3173 } 3174 3175 return 0; 3176 } 3177 3178 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev, 3179 struct amdgpu_irq_src *source, 3180 unsigned type, 3181 enum amdgpu_interrupt_state state) 3182 { 3183 u32 mec_int_cntl_reg, mec_int_cntl; 3184 int i, j, k, num_xcc; 3185 3186 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3187 switch (state) { 3188 case AMDGPU_IRQ_STATE_DISABLE: 3189 case AMDGPU_IRQ_STATE_ENABLE: 3190 for (i = 0; i < num_xcc; i++) { 3191 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, 3192 OPCODE_ERROR_INT_ENABLE, 3193 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 3194 for (j = 0; j < adev->gfx.mec.num_mec; j++) { 3195 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 3196 /* MECs start at 1 */ 3197 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k); 3198 3199 if (mec_int_cntl_reg) { 3200 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i); 3201 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3202 OPCODE_ERROR_INT_ENABLE, 3203 state == AMDGPU_IRQ_STATE_ENABLE ? 3204 1 : 0); 3205 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i); 3206 } 3207 } 3208 } 3209 } 3210 break; 3211 default: 3212 break; 3213 } 3214 3215 return 0; 3216 } 3217 3218 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, 3219 struct amdgpu_irq_src *source, 3220 unsigned type, 3221 enum amdgpu_interrupt_state state) 3222 { 3223 int i, num_xcc; 3224 3225 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3226 switch (state) { 3227 case AMDGPU_IRQ_STATE_DISABLE: 3228 case AMDGPU_IRQ_STATE_ENABLE: 3229 for (i = 0; i < num_xcc; i++) 3230 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, 3231 PRIV_INSTR_INT_ENABLE, 3232 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 3233 break; 3234 default: 3235 break; 3236 } 3237 3238 return 0; 3239 } 3240 3241 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, 3242 struct amdgpu_irq_src *src, 3243 unsigned type, 3244 enum amdgpu_interrupt_state state) 3245 { 3246 int i, num_xcc; 3247 3248 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3249 for (i = 0; i < num_xcc; i++) { 3250 switch (type) { 3251 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 3252 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3253 adev, 1, 0, state, i); 3254 break; 3255 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 3256 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3257 adev, 1, 1, state, i); 3258 break; 3259 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 3260 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3261 adev, 1, 2, state, i); 3262 break; 3263 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 3264 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3265 adev, 1, 3, state, i); 3266 break; 3267 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 3268 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3269 adev, 2, 0, state, i); 3270 break; 3271 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 3272 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3273 adev, 2, 1, state, i); 3274 break; 3275 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 3276 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3277 adev, 2, 2, state, i); 3278 break; 3279 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 3280 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3281 adev, 2, 3, state, i); 3282 break; 3283 default: 3284 break; 3285 } 3286 } 3287 3288 return 0; 3289 } 3290 3291 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, 3292 struct amdgpu_irq_src *source, 3293 struct amdgpu_iv_entry *entry) 3294 { 3295 int i, xcc_id; 3296 u8 me_id, pipe_id, queue_id; 3297 struct amdgpu_ring *ring; 3298 3299 DRM_DEBUG("IH: CP EOP\n"); 3300 me_id = (entry->ring_id & 0x0c) >> 2; 3301 pipe_id = (entry->ring_id & 0x03) >> 0; 3302 queue_id = (entry->ring_id & 0x70) >> 4; 3303 3304 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); 3305 3306 if (xcc_id == -EINVAL) 3307 return -EINVAL; 3308 3309 switch (me_id) { 3310 case 0: 3311 case 1: 3312 case 2: 3313 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3314 ring = &adev->gfx.compute_ring 3315 [i + 3316 xcc_id * adev->gfx.num_compute_rings]; 3317 /* Per-queue interrupt is supported for MEC starting from VI. 3318 * The interrupt can only be enabled/disabled per pipe instead of per queue. 3319 */ 3320 3321 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 3322 amdgpu_fence_process(ring); 3323 } 3324 break; 3325 } 3326 return 0; 3327 } 3328 3329 static void gfx_v9_4_3_fault(struct amdgpu_device *adev, 3330 struct amdgpu_iv_entry *entry) 3331 { 3332 u8 me_id, pipe_id, queue_id; 3333 struct amdgpu_ring *ring; 3334 int i, xcc_id; 3335 3336 me_id = (entry->ring_id & 0x0c) >> 2; 3337 pipe_id = (entry->ring_id & 0x03) >> 0; 3338 queue_id = (entry->ring_id & 0x70) >> 4; 3339 3340 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); 3341 3342 if (xcc_id == -EINVAL) 3343 return; 3344 3345 switch (me_id) { 3346 case 0: 3347 case 1: 3348 case 2: 3349 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3350 ring = &adev->gfx.compute_ring 3351 [i + 3352 xcc_id * adev->gfx.num_compute_rings]; 3353 if (ring->me == me_id && ring->pipe == pipe_id && 3354 ring->queue == queue_id) 3355 drm_sched_fault(&ring->sched); 3356 } 3357 break; 3358 } 3359 } 3360 3361 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev, 3362 struct amdgpu_irq_src *source, 3363 struct amdgpu_iv_entry *entry) 3364 { 3365 DRM_ERROR("Illegal register access in command stream\n"); 3366 gfx_v9_4_3_fault(adev, entry); 3367 return 0; 3368 } 3369 3370 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev, 3371 struct amdgpu_irq_src *source, 3372 struct amdgpu_iv_entry *entry) 3373 { 3374 DRM_ERROR("Illegal opcode in command stream\n"); 3375 gfx_v9_4_3_fault(adev, entry); 3376 return 0; 3377 } 3378 3379 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev, 3380 struct amdgpu_irq_src *source, 3381 struct amdgpu_iv_entry *entry) 3382 { 3383 DRM_ERROR("Illegal instruction in command stream\n"); 3384 gfx_v9_4_3_fault(adev, entry); 3385 return 0; 3386 } 3387 3388 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring) 3389 { 3390 const unsigned int cp_coher_cntl = 3391 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) | 3392 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) | 3393 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) | 3394 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) | 3395 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1); 3396 3397 /* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */ 3398 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); 3399 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */ 3400 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 3401 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 3402 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 3403 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 3404 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 3405 } 3406 3407 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring, 3408 uint32_t pipe, bool enable) 3409 { 3410 struct amdgpu_device *adev = ring->adev; 3411 uint32_t val; 3412 uint32_t wcl_cs_reg; 3413 3414 /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */ 3415 val = enable ? 0x1 : 0x7f; 3416 3417 switch (pipe) { 3418 case 0: 3419 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0); 3420 break; 3421 case 1: 3422 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1); 3423 break; 3424 case 2: 3425 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2); 3426 break; 3427 case 3: 3428 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3); 3429 break; 3430 default: 3431 DRM_DEBUG("invalid pipe %d\n", pipe); 3432 return; 3433 } 3434 3435 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); 3436 3437 } 3438 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable) 3439 { 3440 struct amdgpu_device *adev = ring->adev; 3441 uint32_t val; 3442 int i; 3443 3444 /* regSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit 3445 * number of gfx waves. Setting 5 bit will make sure gfx only gets 3446 * around 25% of gpu resources. 3447 */ 3448 val = enable ? 0x1f : 0x07ffffff; 3449 amdgpu_ring_emit_wreg(ring, 3450 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX), 3451 val); 3452 3453 /* Restrict waves for normal/low priority compute queues as well 3454 * to get best QoS for high priority compute jobs. 3455 * 3456 * amdgpu controls only 1st ME(0-3 CS pipes). 3457 */ 3458 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 3459 if (i != ring->pipe) 3460 gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable); 3461 3462 } 3463 } 3464 3465 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me, 3466 uint32_t pipe, uint32_t queue, 3467 uint32_t xcc_id) 3468 { 3469 int i, r; 3470 /* make sure dequeue is complete*/ 3471 gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id); 3472 mutex_lock(&adev->srbm_mutex); 3473 soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id)); 3474 for (i = 0; i < adev->usec_timeout; i++) { 3475 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) 3476 break; 3477 udelay(1); 3478 } 3479 if (i >= adev->usec_timeout) 3480 r = -ETIMEDOUT; 3481 else 3482 r = 0; 3483 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 3484 mutex_unlock(&adev->srbm_mutex); 3485 gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id); 3486 3487 return r; 3488 3489 } 3490 3491 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev) 3492 { 3493 if (!!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) 3494 return true; 3495 else 3496 dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n"); 3497 3498 return false; 3499 } 3500 3501 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring) 3502 { 3503 struct amdgpu_device *adev = ring->adev; 3504 uint32_t reset_pipe, clean_pipe; 3505 int r; 3506 3507 if (!gfx_v9_4_3_pipe_reset_support(adev)) 3508 return -EINVAL; 3509 3510 gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id); 3511 mutex_lock(&adev->srbm_mutex); 3512 3513 reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL); 3514 clean_pipe = reset_pipe; 3515 3516 if (ring->me == 1) { 3517 switch (ring->pipe) { 3518 case 0: 3519 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3520 MEC_ME1_PIPE0_RESET, 1); 3521 break; 3522 case 1: 3523 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3524 MEC_ME1_PIPE1_RESET, 1); 3525 break; 3526 case 2: 3527 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3528 MEC_ME1_PIPE2_RESET, 1); 3529 break; 3530 case 3: 3531 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3532 MEC_ME1_PIPE3_RESET, 1); 3533 break; 3534 default: 3535 break; 3536 } 3537 } else { 3538 if (ring->pipe) 3539 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3540 MEC_ME2_PIPE1_RESET, 1); 3541 else 3542 reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL, 3543 MEC_ME2_PIPE0_RESET, 1); 3544 } 3545 3546 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe); 3547 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe); 3548 mutex_unlock(&adev->srbm_mutex); 3549 gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id); 3550 3551 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id); 3552 return r; 3553 } 3554 3555 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, 3556 unsigned int vmid, 3557 struct amdgpu_fence *timedout_fence) 3558 { 3559 struct amdgpu_device *adev = ring->adev; 3560 struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; 3561 struct amdgpu_ring *kiq_ring = &kiq->ring; 3562 int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE; 3563 unsigned long flags; 3564 int r; 3565 3566 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 3567 return -EINVAL; 3568 3569 amdgpu_ring_reset_helper_begin(ring, timedout_fence); 3570 3571 spin_lock_irqsave(&kiq->ring_lock, flags); 3572 3573 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 3574 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3575 return -ENOMEM; 3576 } 3577 3578 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 3579 0, 0); 3580 amdgpu_ring_commit(kiq_ring); 3581 3582 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3583 3584 r = amdgpu_ring_test_ring(kiq_ring); 3585 if (r) { 3586 dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n", 3587 ring->name); 3588 goto pipe_reset; 3589 } 3590 3591 r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id); 3592 if (r) 3593 dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n"); 3594 3595 pipe_reset: 3596 if (r) { 3597 if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) 3598 return -EOPNOTSUPP; 3599 r = gfx_v9_4_3_reset_hw_pipe(ring); 3600 reset_mode = AMDGPU_RESET_TYPE_PER_PIPE; 3601 dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name, 3602 r ? "failed" : "successfully"); 3603 if (r) 3604 return r; 3605 } 3606 3607 gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3608 3609 spin_lock_irqsave(&kiq->ring_lock, flags); 3610 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); 3611 if (r) { 3612 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3613 return -ENOMEM; 3614 } 3615 kiq->pmf->kiq_map_queues(kiq_ring, ring); 3616 amdgpu_ring_commit(kiq_ring); 3617 r = amdgpu_ring_test_ring(kiq_ring); 3618 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3619 if (r) { 3620 if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) 3621 goto pipe_reset; 3622 3623 dev_err(adev->dev, "fail to remap queue\n"); 3624 return r; 3625 } 3626 3627 if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) { 3628 r = amdgpu_ring_test_ring(ring); 3629 if (r) 3630 goto pipe_reset; 3631 } 3632 3633 3634 return amdgpu_ring_reset_helper_end(ring, timedout_fence); 3635 } 3636 3637 enum amdgpu_gfx_cp_ras_mem_id { 3638 AMDGPU_GFX_CP_MEM1 = 1, 3639 AMDGPU_GFX_CP_MEM2, 3640 AMDGPU_GFX_CP_MEM3, 3641 AMDGPU_GFX_CP_MEM4, 3642 AMDGPU_GFX_CP_MEM5, 3643 }; 3644 3645 enum amdgpu_gfx_gcea_ras_mem_id { 3646 AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4, 3647 AMDGPU_GFX_GCEA_IORD_CMDMEM, 3648 AMDGPU_GFX_GCEA_GMIWR_CMDMEM, 3649 AMDGPU_GFX_GCEA_GMIRD_CMDMEM, 3650 AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, 3651 AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, 3652 AMDGPU_GFX_GCEA_MAM_DMEM0, 3653 AMDGPU_GFX_GCEA_MAM_DMEM1, 3654 AMDGPU_GFX_GCEA_MAM_DMEM2, 3655 AMDGPU_GFX_GCEA_MAM_DMEM3, 3656 AMDGPU_GFX_GCEA_MAM_AMEM0, 3657 AMDGPU_GFX_GCEA_MAM_AMEM1, 3658 AMDGPU_GFX_GCEA_MAM_AMEM2, 3659 AMDGPU_GFX_GCEA_MAM_AMEM3, 3660 AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, 3661 AMDGPU_GFX_GCEA_WRET_TAGMEM, 3662 AMDGPU_GFX_GCEA_RRET_TAGMEM, 3663 AMDGPU_GFX_GCEA_IOWR_DATAMEM, 3664 AMDGPU_GFX_GCEA_GMIWR_DATAMEM, 3665 AMDGPU_GFX_GCEA_DRAM_DATAMEM, 3666 }; 3667 3668 enum amdgpu_gfx_gc_cane_ras_mem_id { 3669 AMDGPU_GFX_GC_CANE_MEM0 = 0, 3670 }; 3671 3672 enum amdgpu_gfx_gcutcl2_ras_mem_id { 3673 AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160, 3674 }; 3675 3676 enum amdgpu_gfx_gds_ras_mem_id { 3677 AMDGPU_GFX_GDS_MEM0 = 0, 3678 }; 3679 3680 enum amdgpu_gfx_lds_ras_mem_id { 3681 AMDGPU_GFX_LDS_BANK0 = 0, 3682 AMDGPU_GFX_LDS_BANK1, 3683 AMDGPU_GFX_LDS_BANK2, 3684 AMDGPU_GFX_LDS_BANK3, 3685 AMDGPU_GFX_LDS_BANK4, 3686 AMDGPU_GFX_LDS_BANK5, 3687 AMDGPU_GFX_LDS_BANK6, 3688 AMDGPU_GFX_LDS_BANK7, 3689 AMDGPU_GFX_LDS_BANK8, 3690 AMDGPU_GFX_LDS_BANK9, 3691 AMDGPU_GFX_LDS_BANK10, 3692 AMDGPU_GFX_LDS_BANK11, 3693 AMDGPU_GFX_LDS_BANK12, 3694 AMDGPU_GFX_LDS_BANK13, 3695 AMDGPU_GFX_LDS_BANK14, 3696 AMDGPU_GFX_LDS_BANK15, 3697 AMDGPU_GFX_LDS_BANK16, 3698 AMDGPU_GFX_LDS_BANK17, 3699 AMDGPU_GFX_LDS_BANK18, 3700 AMDGPU_GFX_LDS_BANK19, 3701 AMDGPU_GFX_LDS_BANK20, 3702 AMDGPU_GFX_LDS_BANK21, 3703 AMDGPU_GFX_LDS_BANK22, 3704 AMDGPU_GFX_LDS_BANK23, 3705 AMDGPU_GFX_LDS_BANK24, 3706 AMDGPU_GFX_LDS_BANK25, 3707 AMDGPU_GFX_LDS_BANK26, 3708 AMDGPU_GFX_LDS_BANK27, 3709 AMDGPU_GFX_LDS_BANK28, 3710 AMDGPU_GFX_LDS_BANK29, 3711 AMDGPU_GFX_LDS_BANK30, 3712 AMDGPU_GFX_LDS_BANK31, 3713 AMDGPU_GFX_LDS_SP_BUFFER_A, 3714 AMDGPU_GFX_LDS_SP_BUFFER_B, 3715 }; 3716 3717 enum amdgpu_gfx_rlc_ras_mem_id { 3718 AMDGPU_GFX_RLC_GPMF32 = 1, 3719 AMDGPU_GFX_RLC_RLCVF32, 3720 AMDGPU_GFX_RLC_SCRATCH, 3721 AMDGPU_GFX_RLC_SRM_ARAM, 3722 AMDGPU_GFX_RLC_SRM_DRAM, 3723 AMDGPU_GFX_RLC_TCTAG, 3724 AMDGPU_GFX_RLC_SPM_SE, 3725 AMDGPU_GFX_RLC_SPM_GRBMT, 3726 }; 3727 3728 enum amdgpu_gfx_sp_ras_mem_id { 3729 AMDGPU_GFX_SP_SIMDID0 = 0, 3730 }; 3731 3732 enum amdgpu_gfx_spi_ras_mem_id { 3733 AMDGPU_GFX_SPI_MEM0 = 0, 3734 AMDGPU_GFX_SPI_MEM1, 3735 AMDGPU_GFX_SPI_MEM2, 3736 AMDGPU_GFX_SPI_MEM3, 3737 }; 3738 3739 enum amdgpu_gfx_sqc_ras_mem_id { 3740 AMDGPU_GFX_SQC_INST_CACHE_A = 100, 3741 AMDGPU_GFX_SQC_INST_CACHE_B = 101, 3742 AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102, 3743 AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103, 3744 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104, 3745 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105, 3746 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106, 3747 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107, 3748 AMDGPU_GFX_SQC_DATA_CACHE_A = 200, 3749 AMDGPU_GFX_SQC_DATA_CACHE_B = 201, 3750 AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202, 3751 AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203, 3752 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204, 3753 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205, 3754 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206, 3755 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207, 3756 AMDGPU_GFX_SQC_DIRTY_BIT_A = 208, 3757 AMDGPU_GFX_SQC_DIRTY_BIT_B = 209, 3758 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210, 3759 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211, 3760 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212, 3761 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213, 3762 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108, 3763 }; 3764 3765 enum amdgpu_gfx_sq_ras_mem_id { 3766 AMDGPU_GFX_SQ_SGPR_MEM0 = 0, 3767 AMDGPU_GFX_SQ_SGPR_MEM1, 3768 AMDGPU_GFX_SQ_SGPR_MEM2, 3769 AMDGPU_GFX_SQ_SGPR_MEM3, 3770 }; 3771 3772 enum amdgpu_gfx_ta_ras_mem_id { 3773 AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1, 3774 AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, 3775 AMDGPU_GFX_TA_FS_CFIFO_RAM, 3776 AMDGPU_GFX_TA_FSX_LFIFO, 3777 AMDGPU_GFX_TA_FS_DFIFO_RAM, 3778 }; 3779 3780 enum amdgpu_gfx_tcc_ras_mem_id { 3781 AMDGPU_GFX_TCC_MEM1 = 1, 3782 }; 3783 3784 enum amdgpu_gfx_tca_ras_mem_id { 3785 AMDGPU_GFX_TCA_MEM1 = 1, 3786 }; 3787 3788 enum amdgpu_gfx_tci_ras_mem_id { 3789 AMDGPU_GFX_TCIW_MEM = 1, 3790 }; 3791 3792 enum amdgpu_gfx_tcp_ras_mem_id { 3793 AMDGPU_GFX_TCP_LFIFO0 = 1, 3794 AMDGPU_GFX_TCP_SET0BANK0_RAM, 3795 AMDGPU_GFX_TCP_SET0BANK1_RAM, 3796 AMDGPU_GFX_TCP_SET0BANK2_RAM, 3797 AMDGPU_GFX_TCP_SET0BANK3_RAM, 3798 AMDGPU_GFX_TCP_SET1BANK0_RAM, 3799 AMDGPU_GFX_TCP_SET1BANK1_RAM, 3800 AMDGPU_GFX_TCP_SET1BANK2_RAM, 3801 AMDGPU_GFX_TCP_SET1BANK3_RAM, 3802 AMDGPU_GFX_TCP_SET2BANK0_RAM, 3803 AMDGPU_GFX_TCP_SET2BANK1_RAM, 3804 AMDGPU_GFX_TCP_SET2BANK2_RAM, 3805 AMDGPU_GFX_TCP_SET2BANK3_RAM, 3806 AMDGPU_GFX_TCP_SET3BANK0_RAM, 3807 AMDGPU_GFX_TCP_SET3BANK1_RAM, 3808 AMDGPU_GFX_TCP_SET3BANK2_RAM, 3809 AMDGPU_GFX_TCP_SET3BANK3_RAM, 3810 AMDGPU_GFX_TCP_VM_FIFO, 3811 AMDGPU_GFX_TCP_DB_TAGRAM0, 3812 AMDGPU_GFX_TCP_DB_TAGRAM1, 3813 AMDGPU_GFX_TCP_DB_TAGRAM2, 3814 AMDGPU_GFX_TCP_DB_TAGRAM3, 3815 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, 3816 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, 3817 AMDGPU_GFX_TCP_CMD_FIFO, 3818 }; 3819 3820 enum amdgpu_gfx_td_ras_mem_id { 3821 AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1, 3822 AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, 3823 AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, 3824 }; 3825 3826 enum amdgpu_gfx_tcx_ras_mem_id { 3827 AMDGPU_GFX_TCX_FIFOD0 = 0, 3828 AMDGPU_GFX_TCX_FIFOD1, 3829 AMDGPU_GFX_TCX_FIFOD2, 3830 AMDGPU_GFX_TCX_FIFOD3, 3831 AMDGPU_GFX_TCX_FIFOD4, 3832 AMDGPU_GFX_TCX_FIFOD5, 3833 AMDGPU_GFX_TCX_FIFOD6, 3834 AMDGPU_GFX_TCX_FIFOD7, 3835 AMDGPU_GFX_TCX_FIFOB0, 3836 AMDGPU_GFX_TCX_FIFOB1, 3837 AMDGPU_GFX_TCX_FIFOB2, 3838 AMDGPU_GFX_TCX_FIFOB3, 3839 AMDGPU_GFX_TCX_FIFOB4, 3840 AMDGPU_GFX_TCX_FIFOB5, 3841 AMDGPU_GFX_TCX_FIFOB6, 3842 AMDGPU_GFX_TCX_FIFOB7, 3843 AMDGPU_GFX_TCX_FIFOA0, 3844 AMDGPU_GFX_TCX_FIFOA1, 3845 AMDGPU_GFX_TCX_FIFOA2, 3846 AMDGPU_GFX_TCX_FIFOA3, 3847 AMDGPU_GFX_TCX_FIFOA4, 3848 AMDGPU_GFX_TCX_FIFOA5, 3849 AMDGPU_GFX_TCX_FIFOA6, 3850 AMDGPU_GFX_TCX_FIFOA7, 3851 AMDGPU_GFX_TCX_CFIFO0, 3852 AMDGPU_GFX_TCX_CFIFO1, 3853 AMDGPU_GFX_TCX_CFIFO2, 3854 AMDGPU_GFX_TCX_CFIFO3, 3855 AMDGPU_GFX_TCX_CFIFO4, 3856 AMDGPU_GFX_TCX_CFIFO5, 3857 AMDGPU_GFX_TCX_CFIFO6, 3858 AMDGPU_GFX_TCX_CFIFO7, 3859 AMDGPU_GFX_TCX_FIFO_ACKB0, 3860 AMDGPU_GFX_TCX_FIFO_ACKB1, 3861 AMDGPU_GFX_TCX_FIFO_ACKB2, 3862 AMDGPU_GFX_TCX_FIFO_ACKB3, 3863 AMDGPU_GFX_TCX_FIFO_ACKB4, 3864 AMDGPU_GFX_TCX_FIFO_ACKB5, 3865 AMDGPU_GFX_TCX_FIFO_ACKB6, 3866 AMDGPU_GFX_TCX_FIFO_ACKB7, 3867 AMDGPU_GFX_TCX_FIFO_ACKD0, 3868 AMDGPU_GFX_TCX_FIFO_ACKD1, 3869 AMDGPU_GFX_TCX_FIFO_ACKD2, 3870 AMDGPU_GFX_TCX_FIFO_ACKD3, 3871 AMDGPU_GFX_TCX_FIFO_ACKD4, 3872 AMDGPU_GFX_TCX_FIFO_ACKD5, 3873 AMDGPU_GFX_TCX_FIFO_ACKD6, 3874 AMDGPU_GFX_TCX_FIFO_ACKD7, 3875 AMDGPU_GFX_TCX_DST_FIFOA0, 3876 AMDGPU_GFX_TCX_DST_FIFOA1, 3877 AMDGPU_GFX_TCX_DST_FIFOA2, 3878 AMDGPU_GFX_TCX_DST_FIFOA3, 3879 AMDGPU_GFX_TCX_DST_FIFOA4, 3880 AMDGPU_GFX_TCX_DST_FIFOA5, 3881 AMDGPU_GFX_TCX_DST_FIFOA6, 3882 AMDGPU_GFX_TCX_DST_FIFOA7, 3883 AMDGPU_GFX_TCX_DST_FIFOB0, 3884 AMDGPU_GFX_TCX_DST_FIFOB1, 3885 AMDGPU_GFX_TCX_DST_FIFOB2, 3886 AMDGPU_GFX_TCX_DST_FIFOB3, 3887 AMDGPU_GFX_TCX_DST_FIFOB4, 3888 AMDGPU_GFX_TCX_DST_FIFOB5, 3889 AMDGPU_GFX_TCX_DST_FIFOB6, 3890 AMDGPU_GFX_TCX_DST_FIFOB7, 3891 AMDGPU_GFX_TCX_DST_FIFOD0, 3892 AMDGPU_GFX_TCX_DST_FIFOD1, 3893 AMDGPU_GFX_TCX_DST_FIFOD2, 3894 AMDGPU_GFX_TCX_DST_FIFOD3, 3895 AMDGPU_GFX_TCX_DST_FIFOD4, 3896 AMDGPU_GFX_TCX_DST_FIFOD5, 3897 AMDGPU_GFX_TCX_DST_FIFOD6, 3898 AMDGPU_GFX_TCX_DST_FIFOD7, 3899 AMDGPU_GFX_TCX_DST_FIFO_ACKB0, 3900 AMDGPU_GFX_TCX_DST_FIFO_ACKB1, 3901 AMDGPU_GFX_TCX_DST_FIFO_ACKB2, 3902 AMDGPU_GFX_TCX_DST_FIFO_ACKB3, 3903 AMDGPU_GFX_TCX_DST_FIFO_ACKB4, 3904 AMDGPU_GFX_TCX_DST_FIFO_ACKB5, 3905 AMDGPU_GFX_TCX_DST_FIFO_ACKB6, 3906 AMDGPU_GFX_TCX_DST_FIFO_ACKB7, 3907 AMDGPU_GFX_TCX_DST_FIFO_ACKD0, 3908 AMDGPU_GFX_TCX_DST_FIFO_ACKD1, 3909 AMDGPU_GFX_TCX_DST_FIFO_ACKD2, 3910 AMDGPU_GFX_TCX_DST_FIFO_ACKD3, 3911 AMDGPU_GFX_TCX_DST_FIFO_ACKD4, 3912 AMDGPU_GFX_TCX_DST_FIFO_ACKD5, 3913 AMDGPU_GFX_TCX_DST_FIFO_ACKD6, 3914 AMDGPU_GFX_TCX_DST_FIFO_ACKD7, 3915 }; 3916 3917 enum amdgpu_gfx_atc_l2_ras_mem_id { 3918 AMDGPU_GFX_ATC_L2_MEM0 = 0, 3919 }; 3920 3921 enum amdgpu_gfx_utcl2_ras_mem_id { 3922 AMDGPU_GFX_UTCL2_MEM0 = 0, 3923 }; 3924 3925 enum amdgpu_gfx_vml2_ras_mem_id { 3926 AMDGPU_GFX_VML2_MEM0 = 0, 3927 }; 3928 3929 enum amdgpu_gfx_vml2_walker_ras_mem_id { 3930 AMDGPU_GFX_VML2_WALKER_MEM0 = 0, 3931 }; 3932 3933 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = { 3934 {AMDGPU_GFX_CP_MEM1, "CP_MEM1"}, 3935 {AMDGPU_GFX_CP_MEM2, "CP_MEM2"}, 3936 {AMDGPU_GFX_CP_MEM3, "CP_MEM3"}, 3937 {AMDGPU_GFX_CP_MEM4, "CP_MEM4"}, 3938 {AMDGPU_GFX_CP_MEM5, "CP_MEM5"}, 3939 }; 3940 3941 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = { 3942 {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"}, 3943 {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"}, 3944 {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"}, 3945 {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"}, 3946 {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"}, 3947 {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"}, 3948 {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"}, 3949 {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"}, 3950 {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"}, 3951 {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"}, 3952 {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"}, 3953 {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"}, 3954 {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"}, 3955 {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"}, 3956 {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"}, 3957 {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"}, 3958 {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"}, 3959 {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"}, 3960 {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"}, 3961 {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"}, 3962 }; 3963 3964 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = { 3965 {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"}, 3966 }; 3967 3968 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = { 3969 {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"}, 3970 }; 3971 3972 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = { 3973 {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"}, 3974 }; 3975 3976 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = { 3977 {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"}, 3978 {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"}, 3979 {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"}, 3980 {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"}, 3981 {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"}, 3982 {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"}, 3983 {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"}, 3984 {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"}, 3985 {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"}, 3986 {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"}, 3987 {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"}, 3988 {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"}, 3989 {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"}, 3990 {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"}, 3991 {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"}, 3992 {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"}, 3993 {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"}, 3994 {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"}, 3995 {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"}, 3996 {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"}, 3997 {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"}, 3998 {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"}, 3999 {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"}, 4000 {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"}, 4001 {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"}, 4002 {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"}, 4003 {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"}, 4004 {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"}, 4005 {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"}, 4006 {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"}, 4007 {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"}, 4008 {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"}, 4009 {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"}, 4010 {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"}, 4011 }; 4012 4013 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = { 4014 {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"}, 4015 {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"}, 4016 {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"}, 4017 {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"}, 4018 {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"}, 4019 {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"}, 4020 {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"}, 4021 {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"}, 4022 }; 4023 4024 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = { 4025 {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"}, 4026 }; 4027 4028 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = { 4029 {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"}, 4030 {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"}, 4031 {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"}, 4032 {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"}, 4033 }; 4034 4035 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = { 4036 {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"}, 4037 {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"}, 4038 {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"}, 4039 {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"}, 4040 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"}, 4041 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"}, 4042 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"}, 4043 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"}, 4044 {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"}, 4045 {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"}, 4046 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"}, 4047 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"}, 4048 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"}, 4049 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"}, 4050 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"}, 4051 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"}, 4052 {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"}, 4053 {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"}, 4054 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"}, 4055 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"}, 4056 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"}, 4057 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"}, 4058 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"}, 4059 }; 4060 4061 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = { 4062 {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"}, 4063 {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"}, 4064 {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"}, 4065 {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"}, 4066 }; 4067 4068 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = { 4069 {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"}, 4070 {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"}, 4071 {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"}, 4072 {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"}, 4073 {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"}, 4074 }; 4075 4076 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = { 4077 {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"}, 4078 }; 4079 4080 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = { 4081 {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"}, 4082 }; 4083 4084 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = { 4085 {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"}, 4086 }; 4087 4088 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = { 4089 {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"}, 4090 {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"}, 4091 {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"}, 4092 {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"}, 4093 {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"}, 4094 {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"}, 4095 {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"}, 4096 {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"}, 4097 {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"}, 4098 {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"}, 4099 {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"}, 4100 {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"}, 4101 {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"}, 4102 {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"}, 4103 {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"}, 4104 {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"}, 4105 {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"}, 4106 {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"}, 4107 {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"}, 4108 {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"}, 4109 {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"}, 4110 {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"}, 4111 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"}, 4112 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"}, 4113 {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"}, 4114 }; 4115 4116 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = { 4117 {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"}, 4118 {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"}, 4119 {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"}, 4120 }; 4121 4122 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = { 4123 {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"}, 4124 {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"}, 4125 {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"}, 4126 {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"}, 4127 {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"}, 4128 {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"}, 4129 {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"}, 4130 {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"}, 4131 {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"}, 4132 {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"}, 4133 {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"}, 4134 {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"}, 4135 {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"}, 4136 {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"}, 4137 {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"}, 4138 {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"}, 4139 {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"}, 4140 {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"}, 4141 {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"}, 4142 {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"}, 4143 {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"}, 4144 {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"}, 4145 {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"}, 4146 {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"}, 4147 {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"}, 4148 {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"}, 4149 {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"}, 4150 {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"}, 4151 {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"}, 4152 {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"}, 4153 {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"}, 4154 {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"}, 4155 {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"}, 4156 {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"}, 4157 {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"}, 4158 {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"}, 4159 {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"}, 4160 {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"}, 4161 {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"}, 4162 {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"}, 4163 {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"}, 4164 {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"}, 4165 {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"}, 4166 {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"}, 4167 {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"}, 4168 {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"}, 4169 {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"}, 4170 {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"}, 4171 {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"}, 4172 {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"}, 4173 {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"}, 4174 {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"}, 4175 {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"}, 4176 {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"}, 4177 {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"}, 4178 {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"}, 4179 {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"}, 4180 {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"}, 4181 {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"}, 4182 {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"}, 4183 {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"}, 4184 {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"}, 4185 {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"}, 4186 {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"}, 4187 {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"}, 4188 {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"}, 4189 {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"}, 4190 {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"}, 4191 {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"}, 4192 {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"}, 4193 {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"}, 4194 {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"}, 4195 {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"}, 4196 {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"}, 4197 {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"}, 4198 {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"}, 4199 {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"}, 4200 {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"}, 4201 {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"}, 4202 {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"}, 4203 {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"}, 4204 {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"}, 4205 {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"}, 4206 {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"}, 4207 {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"}, 4208 {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"}, 4209 {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"}, 4210 {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"}, 4211 }; 4212 4213 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = { 4214 {AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"}, 4215 }; 4216 4217 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = { 4218 {AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"}, 4219 }; 4220 4221 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = { 4222 {AMDGPU_GFX_VML2_MEM, "VML2_MEM"}, 4223 }; 4224 4225 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = { 4226 {AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"}, 4227 }; 4228 4229 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = { 4230 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list) 4231 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list) 4232 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list) 4233 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list) 4234 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list) 4235 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list) 4236 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list) 4237 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list) 4238 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list) 4239 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list) 4240 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list) 4241 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list) 4242 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list) 4243 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list) 4244 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list) 4245 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list) 4246 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list) 4247 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list) 4248 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list) 4249 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list) 4250 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list) 4251 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list) 4252 }; 4253 4254 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { 4255 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH), 4256 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, 4257 AMDGPU_GFX_RLC_MEM, 1}, 4258 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI), 4259 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, 4260 AMDGPU_GFX_CP_MEM, 1}, 4261 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI), 4262 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, 4263 AMDGPU_GFX_CP_MEM, 1}, 4264 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI), 4265 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, 4266 AMDGPU_GFX_CP_MEM, 1}, 4267 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI), 4268 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, 4269 AMDGPU_GFX_GDS_MEM, 1}, 4270 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI), 4271 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, 4272 AMDGPU_GFX_GC_CANE_MEM, 1}, 4273 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), 4274 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, 4275 AMDGPU_GFX_SPI_MEM, 1}, 4276 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), 4277 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, 4278 AMDGPU_GFX_SP_MEM, 4}, 4279 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), 4280 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, 4281 AMDGPU_GFX_SP_MEM, 4}, 4282 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), 4283 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, 4284 AMDGPU_GFX_SQ_MEM, 4}, 4285 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), 4286 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, 4287 AMDGPU_GFX_SQC_MEM, 4}, 4288 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), 4289 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, 4290 AMDGPU_GFX_TCX_MEM, 1}, 4291 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI), 4292 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, 4293 AMDGPU_GFX_TCC_MEM, 1}, 4294 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), 4295 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, 4296 AMDGPU_GFX_TA_MEM, 4}, 4297 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG), 4298 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, 4299 AMDGPU_GFX_TCI_MEM, 1}, 4300 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), 4301 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, 4302 AMDGPU_GFX_TCP_MEM, 4}, 4303 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), 4304 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, 4305 AMDGPU_GFX_TD_MEM, 4}, 4306 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), 4307 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, 4308 AMDGPU_GFX_GCEA_MEM, 1}, 4309 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), 4310 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, 4311 AMDGPU_GFX_LDS_MEM, 4}, 4312 }; 4313 4314 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { 4315 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH), 4316 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, 4317 AMDGPU_GFX_RLC_MEM, 1}, 4318 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI), 4319 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, 4320 AMDGPU_GFX_CP_MEM, 1}, 4321 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI), 4322 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, 4323 AMDGPU_GFX_CP_MEM, 1}, 4324 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI), 4325 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, 4326 AMDGPU_GFX_CP_MEM, 1}, 4327 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI), 4328 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, 4329 AMDGPU_GFX_GDS_MEM, 1}, 4330 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI), 4331 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, 4332 AMDGPU_GFX_GC_CANE_MEM, 1}, 4333 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), 4334 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, 4335 AMDGPU_GFX_SPI_MEM, 1}, 4336 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), 4337 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, 4338 AMDGPU_GFX_SP_MEM, 4}, 4339 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), 4340 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, 4341 AMDGPU_GFX_SP_MEM, 4}, 4342 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), 4343 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, 4344 AMDGPU_GFX_SQ_MEM, 4}, 4345 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), 4346 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, 4347 AMDGPU_GFX_SQC_MEM, 4}, 4348 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), 4349 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, 4350 AMDGPU_GFX_TCX_MEM, 1}, 4351 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI), 4352 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, 4353 AMDGPU_GFX_TCC_MEM, 1}, 4354 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), 4355 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, 4356 AMDGPU_GFX_TA_MEM, 4}, 4357 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG), 4358 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, 4359 AMDGPU_GFX_TCI_MEM, 1}, 4360 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), 4361 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, 4362 AMDGPU_GFX_TCP_MEM, 4}, 4363 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), 4364 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, 4365 AMDGPU_GFX_TD_MEM, 4}, 4366 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), 4367 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"}, 4368 AMDGPU_GFX_TCA_MEM, 1}, 4369 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI), 4370 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, 4371 AMDGPU_GFX_GCEA_MEM, 1}, 4372 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), 4373 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, 4374 AMDGPU_GFX_LDS_MEM, 4}, 4375 }; 4376 4377 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, 4378 void *ras_error_status, int xcc_id) 4379 { 4380 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 4381 unsigned long ce_count = 0, ue_count = 0; 4382 uint32_t i, j, k; 4383 4384 /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */ 4385 struct amdgpu_smuio_mcm_config_info mcm_info = { 4386 .socket_id = adev->smuio.funcs->get_socket_id(adev), 4387 .die_id = xcc_id & 0x01 ? 1 : 0, 4388 }; 4389 4390 mutex_lock(&adev->grbm_idx_mutex); 4391 4392 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { 4393 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { 4394 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { 4395 /* no need to select if instance number is 1 */ 4396 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || 4397 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) 4398 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4399 4400 amdgpu_ras_inst_query_ras_error_count(adev, 4401 &(gfx_v9_4_3_ce_reg_list[i].reg_entry), 4402 1, 4403 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent, 4404 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size, 4405 GET_INST(GC, xcc_id), 4406 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 4407 &ce_count); 4408 4409 amdgpu_ras_inst_query_ras_error_count(adev, 4410 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4411 1, 4412 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, 4413 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, 4414 GET_INST(GC, xcc_id), 4415 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 4416 &ue_count); 4417 } 4418 } 4419 } 4420 4421 /* handle extra register entries of UE */ 4422 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { 4423 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { 4424 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { 4425 /* no need to select if instance number is 1 */ 4426 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || 4427 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) 4428 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4429 4430 amdgpu_ras_inst_query_ras_error_count(adev, 4431 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4432 1, 4433 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, 4434 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, 4435 GET_INST(GC, xcc_id), 4436 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 4437 &ue_count); 4438 } 4439 } 4440 } 4441 4442 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4443 xcc_id); 4444 mutex_unlock(&adev->grbm_idx_mutex); 4445 4446 /* the caller should make sure initialize value of 4447 * err_data->ue_count and err_data->ce_count 4448 */ 4449 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); 4450 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); 4451 } 4452 4453 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, 4454 void *ras_error_status, int xcc_id) 4455 { 4456 uint32_t i, j, k; 4457 4458 mutex_lock(&adev->grbm_idx_mutex); 4459 4460 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { 4461 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { 4462 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { 4463 /* no need to select if instance number is 1 */ 4464 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || 4465 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) 4466 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4467 4468 amdgpu_ras_inst_reset_ras_error_count(adev, 4469 &(gfx_v9_4_3_ce_reg_list[i].reg_entry), 4470 1, 4471 GET_INST(GC, xcc_id)); 4472 4473 amdgpu_ras_inst_reset_ras_error_count(adev, 4474 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4475 1, 4476 GET_INST(GC, xcc_id)); 4477 } 4478 } 4479 } 4480 4481 /* handle extra register entries of UE */ 4482 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { 4483 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { 4484 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { 4485 /* no need to select if instance number is 1 */ 4486 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || 4487 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) 4488 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4489 4490 amdgpu_ras_inst_reset_ras_error_count(adev, 4491 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4492 1, 4493 GET_INST(GC, xcc_id)); 4494 } 4495 } 4496 } 4497 4498 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4499 xcc_id); 4500 mutex_unlock(&adev->grbm_idx_mutex); 4501 } 4502 4503 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, 4504 void *ras_error_status, int xcc_id) 4505 { 4506 uint32_t i; 4507 uint32_t data; 4508 4509 if (amdgpu_sriov_vf(adev)) 4510 return; 4511 4512 data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG); 4513 data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, 4514 amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0); 4515 4516 if (amdgpu_watchdog_timer.timeout_fatal_disable && 4517 (amdgpu_watchdog_timer.period < 1 || 4518 amdgpu_watchdog_timer.period > 0x23)) { 4519 dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n"); 4520 amdgpu_watchdog_timer.period = 0x23; 4521 } 4522 data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL, 4523 amdgpu_watchdog_timer.period); 4524 4525 mutex_lock(&adev->grbm_idx_mutex); 4526 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4527 gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id); 4528 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data); 4529 } 4530 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4531 xcc_id); 4532 mutex_unlock(&adev->grbm_idx_mutex); 4533 } 4534 4535 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, 4536 void *ras_error_status) 4537 { 4538 amdgpu_gfx_ras_error_func(adev, ras_error_status, 4539 gfx_v9_4_3_inst_query_ras_err_count); 4540 } 4541 4542 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) 4543 { 4544 amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count); 4545 } 4546 4547 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) 4548 { 4549 amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); 4550 } 4551 4552 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) 4553 { 4554 /* Header itself is a NOP packet */ 4555 if (num_nop == 1) { 4556 amdgpu_ring_write(ring, ring->funcs->nop); 4557 return; 4558 } 4559 4560 /* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/ 4561 amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe))); 4562 4563 /* Header is at index 0, followed by num_nops - 1 NOP packet's */ 4564 amdgpu_ring_insert_nop(ring, num_nop - 1); 4565 } 4566 4567 static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) 4568 { 4569 struct amdgpu_device *adev = ip_block->adev; 4570 uint32_t i, j, k; 4571 uint32_t xcc_id, xcc_offset, inst_offset; 4572 uint32_t num_xcc, reg, num_inst; 4573 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); 4574 4575 if (!adev->gfx.ip_dump_core) 4576 return; 4577 4578 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4579 drm_printf(p, "Number of Instances:%d\n", num_xcc); 4580 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4581 xcc_offset = xcc_id * reg_count; 4582 drm_printf(p, "\nInstance id:%d\n", xcc_id); 4583 for (i = 0; i < reg_count; i++) 4584 drm_printf(p, "%-50s \t 0x%08x\n", 4585 gc_reg_list_9_4_3[i].reg_name, 4586 adev->gfx.ip_dump_core[xcc_offset + i]); 4587 } 4588 4589 /* print compute queue registers for all instances */ 4590 if (!adev->gfx.ip_dump_compute_queues) 4591 return; 4592 4593 num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 4594 adev->gfx.mec.num_queue_per_pipe; 4595 4596 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); 4597 drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n", 4598 num_xcc, 4599 adev->gfx.mec.num_mec, 4600 adev->gfx.mec.num_pipe_per_mec, 4601 adev->gfx.mec.num_queue_per_pipe); 4602 4603 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4604 xcc_offset = xcc_id * reg_count * num_inst; 4605 inst_offset = 0; 4606 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4607 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4608 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 4609 drm_printf(p, 4610 "\nxcc:%d mec:%d, pipe:%d, queue:%d\n", 4611 xcc_id, i, j, k); 4612 for (reg = 0; reg < reg_count; reg++) { 4613 if (i && gc_cp_reg_list_9_4_3[reg].reg_offset == 4614 regCP_MEC_ME1_HEADER_DUMP) 4615 drm_printf(p, 4616 "%-50s \t 0x%08x\n", 4617 "regCP_MEC_ME2_HEADER_DUMP", 4618 adev->gfx.ip_dump_compute_queues 4619 [xcc_offset + inst_offset + 4620 reg]); 4621 else 4622 drm_printf(p, 4623 "%-50s \t 0x%08x\n", 4624 gc_cp_reg_list_9_4_3[reg].reg_name, 4625 adev->gfx.ip_dump_compute_queues 4626 [xcc_offset + inst_offset + 4627 reg]); 4628 } 4629 inst_offset += reg_count; 4630 } 4631 } 4632 } 4633 } 4634 } 4635 4636 static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block) 4637 { 4638 struct amdgpu_device *adev = ip_block->adev; 4639 uint32_t i, j, k; 4640 uint32_t num_xcc, reg, num_inst; 4641 uint32_t xcc_id, xcc_offset, inst_offset; 4642 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); 4643 4644 if (!adev->gfx.ip_dump_core) 4645 return; 4646 4647 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4648 4649 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4650 xcc_offset = xcc_id * reg_count; 4651 for (i = 0; i < reg_count; i++) 4652 adev->gfx.ip_dump_core[xcc_offset + i] = 4653 RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i], 4654 GET_INST(GC, xcc_id))); 4655 } 4656 4657 /* dump compute queue registers for all instances */ 4658 if (!adev->gfx.ip_dump_compute_queues) 4659 return; 4660 4661 num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 4662 adev->gfx.mec.num_queue_per_pipe; 4663 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); 4664 mutex_lock(&adev->srbm_mutex); 4665 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4666 xcc_offset = xcc_id * reg_count * num_inst; 4667 inst_offset = 0; 4668 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4669 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4670 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 4671 /* ME0 is for GFX so start from 1 for CP */ 4672 soc15_grbm_select(adev, 1 + i, j, k, 0, 4673 GET_INST(GC, xcc_id)); 4674 4675 for (reg = 0; reg < reg_count; reg++) { 4676 if (i && gc_cp_reg_list_9_4_3[reg].reg_offset == 4677 regCP_MEC_ME1_HEADER_DUMP) 4678 adev->gfx.ip_dump_compute_queues 4679 [xcc_offset + 4680 inst_offset + reg] = 4681 RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), 4682 regCP_MEC_ME2_HEADER_DUMP)); 4683 else 4684 adev->gfx.ip_dump_compute_queues 4685 [xcc_offset + 4686 inst_offset + reg] = 4687 RREG32(SOC15_REG_ENTRY_OFFSET_INST( 4688 gc_cp_reg_list_9_4_3[reg], 4689 GET_INST(GC, xcc_id))); 4690 } 4691 inst_offset += reg_count; 4692 } 4693 } 4694 } 4695 } 4696 soc15_grbm_select(adev, 0, 0, 0, 0, 0); 4697 mutex_unlock(&adev->srbm_mutex); 4698 } 4699 4700 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 4701 { 4702 /* Emit the cleaner shader */ 4703 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 4704 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 4705 } 4706 4707 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { 4708 .name = "gfx_v9_4_3", 4709 .early_init = gfx_v9_4_3_early_init, 4710 .late_init = gfx_v9_4_3_late_init, 4711 .sw_init = gfx_v9_4_3_sw_init, 4712 .sw_fini = gfx_v9_4_3_sw_fini, 4713 .hw_init = gfx_v9_4_3_hw_init, 4714 .hw_fini = gfx_v9_4_3_hw_fini, 4715 .suspend = gfx_v9_4_3_suspend, 4716 .resume = gfx_v9_4_3_resume, 4717 .is_idle = gfx_v9_4_3_is_idle, 4718 .wait_for_idle = gfx_v9_4_3_wait_for_idle, 4719 .soft_reset = gfx_v9_4_3_soft_reset, 4720 .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, 4721 .set_powergating_state = gfx_v9_4_3_set_powergating_state, 4722 .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, 4723 .dump_ip_state = gfx_v9_4_3_ip_dump, 4724 .print_ip_state = gfx_v9_4_3_ip_print, 4725 }; 4726 4727 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { 4728 .type = AMDGPU_RING_TYPE_COMPUTE, 4729 .align_mask = 0xff, 4730 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4731 .support_64bit_ptrs = true, 4732 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, 4733 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, 4734 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, 4735 .emit_frame_size = 4736 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ 4737 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ 4738 5 + /* hdp invalidate */ 4739 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ 4740 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4741 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4742 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ 4743 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ 4744 7 + /* gfx_v9_4_3_emit_mem_sync */ 4745 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ 4746 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ 4747 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */ 4748 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ 4749 .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, 4750 .emit_fence = gfx_v9_4_3_ring_emit_fence, 4751 .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync, 4752 .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush, 4753 .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch, 4754 .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, 4755 .test_ring = gfx_v9_4_3_ring_test_ring, 4756 .test_ib = gfx_v9_4_3_ring_test_ib, 4757 .insert_nop = gfx_v9_4_3_ring_insert_nop, 4758 .pad_ib = amdgpu_ring_generic_pad_ib, 4759 .emit_wreg = gfx_v9_4_3_ring_emit_wreg, 4760 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, 4761 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 4762 .soft_recovery = gfx_v9_4_3_ring_soft_recovery, 4763 .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, 4764 .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, 4765 .reset = gfx_v9_4_3_reset_kcq, 4766 .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader, 4767 .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, 4768 .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, 4769 }; 4770 4771 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { 4772 .type = AMDGPU_RING_TYPE_KIQ, 4773 .align_mask = 0xff, 4774 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4775 .support_64bit_ptrs = true, 4776 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, 4777 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, 4778 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, 4779 .emit_frame_size = 4780 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ 4781 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ 4782 5 + /* hdp invalidate */ 4783 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ 4784 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4785 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4786 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ 4787 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */ 4788 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ 4789 .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq, 4790 .test_ring = gfx_v9_4_3_ring_test_ring, 4791 .insert_nop = amdgpu_ring_insert_nop, 4792 .pad_ib = amdgpu_ring_generic_pad_ib, 4793 .emit_rreg = gfx_v9_4_3_ring_emit_rreg, 4794 .emit_wreg = gfx_v9_4_3_ring_emit_wreg, 4795 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, 4796 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 4797 .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, 4798 }; 4799 4800 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) 4801 { 4802 int i, j, num_xcc; 4803 4804 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4805 for (i = 0; i < num_xcc; i++) { 4806 adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; 4807 4808 for (j = 0; j < adev->gfx.num_compute_rings; j++) 4809 adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs 4810 = &gfx_v9_4_3_ring_funcs_compute; 4811 } 4812 } 4813 4814 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = { 4815 .set = gfx_v9_4_3_set_eop_interrupt_state, 4816 .process = gfx_v9_4_3_eop_irq, 4817 }; 4818 4819 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = { 4820 .set = gfx_v9_4_3_set_priv_reg_fault_state, 4821 .process = gfx_v9_4_3_priv_reg_irq, 4822 }; 4823 4824 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = { 4825 .set = gfx_v9_4_3_set_bad_op_fault_state, 4826 .process = gfx_v9_4_3_bad_op_irq, 4827 }; 4828 4829 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = { 4830 .set = gfx_v9_4_3_set_priv_inst_fault_state, 4831 .process = gfx_v9_4_3_priv_inst_irq, 4832 }; 4833 4834 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev) 4835 { 4836 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 4837 adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs; 4838 4839 adev->gfx.priv_reg_irq.num_types = 1; 4840 adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs; 4841 4842 adev->gfx.bad_op_irq.num_types = 1; 4843 adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs; 4844 4845 adev->gfx.priv_inst_irq.num_types = 1; 4846 adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs; 4847 } 4848 4849 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev) 4850 { 4851 adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs; 4852 } 4853 4854 4855 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) 4856 { 4857 /* 9.4.3 variants removed all the GDS internal memory, 4858 * only support GWS opcode in kernel, like barrier 4859 * semaphore.etc */ 4860 4861 /* init asic gds info */ 4862 adev->gds.gds_size = 0; 4863 adev->gds.gds_compute_max_wave_id = 0; 4864 adev->gds.gws_size = 64; 4865 adev->gds.oa_size = 16; 4866 } 4867 4868 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, 4869 u32 bitmap, int xcc_id) 4870 { 4871 u32 data; 4872 4873 if (!bitmap) 4874 return; 4875 4876 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4877 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4878 4879 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data); 4880 } 4881 4882 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id) 4883 { 4884 u32 data, mask; 4885 4886 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG); 4887 data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG); 4888 4889 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4890 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4891 4892 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); 4893 4894 return (~data) & mask; 4895 } 4896 4897 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, 4898 struct amdgpu_cu_info *cu_info) 4899 { 4900 int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0; 4901 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp; 4902 unsigned disable_masks[4 * 4]; 4903 bool is_symmetric_cus; 4904 4905 if (!adev || !cu_info) 4906 return -EINVAL; 4907 4908 /* 4909 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs 4910 */ 4911 if (adev->gfx.config.max_shader_engines * 4912 adev->gfx.config.max_sh_per_se > 16) 4913 return -EINVAL; 4914 4915 amdgpu_gfx_parse_disable_cu(disable_masks, 4916 adev->gfx.config.max_shader_engines, 4917 adev->gfx.config.max_sh_per_se); 4918 4919 mutex_lock(&adev->grbm_idx_mutex); 4920 for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) { 4921 is_symmetric_cus = true; 4922 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4923 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 4924 mask = 1; 4925 ao_bitmap = 0; 4926 counter = 0; 4927 gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id); 4928 gfx_v9_4_3_set_user_cu_inactive_bitmap( 4929 adev, 4930 disable_masks[i * adev->gfx.config.max_sh_per_se + j], 4931 xcc_id); 4932 bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id); 4933 4934 cu_info->bitmap[xcc_id][i][j] = bitmap; 4935 4936 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 4937 if (bitmap & mask) { 4938 if (counter < adev->gfx.config.max_cu_per_sh) 4939 ao_bitmap |= mask; 4940 counter++; 4941 } 4942 mask <<= 1; 4943 } 4944 active_cu_number += counter; 4945 if (i < 2 && j < 2) 4946 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 4947 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; 4948 } 4949 if (i && is_symmetric_cus && prev_counter != counter) 4950 is_symmetric_cus = false; 4951 prev_counter = counter; 4952 } 4953 if (is_symmetric_cus) { 4954 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG); 4955 tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1); 4956 tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1); 4957 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp); 4958 } 4959 gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 4960 xcc_id); 4961 } 4962 mutex_unlock(&adev->grbm_idx_mutex); 4963 4964 cu_info->number = active_cu_number; 4965 cu_info->ao_cu_mask = ao_cu_mask; 4966 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 4967 4968 return 0; 4969 } 4970 4971 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = { 4972 .type = AMD_IP_BLOCK_TYPE_GFX, 4973 .major = 9, 4974 .minor = 4, 4975 .rev = 3, 4976 .funcs = &gfx_v9_4_3_ip_funcs, 4977 }; 4978 4979 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask) 4980 { 4981 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4982 uint32_t tmp_mask; 4983 int i, r; 4984 4985 /* TODO : Initialize golden regs */ 4986 /* gfx_v9_4_3_init_golden_registers(adev); */ 4987 4988 tmp_mask = inst_mask; 4989 for_each_inst(i, tmp_mask) 4990 gfx_v9_4_3_xcc_constants_init(adev, i); 4991 4992 if (!amdgpu_sriov_vf(adev)) { 4993 tmp_mask = inst_mask; 4994 for_each_inst(i, tmp_mask) { 4995 r = gfx_v9_4_3_xcc_rlc_resume(adev, i); 4996 if (r) 4997 return r; 4998 } 4999 } 5000 5001 tmp_mask = inst_mask; 5002 for_each_inst(i, tmp_mask) { 5003 r = gfx_v9_4_3_xcc_cp_resume(adev, i); 5004 if (r) 5005 return r; 5006 } 5007 5008 return 0; 5009 } 5010 5011 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask) 5012 { 5013 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5014 int i; 5015 5016 for_each_inst(i, inst_mask) 5017 gfx_v9_4_3_xcc_fini(adev, i); 5018 5019 return 0; 5020 } 5021 5022 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { 5023 .suspend = &gfx_v9_4_3_xcp_suspend, 5024 .resume = &gfx_v9_4_3_xcp_resume 5025 }; 5026 5027 struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { 5028 .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count, 5029 .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count, 5030 }; 5031 5032 static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) 5033 { 5034 int r; 5035 5036 r = amdgpu_ras_block_late_init(adev, ras_block); 5037 if (r) 5038 return r; 5039 5040 r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX, 5041 &gfx_v9_4_3_aca_info, 5042 NULL); 5043 if (r) 5044 goto late_fini; 5045 5046 return 0; 5047 5048 late_fini: 5049 amdgpu_ras_block_late_fini(adev, ras_block); 5050 5051 return r; 5052 } 5053 5054 struct amdgpu_gfx_ras gfx_v9_4_3_ras = { 5055 .ras_block = { 5056 .hw_ops = &gfx_v9_4_3_ras_ops, 5057 .ras_late_init = &gfx_v9_4_3_ras_late_init, 5058 }, 5059 .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer, 5060 }; 5061