/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */

#define XCC_REG_RANGE_0_LOW  0x2000  /* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3400  /* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000  /* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000 /* XCC gfxdec1 upper bound */

#define NORMALIZE_XCC_REG_OFFSET(offset) \
    (offset & 0xFFFF)

static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
    SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
    SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
    SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
    SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
    SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
    SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
    /* cp header registers */
    SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
    /* SE status registers */
    SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
    SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
    SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
    SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};
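/*
 * Both this list and gc_cp_reg_list_9_4_3 below feed the IP-dump buffers
 * sized in gfx_v9_4_3_alloc_ip_dump(): the ARRAY_SIZE() of each list sets
 * how many dwords are captured per XCC (and, for the CP list, per compute
 * queue) when a dump is taken.
 */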
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
    /* compute queue registers */
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
    SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
};

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
                                  struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                         uint64_t queue_mask)
{
    struct amdgpu_device *adev = kiq_ring->adev;
    u64 shader_mc_addr;

    /* Cleaner shader MC address */
    shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

    amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
    amdgpu_ring_write(kiq_ring,
                      PACKET3_SET_RESOURCES_VMID_MASK(0) |
                      /* vmid_mask: 0, queue_type: 0 (KIQ) */
                      PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
    amdgpu_ring_write(kiq_ring,
                      lower_32_bits(queue_mask));    /* queue mask lo */
    amdgpu_ring_write(kiq_ring,
                      upper_32_bits(queue_mask));    /* queue mask hi */
    amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
    amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
    amdgpu_ring_write(kiq_ring, 0);    /* oac mask */
    amdgpu_ring_write(kiq_ring, 0);    /* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                      struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = kiq_ring->adev;
    uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
    uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
    uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

    amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
    amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
                      PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                      PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                      PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                      PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                      PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                      /* queue_type: normal compute queue */
                      PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                      /* alloc format: all_on_one_pipe */
                      PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                      PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                      /* num_queues: must be 1 */
                      PACKET3_MAP_QUEUES_NUM_QUEUES(1));
    amdgpu_ring_write(kiq_ring,
                      PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
    amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
    amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
    amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
    amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                        struct amdgpu_ring *ring,
                                        enum amdgpu_unmap_queues_action action,
                                        u64 gpu_addr, u64 seq)
{
    uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
    amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
    amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                      PACKET3_UNMAP_QUEUES_ACTION(action) |
                      PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                      PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                      PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
    amdgpu_ring_write(kiq_ring,
                      PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

    if (action == PREEMPT_QUEUES_NO_UNMAP) {
        amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(kiq_ring, seq);
    } else {
        amdgpu_ring_write(kiq_ring, 0);
        amdgpu_ring_write(kiq_ring, 0);
        amdgpu_ring_write(kiq_ring, 0);
    }
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                        struct amdgpu_ring *ring,
                                        u64 addr,
                                        u64 seq)
{
    uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

    amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
    amdgpu_ring_write(kiq_ring,
                      PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                      PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                      PACKET3_QUERY_STATUS_COMMAND(2));
    /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
    amdgpu_ring_write(kiq_ring,
                      PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                      PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
    amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
    amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
    amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
    amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                                           uint16_t pasid, uint32_t flush_type,
                                           bool all_hub)
{
    amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
    amdgpu_ring_write(kiq_ring,
                      PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                      PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
                      PACKET3_INVALIDATE_TLBS_PASID(pasid) |
                      PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
                                          uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
                                          uint32_t xcc_id, uint32_t vmid)
{
    struct amdgpu_device *adev = kiq_ring->adev;
    unsigned i;

    /* enter safe mode */
    amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
    mutex_lock(&adev->srbm_mutex);
    soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);

    if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
        WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
        WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
        /* wait till the dequeue takes effect */
        for (i = 0; i < adev->usec_timeout; i++) {
            if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
                break;
            udelay(1);
        }
        if (i >= adev->usec_timeout)
            dev_err(adev->dev, "failed to wait on HQD deactivation\n");
    } else {
        dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
    }

    soc15_grbm_select(adev, 0, 0, 0, 0, 0);
    mutex_unlock(&adev->srbm_mutex);
    /* exit safe mode */
    amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}
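/*
 * The *_size fields in the table below are PM4 packet lengths in dwords,
 * used by the KIQ code to reserve ring space. PACKET3(op, n) is followed
 * by n + 1 payload dwords, so a packet occupies n + 2 dwords in total:
 * SET_RESOURCES(6) -> 8, MAP_QUEUES(5) -> 7, UNMAP_QUEUES(4) -> 6,
 * QUERY_STATUS(5) -> 7 and INVALIDATE_TLBS(0) -> 2, matching the packets
 * emitted by the helpers above.
 */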
static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
    .kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
    .kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
    .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
    .kiq_query_status = gfx_v9_4_3_kiq_query_status,
    .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
    .kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
    .set_resources_size = 8,
    .map_queues_size = 7,
    .unmap_queues_size = 6,
    .query_status_size = 7,
    .invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
    int i, num_xcc;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++)
        adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
    int i, num_xcc, dev_inst;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++) {
        dev_inst = GET_INST(GC, i);

        WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
                     GOLDEN_GB_ADDR_CONFIG);
        /* Golden settings applied by driver for ASIC with rev_id 0 */
        if (adev->rev_id == 0) {
            WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
                                  REDUCE_FIFO_DEPTH_BY_2, 2);
        } else {
            WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
                                  SPARE, 0x1);
        }
    }
}

static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
    uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

    /*
     * If it is an XCC reg, normalize the reg to keep the
     * lower 16 bits in the local xcc.
     */
    if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
        ((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
        return normalized_reg;
    else
        return reg;
}
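/*
 * Illustrative example (offsets made up, not taken from the register
 * headers): an absolute offset of 0x4A010 masks down to 0xA010, which
 * falls inside the gfxdec1 window [0xA000, 0x10000) and is therefore
 * rewritten to the XCC-local offset, while an offset masking to 0x1234
 * lies outside both windows and is returned unchanged.
 */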
static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                         bool wc, uint32_t reg, uint32_t val)
{
    reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
    amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                            WRITE_DATA_DST_SEL(0) |
                            (wc ? WR_CONFIRM : 0));
    amdgpu_ring_write(ring, reg);
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                    int mem_space, int opt, uint32_t addr0,
                                    uint32_t addr1, uint32_t ref, uint32_t mask,
                                    uint32_t inv)
{
    /* Only do the normalization on regspace */
    if (mem_space == 0) {
        addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
        addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
    }

    amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
    amdgpu_ring_write(ring,
                      /* memory (1) or register (0) */
                      (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                       WAIT_REG_MEM_OPERATION(opt) | /* wait */
                       WAIT_REG_MEM_FUNCTION(3) |    /* equal */
                       WAIT_REG_MEM_ENGINE(eng_sel)));

    if (mem_space)
        BUG_ON(addr0 & 0x3); /* Dword align */
    amdgpu_ring_write(ring, addr0);
    amdgpu_ring_write(ring, addr1);
    amdgpu_ring_write(ring, ref);
    amdgpu_ring_write(ring, mask);
    amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
    uint32_t scratch_reg0_offset, xcc_offset;
    struct amdgpu_device *adev = ring->adev;
    uint32_t tmp = 0;
    unsigned i;
    int r;

    /* Use register offset which is local to XCC in the packet */
    xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
    scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
    WREG32(scratch_reg0_offset, 0xCAFEDEAD);
    tmp = RREG32(scratch_reg0_offset);

    r = amdgpu_ring_alloc(ring, 3);
    if (r)
        return r;

    amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
    amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
    amdgpu_ring_write(ring, 0xDEADBEEF);
    amdgpu_ring_commit(ring);

    for (i = 0; i < adev->usec_timeout; i++) {
        tmp = RREG32(scratch_reg0_offset);
        if (tmp == 0xDEADBEEF)
            break;
        udelay(1);
    }

    if (i >= adev->usec_timeout)
        r = -ETIMEDOUT;
    return r;
}

static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
    struct amdgpu_device *adev = ring->adev;
    struct amdgpu_ib ib;
    struct dma_fence *f = NULL;

    unsigned index;
    uint64_t gpu_addr;
    uint32_t tmp;
    long r;

    r = amdgpu_device_wb_get(adev, &index);
    if (r)
        return r;

    gpu_addr = adev->wb.gpu_addr + (index * 4);
    adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
    memset(&ib, 0, sizeof(ib));

    r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
    if (r)
        goto err1;

    ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
    ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
    ib.ptr[2] = lower_32_bits(gpu_addr);
    ib.ptr[3] = upper_32_bits(gpu_addr);
    ib.ptr[4] = 0xDEADBEEF;
    ib.length_dw = 5;

    r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
    if (r)
        goto err2;

    r = dma_fence_wait_timeout(f, false, timeout);
    if (r == 0) {
        r = -ETIMEDOUT;
        goto err2;
    } else if (r < 0) {
        goto err2;
    }

    tmp = adev->wb.wb[index];
    if (tmp == 0xDEADBEEF)
        r = 0;
    else
        r = -EINVAL;

err2:
    amdgpu_ib_free(adev, &ib, NULL);
    dma_fence_put(f);
err1:
    amdgpu_device_wb_free(adev, index);
    return r;
}


/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
    uint64_t clock;

    mutex_lock(&adev->gfx.gpu_clock_mutex);
    WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
    clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
        ((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
    mutex_unlock(&adev->gfx.gpu_clock_mutex);

    return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
    amdgpu_ucode_release(&adev->gfx.pfp_fw);
    amdgpu_ucode_release(&adev->gfx.me_fw);
    amdgpu_ucode_release(&adev->gfx.ce_fw);
    amdgpu_ucode_release(&adev->gfx.rlc_fw);
    amdgpu_ucode_release(&adev->gfx.mec_fw);
    amdgpu_ucode_release(&adev->gfx.mec2_fw);

    kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
                                         const char *chip_name)
{
    int err;
    const struct rlc_firmware_header_v2_0 *rlc_hdr;
    uint16_t version_major;
    uint16_t version_minor;

    err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
                               "amdgpu/%s_rlc.bin", chip_name);
    if (err)
        goto out;
    rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

    version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
    version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
    err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
    if (err)
        amdgpu_ucode_release(&adev->gfx.rlc_fw);

    return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
    return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
    if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
                                                const char *chip_name)
{
    int err;

    err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
                               "amdgpu/%s_mec.bin", chip_name);
    if (err)
        goto out;
    amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
    amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

    adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
    adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

    gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
    if (err)
        amdgpu_ucode_release(&adev->gfx.mec_fw);
    return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
    char ucode_prefix[15];
    int r;

    amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

    r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
    if (r)
        return r;

    r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
    if (r)
        return r;

    return r;
}
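/*
 * The ucode_prefix resolved by amdgpu_ucode_ip_version_decode() comes out
 * as "gc_9_4_3" or "gc_9_4_4" for these IP versions, which is what ties
 * the requests above to the MODULE_FIRMWARE() entries at the top of this
 * file.
 */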
static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
    amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
    amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
    int r, i, num_xcc;
    u32 *hpd;
    const __le32 *fw_data;
    unsigned fw_size;
    u32 *fw;
    size_t mec_hpd_size;

    const struct gfx_firmware_header_v1_0 *mec_hdr;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++)
        bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
                    AMDGPU_MAX_COMPUTE_QUEUES);

    /* take ownership of the relevant compute queues */
    amdgpu_gfx_compute_queue_acquire(adev);
    mec_hpd_size =
        adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
    if (mec_hpd_size) {
        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM |
                                      AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
        if (r) {
            dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
            gfx_v9_4_3_mec_fini(adev);
            return r;
        }

        if (amdgpu_emu_mode == 1) {
            for (i = 0; i < mec_hpd_size / 4; i++) {
                memset((void *)(hpd + i), 0, 4);
                if (i % 50 == 0)
                    msleep(1);
            }
        } else {
            memset(hpd, 0, mec_hpd_size);
        }

        amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
    }

    mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

    fw_data = (const __le32 *)
        (adev->gfx.mec_fw->data +
         le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
    fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

    r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
                                  PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                  &adev->gfx.mec.mec_fw_obj,
                                  &adev->gfx.mec.mec_fw_gpu_addr,
                                  (void **)&fw);
    if (r) {
        dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
        gfx_v9_4_3_mec_fini(adev);
        return r;
    }

    memcpy(fw, fw_data, fw_size);

    amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
    amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

    return 0;
}

static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
                                        u32 sh_num, u32 instance, int xcc_id)
{
    u32 data;

    if (instance == 0xffffffff)
        data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
                             INSTANCE_BROADCAST_WRITES, 1);
    else
        data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
                             INSTANCE_INDEX, instance);

    if (se_num == 0xffffffff)
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                             SE_BROADCAST_WRITES, 1);
    else
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

    if (sh_num == 0xffffffff)
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                             SH_BROADCAST_WRITES, 1);
    else
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

    WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
    WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
        (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
        (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
        (address << SQ_IND_INDEX__INDEX__SHIFT) |
        (SQ_IND_INDEX__FORCE_READ_MASK));
    return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                           uint32_t wave, uint32_t thread,
                           uint32_t regno, uint32_t num, uint32_t *out)
{
    WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
        (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
        (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
        (regno << SQ_IND_INDEX__INDEX__SHIFT) |
        (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
        (SQ_IND_INDEX__FORCE_READ_MASK) |
        (SQ_IND_INDEX__AUTO_INCR_MASK));
    while (num--)
        *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}
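/*
 * Both helpers above use the SQ indirect register interface: SQ_IND_INDEX
 * selects the wave/SIMD and a starting index, and with AUTO_INCR set each
 * read of SQ_IND_DATA returns the next dword, so an entire GPR range can
 * be streamed with back-to-back reads.
 */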
static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
                                      uint32_t xcc_id, uint32_t simd, uint32_t wave,
                                      uint32_t *dst, int *no_fields)
{
    /* type 1 wave data */
    dst[(*no_fields)++] = 1;
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
    dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                                       uint32_t wave, uint32_t start,
                                       uint32_t size, uint32_t *dst)
{
    wave_read_regs(adev, xcc_id, simd, wave, 0,
                   start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                                       uint32_t wave, uint32_t thread,
                                       uint32_t start, uint32_t size,
                                       uint32_t *dst)
{
    wave_read_regs(adev, xcc_id, simd, wave, thread,
                   start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
                                        u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
    soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
    u32 xcp_ctl;

    /* Value is expected to be the same on all, fetch from first instance */
    xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

    return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                               int num_xccs_per_xcp)
{
    int ret, i, num_xcc;
    u32 tmp = 0;

    if (adev->psp.funcs) {
        ret = psp_spatial_partition(&adev->psp,
                                    NUM_XCC(adev->gfx.xcc_mask) /
                                    num_xccs_per_xcp);
        if (ret)
            return ret;
    } else {
        num_xcc = NUM_XCC(adev->gfx.xcc_mask);

        for (i = 0; i < num_xcc; i++) {
            tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
                                num_xccs_per_xcp);
            tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
                                i % num_xccs_per_xcp);
            WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
                         tmp);
        }
        ret = 0;
    }

    adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

    return ret;
}

static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
    int xcc;

    xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
    if (!xcc) {
"Couldn't find xcc mapping from IH node"); 838 return -EINVAL; 839 } 840 841 return xcc - 1; 842 } 843 844 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { 845 .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, 846 .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh, 847 .read_wave_data = &gfx_v9_4_3_read_wave_data, 848 .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, 849 .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, 850 .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, 851 .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, 852 .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, 853 .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp, 854 }; 855 856 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle, 857 struct aca_bank *bank, enum aca_smu_type type, 858 void *data) 859 { 860 struct aca_bank_info info; 861 u64 misc0; 862 u32 instlo; 863 int ret; 864 865 ret = aca_bank_info_decode(bank, &info); 866 if (ret) 867 return ret; 868 869 /* NOTE: overwrite info.die_id with xcd id for gfx */ 870 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); 871 instlo &= GENMASK(31, 1); 872 info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; 873 874 misc0 = bank->regs[ACA_REG_IDX_MISC0]; 875 876 switch (type) { 877 case ACA_SMU_TYPE_UE: 878 ret = aca_error_cache_log_bank_error(handle, &info, 879 ACA_ERROR_TYPE_UE, 1ULL); 880 break; 881 case ACA_SMU_TYPE_CE: 882 ret = aca_error_cache_log_bank_error(handle, &info, 883 ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0)); 884 break; 885 default: 886 return -EINVAL; 887 } 888 889 return ret; 890 } 891 892 static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, 893 enum aca_smu_type type, void *data) 894 { 895 u32 instlo; 896 897 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); 898 instlo &= GENMASK(31, 1); 899 switch (instlo) { 900 case mmSMNAID_XCD0_MCA_SMU: 901 case mmSMNAID_XCD1_MCA_SMU: 902 case mmSMNXCD_XCD0_MCA_SMU: 903 return true; 904 default: 905 break; 906 } 907 908 return false; 909 } 910 911 static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = { 912 .aca_bank_parser = gfx_v9_4_3_aca_bank_parser, 913 .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid, 914 }; 915 916 static const struct aca_info gfx_v9_4_3_aca_info = { 917 .hwip = ACA_HWIP_TYPE_SMU, 918 .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, 919 .bank_ops = &gfx_v9_4_3_aca_bank_ops, 920 }; 921 922 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) 923 { 924 u32 gb_addr_config; 925 926 adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs; 927 adev->gfx.ras = &gfx_v9_4_3_ras; 928 929 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 930 case IP_VERSION(9, 4, 3): 931 case IP_VERSION(9, 4, 4): 932 adev->gfx.config.max_hw_contexts = 8; 933 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 934 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 935 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 936 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 937 gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG); 938 break; 939 default: 940 BUG(); 941 break; 942 } 943 944 adev->gfx.config.gb_addr_config = gb_addr_config; 945 946 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 947 REG_GET_FIELD( 948 adev->gfx.config.gb_addr_config, 949 GB_ADDR_CONFIG, 950 NUM_PIPES); 951 952 adev->gfx.config.max_tile_pipes = 953 adev->gfx.config.gb_addr_config_fields.num_pipes; 954 955 adev->gfx.config.gb_addr_config_fields.num_banks = 1 << 956 REG_GET_FIELD( 957 
    adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
    adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                      GB_ADDR_CONFIG, NUM_RB_PER_SE);
    adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
    adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
        REG_GET_FIELD(adev->gfx.config.gb_addr_config,
                      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));

    return 0;
}
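/*
 * Worked example for the decodes above: most GB_ADDR_CONFIG fields are
 * plain power-of-two exponents (e.g. a NUM_PIPES field of 2 gives
 * 1 << 2 = 4 pipes), while PIPE_INTERLEAVE_SIZE is biased by 8, so a raw
 * field value of 0 yields 1 << (8 + 0) = 256 bytes.
 */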
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                                        int xcc_id, int mec, int pipe, int queue)
{
    unsigned irq_type;
    struct amdgpu_ring *ring;
    unsigned int hw_prio;
    uint32_t xcc_doorbell_start;

    ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
                                   ring_id];

    /* mec0 is me1 */
    ring->xcc_id = xcc_id;
    ring->me = mec + 1;
    ring->pipe = pipe;
    ring->queue = queue;

    ring->ring_obj = NULL;
    ring->use_doorbell = true;
    xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
                         xcc_id * adev->doorbell_index.xcc_doorbell_range;
    /* doorbell_index is in dword units; each 64-bit doorbell spans two slots */
    ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
    ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
                         (ring_id + xcc_id * adev->gfx.num_compute_rings) *
                         GFX9_MEC_HPD_SIZE;
    ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
    sprintf(ring->name, "comp_%d.%d.%d.%d",
            ring->xcc_id, ring->me, ring->pipe, ring->queue);

    irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
             + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
             + ring->pipe;
    hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
              AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
    /* type-2 packets are deprecated on MEC, use type-3 instead */
    return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
                            hw_prio, NULL);
}

static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
    uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
    uint32_t *ptr, num_xcc, inst;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);

    ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
    if (!ptr) {
        DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
        adev->gfx.ip_dump_core = NULL;
    } else {
        adev->gfx.ip_dump_core = ptr;
    }

    /* Allocate memory for compute queue registers for all the instances */
    reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
    inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
           adev->gfx.mec.num_queue_per_pipe;

    ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
    if (!ptr) {
        DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
        adev->gfx.ip_dump_compute_queues = NULL;
    } else {
        adev->gfx.ip_dump_compute_queues = ptr;
    }
}

static int gfx_v9_4_3_sw_init(void *handle)
{
    int i, j, k, r, ring_id, xcc_id, num_xcc;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
    case IP_VERSION(9, 4, 3):
    case IP_VERSION(9, 4, 4):
        adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
        adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
        if (adev->gfx.mec_fw_version >= 153) {
            adev->gfx.enable_cleaner_shader = true;
            r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
            if (r) {
                adev->gfx.enable_cleaner_shader = false;
                dev_err(adev->dev, "Failed to initialize cleaner shader\n");
            }
        }
        break;
    default:
        adev->gfx.enable_cleaner_shader = false;
        break;
    }

    adev->gfx.mec.num_mec = 2;
    adev->gfx.mec.num_pipe_per_mec = 4;
    adev->gfx.mec.num_queue_per_pipe = 8;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);

    /* EOP Event */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
    if (r)
        return r;

    /* Bad opcode Event */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
                          GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
                          &adev->gfx.bad_op_irq);
    if (r)
        return r;

    /* Privileged reg */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
                          &adev->gfx.priv_reg_irq);
    if (r)
        return r;

    /* Privileged inst */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
                          &adev->gfx.priv_inst_irq);
    if (r)
        return r;

    adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

    r = adev->gfx.rlc.funcs->init(adev);
    if (r) {
        DRM_ERROR("Failed to init rlc BOs!\n");
        return r;
    }

    r = gfx_v9_4_3_mec_init(adev);
    if (r) {
        DRM_ERROR("Failed to init MEC BOs!\n");
        return r;
    }

    /* set up the compute queues - allocate horizontally across pipes */
    for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
        ring_id = 0;
        for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
            for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
                for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
                     k++) {
                    if (!amdgpu_gfx_is_mec_queue_enabled(
                            adev, xcc_id, i, k, j))
                        continue;

                    r = gfx_v9_4_3_compute_ring_init(adev,
                                                     ring_id,
                                                     xcc_id,
                                                     i, k, j);
                    if (r)
                        return r;

                    ring_id++;
                }
            }
        }

        r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
        if (r) {
            DRM_ERROR("Failed to init KIQ BOs!\n");
            return r;
        }

        r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
        if (r)
            return r;

        /* create MQD for all compute queues as well as KIQ for SRIOV case */
        r = amdgpu_gfx_mqd_sw_init(adev,
                                   sizeof(struct v9_mqd_allocation), xcc_id);
        if (r)
            return r;
    }

    r = gfx_v9_4_3_gpu_early_init(adev);
    if (r)
        return r;

    r = amdgpu_gfx_ras_sw_init(adev);
    if (r)
        return r;

    if (!amdgpu_sriov_vf(adev)) {
        r = amdgpu_gfx_sysfs_init(adev);
        if (r)
            return r;
    }

    gfx_v9_4_3_alloc_ip_dump(adev);

    r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
    if (r)
        return r;

    return 0;
}

static int gfx_v9_4_3_sw_fini(void *handle)
{
    int i, num_xcc;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
        amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

    for (i = 0; i < num_xcc; i++) {
        amdgpu_gfx_mqd_sw_fini(adev, i);
        amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
        amdgpu_gfx_kiq_fini(adev, i);
    }

    amdgpu_gfx_cleaner_shader_sw_fini(adev);

    gfx_v9_4_3_mec_fini(adev);
    amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
    gfx_v9_4_3_free_microcode(adev);
    if (!amdgpu_sriov_vf(adev))
        amdgpu_gfx_sysfs_fini(adev);
    amdgpu_gfx_sysfs_isolation_shader_fini(adev);

    kfree(adev->gfx.ip_dump_core);
    kfree(adev->gfx.ip_dump_compute_queues);

    return 0;
}

#define DEFAULT_SH_MEM_BASES (0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
                                             int xcc_id)
{
    int i;
    uint32_t sh_mem_config;
    uint32_t sh_mem_bases;
    uint32_t data;

    /*
     * Configure apertures:
     * LDS:     0x60000000'00000000 - 0x60000001'00000000 (4GB)
     * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
     * GPUVM:   0x60010000'00000000 - 0x60020000'00000000 (1TB)
     */
    sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

    sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
                    SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                    SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

    mutex_lock(&adev->srbm_mutex);
    for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
        soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
        /* CP and shaders */
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

        /* Enable trap for each kfd vmid. */
        data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
        data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
        WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
    }
    soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
    mutex_unlock(&adev->srbm_mutex);

    /*
     * Initialize all compute VMIDs to have no GDS, GWS, or OA
     * access. These should be enabled by FW for target VMIDs.
     */
    for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
    }
}
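/*
 * DEFAULT_SH_MEM_BASES arithmetic: SH_MEM_BASES packs PRIVATE_BASE in its
 * low 16 bits and SHARED_BASE in its high 16 bits, each holding address
 * bits [63:48]. 0x6000 | (0x6000 << 16) therefore places both apertures
 * at 0x6000'0000'00000000, which is exactly the layout described in the
 * aperture comment above.
 */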
static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
    int vmid;

    /*
     * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
     * access. Compute VMIDs should be enabled by FW for target VMIDs,
     * the driver can enable them for graphics. VMID0 should maintain
     * access so that HWS firmware can save/restore entries.
     */
    for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
        WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
    }
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
                                          int xcc_id)
{
    u32 tmp;
    int i;

    /* XXX SH_MEM regs */
    /* where to put LDS, scratch, GPUVM in FSA64 space */
    mutex_lock(&adev->srbm_mutex);
    for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
        soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
        /* CP and shaders */
        if (i == 0) {
            tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                SH_MEM_ALIGNMENT_MODE_UNALIGNED);
            tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
                                !!adev->gmc.noretry);
            WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
                             regSH_MEM_CONFIG, tmp);
            WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
                             regSH_MEM_BASES, 0);
        } else {
            tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                SH_MEM_ALIGNMENT_MODE_UNALIGNED);
            tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
                                !!adev->gmc.noretry);
            WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
                             regSH_MEM_CONFIG, tmp);
            tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
                                (adev->gmc.private_aperture_start >>
                                 48));
            tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
                                (adev->gmc.shared_aperture_start >>
                                 48));
            WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
                             regSH_MEM_BASES, tmp);
        }
    }
    soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

    mutex_unlock(&adev->srbm_mutex);

    gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
    gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
    int i, num_xcc;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);

    gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
    adev->gfx.config.db_debug2 =
        RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

    for (i = 0; i < num_xcc; i++)
        gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
                                           int xcc_id)
{
    WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
    /*
     * Rlc save restore list is workable since v2_1.
     * And it's needed by gfxoff feature.
     */
    if (adev->gfx.rlc.is_rlc_v2_1)
        gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
    uint32_t data;

    data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
    data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
    uint32_t rlc_setting;

    /* if RLC is not enabled, do nothing */
    rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
    if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
        return false;

    return true;
}

static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
    uint32_t data;
    unsigned i;

    data = RLC_SAFE_MODE__CMD_MASK;
    data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

    /* wait for RLC_SAFE_MODE */
    for (i = 0; i < adev->usec_timeout; i++) {
        if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
            break;
        udelay(1);
    }
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
                                           int xcc_id)
{
    uint32_t data;

    data = RLC_SAFE_MODE__CMD_MASK;
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}
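/*
 * Safe-mode handshake: entering safe mode writes CMD together with a
 * MESSAGE of 1 and then polls until the RLC clears the CMD bit as its
 * acknowledgment; leaving safe mode writes CMD with a zero message and
 * needs no wait.
 */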
static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
    int xcc_id, num_xcc;
    struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
        reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
        reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
        reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
        reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
        reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
        reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
        reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
        reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
    }
    adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
    /* init spm vmid with 0xf */
    if (adev->gfx.rlc.funcs->update_spm_vmid)
        adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

    return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
                                               int xcc_id)
{
    u32 i, j, k;
    u32 mask;

    mutex_lock(&adev->grbm_idx_mutex);
    for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
        for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
            gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
                                        xcc_id);
            for (k = 0; k < adev->usec_timeout; k++) {
                if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
                    break;
                udelay(1);
            }
            if (k == adev->usec_timeout) {
                gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
                                            0xffffffff,
                                            0xffffffff, xcc_id);
                mutex_unlock(&adev->grbm_idx_mutex);
                DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
                         i, j);
                return;
            }
        }
    }
    gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
                                xcc_id);
    mutex_unlock(&adev->grbm_idx_mutex);

    mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
           RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
           RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
           RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
    for (k = 0; k < adev->usec_timeout; k++) {
        if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
            break;
        udelay(1);
    }
}
static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
                                                     bool enable, int xcc_id)
{
    u32 tmp;

    /* These interrupts should be enabled to drive DS clock */

    tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
    tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
    WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
                          RLC_ENABLE_F32, 0);
    gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
    gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
    int i, num_xcc;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++)
        gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
    WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
                          SOFT_RESET_RLC, 1);
    udelay(50);
    WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
                          SOFT_RESET_RLC, 0);
    udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
    int i, num_xcc;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++)
        gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
    WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
                          RLC_ENABLE_F32, 1);
    udelay(50);

    /* carrizo: enable the CP interrupt only after the CP has been initialized */
    if (!(adev->flags & AMD_IS_APU)) {
        gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
        udelay(50);
    }
}
static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
    u32 rlc_ucode_ver;
#endif
    int i, num_xcc;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++) {
        gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
        /* RLC_GPM_GENERAL_6 : RLC Ucode version */
        rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
        if (rlc_ucode_ver == 0x108) {
            dev_info(adev->dev,
                     "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
                     rlc_ucode_ver, adev->gfx.rlc_fw_version);
            /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
             * default is 0x9C4 to create a 100us interval */
            WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
            /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
             * to disable the page fault retry interrupts, default is
             * 0x100 (256) */
            WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
        }
#endif
    }
}

static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
                                             int xcc_id)
{
    const struct rlc_firmware_header_v2_0 *hdr;
    const __le32 *fw_data;
    unsigned i, fw_size;

    if (!adev->gfx.rlc_fw)
        return -EINVAL;

    hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
    amdgpu_ucode_print_rlc_hdr(&hdr->header);

    fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
                               le32_to_cpu(hdr->header.ucode_array_offset_bytes));
    fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
                 RLCG_UCODE_LOADING_START_ADDRESS);
    for (i = 0; i < fw_size; i++) {
        if (amdgpu_emu_mode == 1 && i % 100 == 0) {
            dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
            msleep(1);
        }
        WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
    }
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

    return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
    int r;

    if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
        gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
        /* legacy rlc firmware loading */
        r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
        if (r)
            return r;
        gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
    }

    amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
    /* disable CG */
    WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
    gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
    amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

    return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
    int r, i, num_xcc;

    if (amdgpu_sriov_vf(adev))
        return 0;

    num_xcc = NUM_XCC(adev->gfx.xcc_mask);
    for (i = 0; i < num_xcc; i++) {
        r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
        if (r)
            return r;
    }

    return 0;
}

static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                                       unsigned vmid)
{
    u32 reg, pre_data, data;

    reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
    if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
        pre_data = RREG32_NO_KIQ(reg);
    else
        pre_data = RREG32(reg);

    data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
    data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

    if (pre_data != data) {
        if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
            WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
        } else {
            WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
        }
    }
}

static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
    {SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
    {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};
regGRBM_GFX_INDEX)}, 1660 {SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)}, 1661 }; 1662 1663 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev, 1664 uint32_t offset, 1665 struct soc15_reg_rlcg *entries, int arr_size) 1666 { 1667 int i, inst; 1668 uint32_t reg; 1669 1670 if (!entries) 1671 return false; 1672 1673 for (i = 0; i < arr_size; i++) { 1674 const struct soc15_reg_rlcg *entry; 1675 1676 entry = &entries[i]; 1677 inst = adev->ip_map.logical_to_dev_inst ? 1678 adev->ip_map.logical_to_dev_inst( 1679 adev, entry->hwip, entry->instance) : 1680 entry->instance; 1681 reg = adev->reg_offset[entry->hwip][inst][entry->segment] + 1682 entry->reg; 1683 if (offset == reg) 1684 return true; 1685 } 1686 1687 return false; 1688 } 1689 1690 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset) 1691 { 1692 return gfx_v9_4_3_check_rlcg_range(adev, offset, 1693 (void *)rlcg_access_gc_9_4_3, 1694 ARRAY_SIZE(rlcg_access_gc_9_4_3)); 1695 } 1696 1697 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, 1698 bool enable, int xcc_id) 1699 { 1700 if (enable) { 1701 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); 1702 } else { 1703 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 1704 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 1705 adev->gfx.kiq[xcc_id].ring.sched.ready = false; 1706 } 1707 udelay(50); 1708 } 1709 1710 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev, 1711 int xcc_id) 1712 { 1713 const struct gfx_firmware_header_v1_0 *mec_hdr; 1714 const __le32 *fw_data; 1715 unsigned i; 1716 u32 tmp; 1717 u32 mec_ucode_addr_offset; 1718 u32 mec_ucode_data_offset; 1719 1720 if (!adev->gfx.mec_fw) 1721 return -EINVAL; 1722 1723 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); 1724 1725 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 1726 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 1727 1728 fw_data = (const __le32 *) 1729 (adev->gfx.mec_fw->data + 1730 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 1731 tmp = 0; 1732 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 1733 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 1734 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp); 1735 1736 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO, 1737 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); 1738 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI, 1739 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 1740 1741 mec_ucode_addr_offset = 1742 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR); 1743 mec_ucode_data_offset = 1744 SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA); 1745 1746 /* MEC1 */ 1747 WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset); 1748 for (i = 0; i < mec_hdr->jt_size; i++) 1749 WREG32(mec_ucode_data_offset, 1750 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 1751 1752 WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version); 1753 /* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. 
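 * On this IP both MECs are assumed to run the same image (only the
 * MEC1 jump table is programmed above), so the MEC2 load is skipped.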
*/ 1754 1755 return 0; 1756 } 1757 1758 /* KIQ functions */ 1759 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id) 1760 { 1761 uint32_t tmp; 1762 struct amdgpu_device *adev = ring->adev; 1763 1764 /* tell RLC which is KIQ queue */ 1765 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); 1766 tmp &= 0xffffff00; 1767 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 1768 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); 1769 tmp |= 0x80; 1770 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); 1771 } 1772 1773 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) 1774 { 1775 struct amdgpu_device *adev = ring->adev; 1776 1777 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 1778 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { 1779 mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; 1780 mqd->cp_hqd_queue_priority = 1781 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; 1782 } 1783 } 1784 } 1785 1786 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) 1787 { 1788 struct amdgpu_device *adev = ring->adev; 1789 struct v9_mqd *mqd = ring->mqd_ptr; 1790 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 1791 uint32_t tmp; 1792 1793 mqd->header = 0xC0310800; 1794 mqd->compute_pipelinestat_enable = 0x00000001; 1795 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 1796 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 1797 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 1798 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 1799 mqd->compute_misc_reserved = 0x00000003; 1800 1801 mqd->dynamic_cu_mask_addr_lo = 1802 lower_32_bits(ring->mqd_gpu_addr 1803 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 1804 mqd->dynamic_cu_mask_addr_hi = 1805 upper_32_bits(ring->mqd_gpu_addr 1806 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 1807 1808 eop_base_addr = ring->eop_gpu_addr >> 8; 1809 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 1810 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 1811 1812 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 1813 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL); 1814 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 1815 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); 1816 1817 mqd->cp_hqd_eop_control = tmp; 1818 1819 /* enable doorbell? 
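 * (a doorbell lets the submitter ring the queue with a single MMIO
 * write; the DOORBELL_* fields below set its offset and routing, and
 * under SR-IOV DOORBELL_MODE is additionally forced)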
*/ 1820 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL); 1821 1822 if (ring->use_doorbell) { 1823 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1824 DOORBELL_OFFSET, ring->doorbell_index); 1825 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1826 DOORBELL_EN, 1); 1827 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1828 DOORBELL_SOURCE, 0); 1829 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1830 DOORBELL_HIT, 0); 1831 if (amdgpu_sriov_vf(adev)) 1832 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1833 DOORBELL_MODE, 1); 1834 } else { 1835 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1836 DOORBELL_EN, 0); 1837 } 1838 1839 mqd->cp_hqd_pq_doorbell_control = tmp; 1840 1841 /* disable the queue if it's active */ 1842 ring->wptr = 0; 1843 mqd->cp_hqd_dequeue_request = 0; 1844 mqd->cp_hqd_pq_rptr = 0; 1845 mqd->cp_hqd_pq_wptr_lo = 0; 1846 mqd->cp_hqd_pq_wptr_hi = 0; 1847 1848 /* set the pointer to the MQD */ 1849 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 1850 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 1851 1852 /* set MQD vmid to 0 */ 1853 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL); 1854 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 1855 mqd->cp_mqd_control = tmp; 1856 1857 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 1858 hqd_gpu_addr = ring->gpu_addr >> 8; 1859 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 1860 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 1861 1862 /* set up the HQD, this is similar to CP_RB0_CNTL */ 1863 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL); 1864 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 1865 (order_base_2(ring->ring_size / 4) - 1)); 1866 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 1867 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 1868 #ifdef __BIG_ENDIAN 1869 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); 1870 #endif 1871 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 1872 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); 1873 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 1874 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 1875 mqd->cp_hqd_pq_control = tmp; 1876 1877 /* set the wb address whether it's enabled or not */ 1878 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 1879 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 1880 mqd->cp_hqd_pq_rptr_report_addr_hi = 1881 upper_32_bits(wb_gpu_addr) & 0xffff; 1882 1883 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 1884 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 1885 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 1886 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 1887 1888 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 1889 ring->wptr = 0; 1890 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR); 1891 1892 /* set the vmid for the queue */ 1893 mqd->cp_hqd_vmid = 0; 1894 1895 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE); 1896 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 1897 mqd->cp_hqd_persistent_state = tmp; 1898 1899 /* set MIN_IB_AVAIL_SIZE */ 1900 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL); 1901 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 1902 mqd->cp_hqd_ib_control = tmp; 1903 1904 /* set 
static priority for a queue/ring */
1905 gfx_v9_4_3_mqd_set_priority(ring, mqd);
1906 mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1907
1908 /* the map_queues packet doesn't need to activate the queue,
1909 * so only the KIQ needs to set this field.
1910 */
1911 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1912 mqd->cp_hqd_active = 1;
1913
1914 return 0;
1915 }
1916
1917 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1918 int xcc_id)
1919 {
1920 struct amdgpu_device *adev = ring->adev;
1921 struct v9_mqd *mqd = ring->mqd_ptr;
1922 int j;
1923
1924 /* disable wptr polling */
1925 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1926
1927 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1928 mqd->cp_hqd_eop_base_addr_lo);
1929 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1930 mqd->cp_hqd_eop_base_addr_hi);
1931
1932 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1933 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1934 mqd->cp_hqd_eop_control);
1935
1936 /* enable doorbell? */
1937 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1938 mqd->cp_hqd_pq_doorbell_control);
1939
1940 /* disable the queue if it's active */
1941 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1942 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1943 for (j = 0; j < adev->usec_timeout; j++) {
1944 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1945 break;
1946 udelay(1);
1947 }
1948 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1949 mqd->cp_hqd_dequeue_request);
1950 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1951 mqd->cp_hqd_pq_rptr);
1952 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1953 mqd->cp_hqd_pq_wptr_lo);
1954 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1955 mqd->cp_hqd_pq_wptr_hi);
1956 }
1957
1958 /* set the pointer to the MQD */
1959 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1960 mqd->cp_mqd_base_addr_lo);
1961 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1962 mqd->cp_mqd_base_addr_hi);
1963
1964 /* set MQD vmid to 0 */
1965 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1966 mqd->cp_mqd_control);
1967
1968 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1969 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1970 mqd->cp_hqd_pq_base_lo);
1971 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1972 mqd->cp_hqd_pq_base_hi);
1973
1974 /* set up the HQD, this is similar to CP_RB0_CNTL */
1975 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1976 mqd->cp_hqd_pq_control);
1977
1978 /* set the wb address whether it's enabled or not */
1979 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1980 mqd->cp_hqd_pq_rptr_report_addr_lo);
1981 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1982 mqd->cp_hqd_pq_rptr_report_addr_hi);
1983
1984 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1985 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1986 mqd->cp_hqd_pq_wptr_poll_addr_lo);
1987 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1988 mqd->cp_hqd_pq_wptr_poll_addr_hi);
1989
1990 /* enable the doorbell if requested */
1991 if (ring->use_doorbell) {
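/* Each XCC gets its own doorbell window: offset the generic kiq and
 * userqueue_end indices by xcc_id * xcc_doorbell_range; the '* 2'
 * converts a 64-bit doorbell index to a 32-bit one and '<< 2' turns it
 * into a byte offset (assumption based on the usual amdgpu doorbell
 * layout).
 */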
1992 WREG32_SOC15(
1993 GC, GET_INST(GC, xcc_id),
1994 regCP_MEC_DOORBELL_RANGE_LOWER,
1995 ((adev->doorbell_index.kiq +
1996 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1997 2) << 2);
1998 WREG32_SOC15(
1999 GC, GET_INST(GC, xcc_id),
2000 regCP_MEC_DOORBELL_RANGE_UPPER,
2001 ((adev->doorbell_index.userqueue_end +
2002 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
2003 2) << 2);
2004 }
2005
2006 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
2007 mqd->cp_hqd_pq_doorbell_control);
2008
2009 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2010 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2011 mqd->cp_hqd_pq_wptr_lo);
2012 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2013 mqd->cp_hqd_pq_wptr_hi);
2014
2015 /* set the vmid for the queue */
2016 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2017
2018 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2019 mqd->cp_hqd_persistent_state);
2020
2021 /* activate the queue */
2022 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2023 mqd->cp_hqd_active);
2024
2025 if (ring->use_doorbell)
2026 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2027
2028 return 0;
2029 }
2030
2031 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2032 int xcc_id)
2033 {
2034 struct amdgpu_device *adev = ring->adev;
2035 int j;
2036
2037 /* disable the queue if it's active */
2038 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2039
2040 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2041
2042 for (j = 0; j < adev->usec_timeout; j++) {
2043 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2044 break;
2045 udelay(1);
2046 }
2047
2048 if (j >= adev->usec_timeout) { /* match the loop bound above */
2049 DRM_DEBUG("%s dequeue request failed.\n", ring->name);
2050
2051 /* Manual disable if dequeue request times out */
2052 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2053 }
2054
2055 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2056 0);
2057 }
2058
2059 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2060 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2061 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2062 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2063 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2064 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2065 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2066 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2067
2068 return 0;
2069 }
2070
2071 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2072 {
2073 struct amdgpu_device *adev = ring->adev;
2074 struct v9_mqd *mqd = ring->mqd_ptr;
2075 struct v9_mqd *tmp_mqd;
2076
2077 gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2078
2079 /* The GPU could be in a bad state during probe; the driver triggers a
2080 * reset after loading the SMU, and in that case the MQD has not been
2081 * initialized, so the driver needs to re-init it.
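 * gfx_v9_4_3_xcc_mqd_init() always sets non-zero control bits there, so we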
2082 * check mqd->cp_hqd_pq_control since this value should not be 0
2083 */
2084 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2085 if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
2086 /* for the GPU_RESET case, reset the MQD to a clean status */
2087 if (adev->gfx.kiq[xcc_id].mqd_backup)
2088 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2089
2090 /* reset ring buffer */
2091 ring->wptr = 0;
2092 amdgpu_ring_clear_ring(ring);
2093 mutex_lock(&adev->srbm_mutex);
2094 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2095 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2096 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2097 mutex_unlock(&adev->srbm_mutex);
2098 } else {
2099 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2100 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2101 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2102 mutex_lock(&adev->srbm_mutex);
2103 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2104 amdgpu_ring_clear_ring(ring);
2105 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2106 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2107 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2108 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2109 mutex_unlock(&adev->srbm_mutex);
2110
2111 if (adev->gfx.kiq[xcc_id].mqd_backup)
2112 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2113 }
2114
2115 return 0;
2116 }
2117
2118 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
2119 {
2120 struct amdgpu_device *adev = ring->adev;
2121 struct v9_mqd *mqd = ring->mqd_ptr;
2122 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2123 struct v9_mqd *tmp_mqd;
2124
2125 /* Same as the KIQ init above: the driver needs to re-init the MQD if
2126 * mqd->cp_hqd_pq_control shows it was never initialized before.
2127 */
2128 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2129
2130 if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2131 (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2132 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2133 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2134 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2135 mutex_lock(&adev->srbm_mutex);
2136 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2137 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2138 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2139 mutex_unlock(&adev->srbm_mutex);
2140
2141 if (adev->gfx.mec.mqd_backup[mqd_idx])
2142 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2143 } else {
2144 /* restore MQD to a clean status */
2145 if (adev->gfx.mec.mqd_backup[mqd_idx])
2146 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2147 /* reset ring buffer */
2148 ring->wptr = 0;
2149 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2150 amdgpu_ring_clear_ring(ring);
2151 }
2152
2153 return 0;
2154 }
2155
2156 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2157 {
2158 struct amdgpu_ring *ring;
2159 int j;
2160
2161 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2162 ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
2163 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2164 mutex_lock(&adev->srbm_mutex);
2165 soc15_grbm_select(adev,
ring->me, 2166 ring->pipe, 2167 ring->queue, 0, GET_INST(GC, xcc_id)); 2168 gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id); 2169 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2170 mutex_unlock(&adev->srbm_mutex); 2171 } 2172 } 2173 2174 return 0; 2175 } 2176 2177 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) 2178 { 2179 struct amdgpu_ring *ring; 2180 int r; 2181 2182 ring = &adev->gfx.kiq[xcc_id].ring; 2183 2184 r = amdgpu_bo_reserve(ring->mqd_obj, false); 2185 if (unlikely(r != 0)) 2186 return r; 2187 2188 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2189 if (unlikely(r != 0)) { 2190 amdgpu_bo_unreserve(ring->mqd_obj); 2191 return r; 2192 } 2193 2194 gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id); 2195 amdgpu_bo_kunmap(ring->mqd_obj); 2196 ring->mqd_ptr = NULL; 2197 amdgpu_bo_unreserve(ring->mqd_obj); 2198 return 0; 2199 } 2200 2201 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) 2202 { 2203 struct amdgpu_ring *ring = NULL; 2204 int r = 0, i; 2205 2206 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); 2207 2208 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2209 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; 2210 2211 r = amdgpu_bo_reserve(ring->mqd_obj, false); 2212 if (unlikely(r != 0)) 2213 goto done; 2214 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2215 if (!r) { 2216 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); 2217 amdgpu_bo_kunmap(ring->mqd_obj); 2218 ring->mqd_ptr = NULL; 2219 } 2220 amdgpu_bo_unreserve(ring->mqd_obj); 2221 if (r) 2222 goto done; 2223 } 2224 2225 r = amdgpu_gfx_enable_kcq(adev, xcc_id); 2226 done: 2227 return r; 2228 } 2229 2230 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) 2231 { 2232 struct amdgpu_ring *ring; 2233 int r, j; 2234 2235 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id); 2236 2237 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2238 gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id); 2239 2240 r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); 2241 if (r) 2242 return r; 2243 } 2244 2245 r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); 2246 if (r) 2247 return r; 2248 2249 r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id); 2250 if (r) 2251 return r; 2252 2253 for (j = 0; j < adev->gfx.num_compute_rings; j++) { 2254 ring = &adev->gfx.compute_ring 2255 [j + xcc_id * adev->gfx.num_compute_rings]; 2256 r = amdgpu_ring_test_helper(ring); 2257 if (r) 2258 return r; 2259 } 2260 2261 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id); 2262 2263 return 0; 2264 } 2265 2266 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) 2267 { 2268 int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp; 2269 2270 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2271 if (amdgpu_sriov_vf(adev)) { 2272 enum amdgpu_gfx_partition mode; 2273 2274 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2275 AMDGPU_XCP_FL_NONE); 2276 if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2277 return -EINVAL; 2278 num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev); 2279 adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp; 2280 num_xcp = num_xcc / num_xcc_per_xcp; 2281 r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode); 2282 2283 } else { 2284 if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, 2285 AMDGPU_XCP_FL_NONE) == 2286 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) 2287 r = amdgpu_xcp_switch_partition_mode( 2288 adev->xcp_mgr, amdgpu_user_partt_mode); 2289 } 2290 if (r) 2291 return r; 2292 2293 for (i = 0; i < 
num_xcc; i++) { 2294 r = gfx_v9_4_3_xcc_cp_resume(adev, i); 2295 if (r) 2296 return r; 2297 } 2298 2299 return 0; 2300 } 2301 2302 static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, 2303 int xcc_id) 2304 { 2305 gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); 2306 } 2307 2308 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) 2309 { 2310 if (amdgpu_gfx_disable_kcq(adev, xcc_id)) 2311 DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id); 2312 2313 if (amdgpu_sriov_vf(adev)) { 2314 /* must disable polling for SRIOV when hw finished, otherwise 2315 * CPC engine may still keep fetching WB address which is already 2316 * invalid after sw finished and trigger DMAR reading error in 2317 * hypervisor side. 2318 */ 2319 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); 2320 return; 2321 } 2322 2323 /* Use deinitialize sequence from CAIL when unbinding device 2324 * from driver, otherwise KIQ is hanging when binding back 2325 */ 2326 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2327 mutex_lock(&adev->srbm_mutex); 2328 soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me, 2329 adev->gfx.kiq[xcc_id].ring.pipe, 2330 adev->gfx.kiq[xcc_id].ring.queue, 0, 2331 GET_INST(GC, xcc_id)); 2332 gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring, 2333 xcc_id); 2334 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id)); 2335 mutex_unlock(&adev->srbm_mutex); 2336 } 2337 2338 gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); 2339 gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); 2340 } 2341 2342 static int gfx_v9_4_3_hw_init(void *handle) 2343 { 2344 int r; 2345 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2346 2347 amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size, 2348 adev->gfx.cleaner_shader_ptr); 2349 2350 if (!amdgpu_sriov_vf(adev)) 2351 gfx_v9_4_3_init_golden_registers(adev); 2352 2353 gfx_v9_4_3_constants_init(adev); 2354 2355 r = adev->gfx.rlc.funcs->resume(adev); 2356 if (r) 2357 return r; 2358 2359 r = gfx_v9_4_3_cp_resume(adev); 2360 if (r) 2361 return r; 2362 2363 return r; 2364 } 2365 2366 static int gfx_v9_4_3_hw_fini(void *handle) 2367 { 2368 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2369 int i, num_xcc; 2370 2371 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 2372 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 2373 amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0); 2374 2375 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2376 for (i = 0; i < num_xcc; i++) { 2377 gfx_v9_4_3_xcc_fini(adev, i); 2378 } 2379 2380 return 0; 2381 } 2382 2383 static int gfx_v9_4_3_suspend(void *handle) 2384 { 2385 return gfx_v9_4_3_hw_fini(handle); 2386 } 2387 2388 static int gfx_v9_4_3_resume(void *handle) 2389 { 2390 return gfx_v9_4_3_hw_init(handle); 2391 } 2392 2393 static bool gfx_v9_4_3_is_idle(void *handle) 2394 { 2395 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2396 int i, num_xcc; 2397 2398 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2399 for (i = 0; i < num_xcc; i++) { 2400 if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS), 2401 GRBM_STATUS, GUI_ACTIVE)) 2402 return false; 2403 } 2404 return true; 2405 } 2406 2407 static int gfx_v9_4_3_wait_for_idle(void *handle) 2408 { 2409 unsigned i; 2410 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2411 2412 for (i = 0; i < adev->usec_timeout; i++) { 2413 if (gfx_v9_4_3_is_idle(handle)) 2414 return 0; 2415 udelay(1); 2416 } 2417 return -ETIMEDOUT; 2418 } 2419 2420 static int 
gfx_v9_4_3_soft_reset(void *handle) 2421 { 2422 u32 grbm_soft_reset = 0; 2423 u32 tmp; 2424 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2425 2426 /* GRBM_STATUS */ 2427 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); 2428 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 2429 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 2430 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 2431 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 2432 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 2433 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { 2434 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2435 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 2436 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2437 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); 2438 } 2439 2440 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { 2441 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2442 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 2443 } 2444 2445 /* GRBM_STATUS2 */ 2446 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2); 2447 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) 2448 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 2449 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2450 2451 2452 if (grbm_soft_reset) { 2453 /* stop the rlc */ 2454 adev->gfx.rlc.funcs->stop(adev); 2455 2456 /* Disable MEC parsing/prefetching */ 2457 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0); 2458 2459 if (grbm_soft_reset) { 2460 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); 2461 tmp |= grbm_soft_reset; 2462 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 2463 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); 2464 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); 2465 2466 udelay(50); 2467 2468 tmp &= ~grbm_soft_reset; 2469 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp); 2470 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET); 2471 } 2472 2473 /* Wait a little for things to settle down */ 2474 udelay(50); 2475 } 2476 return 0; 2477 } 2478 2479 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, 2480 uint32_t vmid, 2481 uint32_t gds_base, uint32_t gds_size, 2482 uint32_t gws_base, uint32_t gws_size, 2483 uint32_t oa_base, uint32_t oa_size) 2484 { 2485 struct amdgpu_device *adev = ring->adev; 2486 2487 /* GDS Base */ 2488 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2489 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid, 2490 gds_base); 2491 2492 /* GDS Size */ 2493 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2494 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid, 2495 gds_size); 2496 2497 /* GWS */ 2498 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2499 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid, 2500 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 2501 2502 /* OA */ 2503 gfx_v9_4_3_write_data_to_reg(ring, 0, false, 2504 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid, 2505 (1 << (oa_size + oa_base)) - (1 << oa_base)); 2506 } 2507 2508 static int gfx_v9_4_3_early_init(void *handle) 2509 { 2510 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2511 2512 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 2513 AMDGPU_MAX_COMPUTE_RINGS); 2514 gfx_v9_4_3_set_kiq_pm4_funcs(adev); 2515 gfx_v9_4_3_set_ring_funcs(adev); 2516 gfx_v9_4_3_set_irq_funcs(adev); 2517 gfx_v9_4_3_set_gds_init(adev); 2518 gfx_v9_4_3_set_rlc_funcs(adev); 2519 2520 /* init rlcg reg access ctrl */ 2521 
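/* (assumption: doing this at early init makes RLCG-assisted register
 * access, used e.g. under SR-IOV, available before any other hw
 * programming touches GC registers) */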
gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev); 2522 2523 return gfx_v9_4_3_init_microcode(adev); 2524 } 2525 2526 static int gfx_v9_4_3_late_init(void *handle) 2527 { 2528 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2529 int r; 2530 2531 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 2532 if (r) 2533 return r; 2534 2535 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 2536 if (r) 2537 return r; 2538 2539 r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0); 2540 if (r) 2541 return r; 2542 2543 if (adev->gfx.ras && 2544 adev->gfx.ras->enable_watchdog_timer) 2545 adev->gfx.ras->enable_watchdog_timer(adev); 2546 2547 return 0; 2548 } 2549 2550 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev, 2551 bool enable, int xcc_id) 2552 { 2553 uint32_t def, data; 2554 2555 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 2556 return; 2557 2558 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), 2559 regRLC_CGTT_MGCG_OVERRIDE); 2560 2561 if (enable) 2562 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 2563 else 2564 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 2565 2566 if (def != data) 2567 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2568 regRLC_CGTT_MGCG_OVERRIDE, data); 2569 2570 } 2571 2572 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev, 2573 bool enable, int xcc_id) 2574 { 2575 uint32_t def, data; 2576 2577 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 2578 return; 2579 2580 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), 2581 regRLC_CGTT_MGCG_OVERRIDE); 2582 2583 if (enable) 2584 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; 2585 else 2586 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; 2587 2588 if (def != data) 2589 WREG32_SOC15(GC, GET_INST(GC, xcc_id), 2590 regRLC_CGTT_MGCG_OVERRIDE, data); 2591 } 2592 2593 static void 2594 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, 2595 bool enable, int xcc_id) 2596 { 2597 uint32_t data, def; 2598 2599 /* It is disabled by HW by default */ 2600 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 2601 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 2602 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 2603 2604 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 2605 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 2606 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 2607 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 2608 2609 if (def != data) 2610 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 2611 2612 /* MGLS is a global flag to control all MGLS in GFX */ 2613 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 2614 /* 2 - RLC memory Light sleep */ 2615 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { 2616 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); 2617 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 2618 if (def != data) 2619 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); 2620 } 2621 /* 3 - CP memory Light sleep */ 2622 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 2623 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); 2624 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 2625 if (def != data) 2626 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); 2627 } 2628 } 2629 } else { 2630 /* 1 - MGCG_OVERRIDE */ 2631 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 2632 2633 data |= 
(RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 2634 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 2635 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 2636 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 2637 2638 if (def != data) 2639 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 2640 2641 /* 2 - disable MGLS in RLC */ 2642 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); 2643 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 2644 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 2645 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); 2646 } 2647 2648 /* 3 - disable MGLS in CP */ 2649 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); 2650 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 2651 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 2652 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); 2653 } 2654 } 2655 2656 } 2657 2658 static void 2659 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 2660 bool enable, int xcc_id) 2661 { 2662 uint32_t def, data; 2663 2664 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 2665 2666 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); 2667 /* unset CGCG override */ 2668 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 2669 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 2670 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 2671 else 2672 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 2673 /* update CGCG and CGLS override bits */ 2674 if (def != data) 2675 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); 2676 2677 /* CGCG Hysteresis: 400us */ 2678 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); 2679 2680 data = (0x2710 2681 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 2682 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 2683 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 2684 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 2685 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 2686 if (def != data) 2687 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); 2688 2689 /* set IDLE_POLL_COUNT(0x33450100)*/ 2690 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); 2691 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 2692 (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 2693 if (def != data) 2694 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); 2695 } else { 2696 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); 2697 /* reset CGCG/CGLS bits */ 2698 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 2699 /* disable cgcg and cgls in FSM */ 2700 if (def != data) 2701 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); 2702 } 2703 2704 } 2705 2706 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, 2707 bool enable, int xcc_id) 2708 { 2709 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); 2710 2711 if (enable) { 2712 /* FGCG */ 2713 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); 2714 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); 2715 2716 /* CGCG/CGLS should be enabled after MGCG/MGLS 2717 * === MGCG + MGLS === 2718 */ 2719 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, 2720 xcc_id); 2721 /* === CGCG + CGLS === */ 2722 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, 2723 xcc_id); 2724 } 
else { 2725 /* CGCG/CGLS should be disabled before MGCG/MGLS 2726 * === CGCG + CGLS === 2727 */ 2728 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, 2729 xcc_id); 2730 /* === MGCG + MGLS === */ 2731 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, 2732 xcc_id); 2733 2734 /* FGCG */ 2735 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); 2736 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); 2737 } 2738 2739 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); 2740 2741 return 0; 2742 } 2743 2744 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { 2745 .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, 2746 .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode, 2747 .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode, 2748 .init = gfx_v9_4_3_rlc_init, 2749 .resume = gfx_v9_4_3_rlc_resume, 2750 .stop = gfx_v9_4_3_rlc_stop, 2751 .reset = gfx_v9_4_3_rlc_reset, 2752 .start = gfx_v9_4_3_rlc_start, 2753 .update_spm_vmid = gfx_v9_4_3_update_spm_vmid, 2754 .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range, 2755 }; 2756 2757 static int gfx_v9_4_3_set_powergating_state(void *handle, 2758 enum amd_powergating_state state) 2759 { 2760 return 0; 2761 } 2762 2763 static int gfx_v9_4_3_set_clockgating_state(void *handle, 2764 enum amd_clockgating_state state) 2765 { 2766 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2767 int i, num_xcc; 2768 2769 if (amdgpu_sriov_vf(adev)) 2770 return 0; 2771 2772 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2773 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2774 case IP_VERSION(9, 4, 3): 2775 case IP_VERSION(9, 4, 4): 2776 for (i = 0; i < num_xcc; i++) 2777 gfx_v9_4_3_xcc_update_gfx_clock_gating( 2778 adev, state == AMD_CG_STATE_GATE, i); 2779 break; 2780 default: 2781 break; 2782 } 2783 return 0; 2784 } 2785 2786 static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags) 2787 { 2788 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2789 int data; 2790 2791 if (amdgpu_sriov_vf(adev)) 2792 *flags = 0; 2793 2794 /* AMD_CG_SUPPORT_GFX_MGCG */ 2795 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE)); 2796 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 2797 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 2798 2799 /* AMD_CG_SUPPORT_GFX_CGCG */ 2800 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL)); 2801 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 2802 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 2803 2804 /* AMD_CG_SUPPORT_GFX_CGLS */ 2805 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 2806 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 2807 2808 /* AMD_CG_SUPPORT_GFX_RLC_LS */ 2809 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL)); 2810 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 2811 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 2812 2813 /* AMD_CG_SUPPORT_GFX_CP_LS */ 2814 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL)); 2815 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 2816 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 2817 } 2818 2819 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring) 2820 { 2821 struct amdgpu_device *adev = ring->adev; 2822 u32 ref_and_mask, reg_mem_engine; 2823 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 2824 2825 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 2826 switch (ring->me) { 2827 case 1: 2828 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 2829 break; 2830 case 2: 2831 
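/* rings on the second MEC map onto the cp6.. reference bits, again shifted by pipe */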
ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 2832 break; 2833 default: 2834 return; 2835 } 2836 reg_mem_engine = 0; 2837 } else { 2838 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 2839 reg_mem_engine = 1; /* pfp */ 2840 } 2841 2842 gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1, 2843 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 2844 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 2845 ref_and_mask, ref_and_mask, 0x20); 2846 } 2847 2848 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring, 2849 struct amdgpu_job *job, 2850 struct amdgpu_ib *ib, 2851 uint32_t flags) 2852 { 2853 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2854 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 2855 2856 /* Currently, there is a high possibility to get wave ID mismatch 2857 * between ME and GDS, leading to a hw deadlock, because ME generates 2858 * different wave IDs than the GDS expects. This situation happens 2859 * randomly when at least 5 compute pipes use GDS ordered append. 2860 * The wave IDs generated by ME are also wrong after suspend/resume. 2861 * Those are probably bugs somewhere else in the kernel driver. 2862 * 2863 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 2864 * GDS to 0 for this ring (me/pipe). 2865 */ 2866 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 2867 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2868 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 2869 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 2870 } 2871 2872 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2873 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 2874 amdgpu_ring_write(ring, 2875 #ifdef __BIG_ENDIAN 2876 (2 << 0) | 2877 #endif 2878 lower_32_bits(ib->gpu_addr)); 2879 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 2880 amdgpu_ring_write(ring, control); 2881 } 2882 2883 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 2884 u64 seq, unsigned flags) 2885 { 2886 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 2887 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 2888 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; 2889 2890 /* RELEASE_MEM - flush caches, send int */ 2891 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 2892 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | 2893 EOP_TC_NC_ACTION_EN) : 2894 (EOP_TCL1_ACTION_EN | 2895 EOP_TC_ACTION_EN | 2896 EOP_TC_WB_ACTION_EN | 2897 EOP_TC_MD_ACTION_EN)) | 2898 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2899 EVENT_INDEX(5))); 2900 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 
2 : 0)); 2901 2902 /* 2903 * the address should be Qword aligned if 64bit write, Dword 2904 * aligned if only send 32bit data low (discard data high) 2905 */ 2906 if (write64bit) 2907 BUG_ON(addr & 0x7); 2908 else 2909 BUG_ON(addr & 0x3); 2910 amdgpu_ring_write(ring, lower_32_bits(addr)); 2911 amdgpu_ring_write(ring, upper_32_bits(addr)); 2912 amdgpu_ring_write(ring, lower_32_bits(seq)); 2913 amdgpu_ring_write(ring, upper_32_bits(seq)); 2914 amdgpu_ring_write(ring, 0); 2915 } 2916 2917 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 2918 { 2919 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 2920 uint32_t seq = ring->fence_drv.sync_seq; 2921 uint64_t addr = ring->fence_drv.gpu_addr; 2922 2923 gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0, 2924 lower_32_bits(addr), upper_32_bits(addr), 2925 seq, 0xffffffff, 4); 2926 } 2927 2928 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring, 2929 unsigned vmid, uint64_t pd_addr) 2930 { 2931 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 2932 } 2933 2934 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring) 2935 { 2936 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ 2937 } 2938 2939 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring) 2940 { 2941 u64 wptr; 2942 2943 /* XXX check if swapping is necessary on BE */ 2944 if (ring->use_doorbell) 2945 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); 2946 else 2947 BUG(); 2948 return wptr; 2949 } 2950 2951 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring) 2952 { 2953 struct amdgpu_device *adev = ring->adev; 2954 2955 /* XXX check if swapping is necessary on BE */ 2956 if (ring->use_doorbell) { 2957 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); 2958 WDOORBELL64(ring->doorbell_index, ring->wptr); 2959 } else { 2960 BUG(); /* only DOORBELL method supported on gfx9 now */ 2961 } 2962 } 2963 2964 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 2965 u64 seq, unsigned int flags) 2966 { 2967 struct amdgpu_device *adev = ring->adev; 2968 2969 /* we only allocate 32bit for each seq wb address */ 2970 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 2971 2972 /* write fence seq to the "addr" */ 2973 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 2974 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2975 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 2976 amdgpu_ring_write(ring, lower_32_bits(addr)); 2977 amdgpu_ring_write(ring, upper_32_bits(addr)); 2978 amdgpu_ring_write(ring, lower_32_bits(seq)); 2979 2980 if (flags & AMDGPU_FENCE_FLAG_INT) { 2981 /* set register to trigger INT */ 2982 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 2983 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2984 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 2985 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS)); 2986 amdgpu_ring_write(ring, 0); 2987 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 2988 } 2989 } 2990 2991 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 2992 uint32_t reg_val_offs) 2993 { 2994 struct amdgpu_device *adev = ring->adev; 2995 2996 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); 2997 2998 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 2999 amdgpu_ring_write(ring, 0 | /* src: register*/ 3000 (5 << 8) | /* dst: memory */ 3001 (1 << 20)); /* write confirm */ 3002 amdgpu_ring_write(ring, reg); 3003 amdgpu_ring_write(ring, 0); 3004 
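/* followed by the destination: the caller-supplied slot in the
 * writeback buffer that the CP fills with the register value */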
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 3005 reg_val_offs * 4)); 3006 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 3007 reg_val_offs * 4)); 3008 } 3009 3010 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 3011 uint32_t val) 3012 { 3013 uint32_t cmd = 0; 3014 3015 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg); 3016 3017 switch (ring->funcs->type) { 3018 case AMDGPU_RING_TYPE_GFX: 3019 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 3020 break; 3021 case AMDGPU_RING_TYPE_KIQ: 3022 cmd = (1 << 16); /* no inc addr */ 3023 break; 3024 default: 3025 cmd = WR_CONFIRM; 3026 break; 3027 } 3028 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3029 amdgpu_ring_write(ring, cmd); 3030 amdgpu_ring_write(ring, reg); 3031 amdgpu_ring_write(ring, 0); 3032 amdgpu_ring_write(ring, val); 3033 } 3034 3035 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 3036 uint32_t val, uint32_t mask) 3037 { 3038 gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 3039 } 3040 3041 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 3042 uint32_t reg0, uint32_t reg1, 3043 uint32_t ref, uint32_t mask) 3044 { 3045 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, 3046 ref, mask); 3047 } 3048 3049 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring, 3050 unsigned vmid) 3051 { 3052 struct amdgpu_device *adev = ring->adev; 3053 uint32_t value = 0; 3054 3055 if (!adev->debug_exp_resets) 3056 return; 3057 3058 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 3059 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 3060 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 3061 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 3062 amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id); 3063 WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value); 3064 amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id); 3065 } 3066 3067 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3068 struct amdgpu_device *adev, int me, int pipe, 3069 enum amdgpu_interrupt_state state, int xcc_id) 3070 { 3071 u32 mec_int_cntl, mec_int_cntl_reg; 3072 3073 /* 3074 * amdgpu controls only the first MEC. That's why this function only 3075 * handles the setting of interrupts for this specific MEC. All other 3076 * pipes' interrupts are set by amdkfd. 
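 * (hence the me == 1 check below; requests for any other ME are
 * rejected with a debug message)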
3077 */ 3078 3079 if (me == 1) { 3080 switch (pipe) { 3081 case 0: 3082 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); 3083 break; 3084 case 1: 3085 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); 3086 break; 3087 case 2: 3088 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); 3089 break; 3090 case 3: 3091 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); 3092 break; 3093 default: 3094 DRM_DEBUG("invalid pipe %d\n", pipe); 3095 return; 3096 } 3097 } else { 3098 DRM_DEBUG("invalid me %d\n", me); 3099 return; 3100 } 3101 3102 switch (state) { 3103 case AMDGPU_IRQ_STATE_DISABLE: 3104 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); 3105 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3106 TIME_STAMP_INT_ENABLE, 0); 3107 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); 3108 break; 3109 case AMDGPU_IRQ_STATE_ENABLE: 3110 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); 3111 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3112 TIME_STAMP_INT_ENABLE, 1); 3113 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); 3114 break; 3115 default: 3116 break; 3117 } 3118 } 3119 3120 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev, 3121 int xcc_id, int me, int pipe) 3122 { 3123 /* 3124 * amdgpu controls only the first MEC. That's why this function only 3125 * handles the setting of interrupts for this specific MEC. All other 3126 * pipes' interrupts are set by amdkfd. 3127 */ 3128 if (me != 1) 3129 return 0; 3130 3131 switch (pipe) { 3132 case 0: 3133 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); 3134 case 1: 3135 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); 3136 case 2: 3137 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); 3138 case 3: 3139 return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); 3140 default: 3141 return 0; 3142 } 3143 } 3144 3145 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, 3146 struct amdgpu_irq_src *source, 3147 unsigned type, 3148 enum amdgpu_interrupt_state state) 3149 { 3150 u32 mec_int_cntl_reg, mec_int_cntl; 3151 int i, j, k, num_xcc; 3152 3153 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3154 switch (state) { 3155 case AMDGPU_IRQ_STATE_DISABLE: 3156 case AMDGPU_IRQ_STATE_ENABLE: 3157 for (i = 0; i < num_xcc; i++) { 3158 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, 3159 PRIV_REG_INT_ENABLE, 3160 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 3161 for (j = 0; j < adev->gfx.mec.num_mec; j++) { 3162 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 3163 /* MECs start at 1 */ 3164 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k); 3165 3166 if (mec_int_cntl_reg) { 3167 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i); 3168 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3169 PRIV_REG_INT_ENABLE, 3170 state == AMDGPU_IRQ_STATE_ENABLE ? 
3171 1 : 0); 3172 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i); 3173 } 3174 } 3175 } 3176 } 3177 break; 3178 default: 3179 break; 3180 } 3181 3182 return 0; 3183 } 3184 3185 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev, 3186 struct amdgpu_irq_src *source, 3187 unsigned type, 3188 enum amdgpu_interrupt_state state) 3189 { 3190 u32 mec_int_cntl_reg, mec_int_cntl; 3191 int i, j, k, num_xcc; 3192 3193 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3194 switch (state) { 3195 case AMDGPU_IRQ_STATE_DISABLE: 3196 case AMDGPU_IRQ_STATE_ENABLE: 3197 for (i = 0; i < num_xcc; i++) { 3198 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, 3199 OPCODE_ERROR_INT_ENABLE, 3200 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 3201 for (j = 0; j < adev->gfx.mec.num_mec; j++) { 3202 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 3203 /* MECs start at 1 */ 3204 mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k); 3205 3206 if (mec_int_cntl_reg) { 3207 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i); 3208 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 3209 OPCODE_ERROR_INT_ENABLE, 3210 state == AMDGPU_IRQ_STATE_ENABLE ? 3211 1 : 0); 3212 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i); 3213 } 3214 } 3215 } 3216 } 3217 break; 3218 default: 3219 break; 3220 } 3221 3222 return 0; 3223 } 3224 3225 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, 3226 struct amdgpu_irq_src *source, 3227 unsigned type, 3228 enum amdgpu_interrupt_state state) 3229 { 3230 int i, num_xcc; 3231 3232 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3233 switch (state) { 3234 case AMDGPU_IRQ_STATE_DISABLE: 3235 case AMDGPU_IRQ_STATE_ENABLE: 3236 for (i = 0; i < num_xcc; i++) 3237 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, 3238 PRIV_INSTR_INT_ENABLE, 3239 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 3240 break; 3241 default: 3242 break; 3243 } 3244 3245 return 0; 3246 } 3247 3248 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, 3249 struct amdgpu_irq_src *src, 3250 unsigned type, 3251 enum amdgpu_interrupt_state state) 3252 { 3253 int i, num_xcc; 3254 3255 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 3256 for (i = 0; i < num_xcc; i++) { 3257 switch (type) { 3258 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 3259 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3260 adev, 1, 0, state, i); 3261 break; 3262 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 3263 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3264 adev, 1, 1, state, i); 3265 break; 3266 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 3267 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3268 adev, 1, 2, state, i); 3269 break; 3270 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 3271 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3272 adev, 1, 3, state, i); 3273 break; 3274 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 3275 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3276 adev, 2, 0, state, i); 3277 break; 3278 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 3279 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3280 adev, 2, 1, state, i); 3281 break; 3282 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 3283 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3284 adev, 2, 2, state, i); 3285 break; 3286 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 3287 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( 3288 adev, 2, 3, state, i); 3289 break; 3290 default: 3291 break; 3292 } 3293 } 3294 3295 return 0; 3296 } 3297 3298 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, 3299 struct amdgpu_irq_src *source, 3300 struct amdgpu_iv_entry *entry) 3301 { 3302 int i, xcc_id; 3303 u8 me_id, pipe_id, queue_id; 3304 struct amdgpu_ring *ring; 3305 3306 DRM_DEBUG("IH: CP EOP\n"); 3307 me_id = (entry->ring_id & 0x0c) >> 2; 3308 pipe_id = (entry->ring_id & 0x03) >> 0; 3309 queue_id = (entry->ring_id & 0x70) >> 4; 3310 3311 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); 3312 3313 if (xcc_id == -EINVAL) 3314 return -EINVAL; 3315 3316 switch (me_id) { 3317 case 0: 3318 case 1: 3319 case 2: 3320 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3321 ring = &adev->gfx.compute_ring 3322 [i + 3323 xcc_id * adev->gfx.num_compute_rings]; 3324 /* Per-queue interrupt is supported for MEC starting from VI. 3325 * The interrupt can only be enabled/disabled per pipe instead of per queue. 
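 * The IV entry still carries the queue id, so the loop below matches
 * the exact ring before processing its fences.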
3326 */ 3327 3328 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 3329 amdgpu_fence_process(ring); 3330 } 3331 break; 3332 } 3333 return 0; 3334 } 3335 3336 static void gfx_v9_4_3_fault(struct amdgpu_device *adev, 3337 struct amdgpu_iv_entry *entry) 3338 { 3339 u8 me_id, pipe_id, queue_id; 3340 struct amdgpu_ring *ring; 3341 int i, xcc_id; 3342 3343 me_id = (entry->ring_id & 0x0c) >> 2; 3344 pipe_id = (entry->ring_id & 0x03) >> 0; 3345 queue_id = (entry->ring_id & 0x70) >> 4; 3346 3347 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id); 3348 3349 if (xcc_id == -EINVAL) 3350 return; 3351 3352 switch (me_id) { 3353 case 0: 3354 case 1: 3355 case 2: 3356 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3357 ring = &adev->gfx.compute_ring 3358 [i + 3359 xcc_id * adev->gfx.num_compute_rings]; 3360 if (ring->me == me_id && ring->pipe == pipe_id && 3361 ring->queue == queue_id) 3362 drm_sched_fault(&ring->sched); 3363 } 3364 break; 3365 } 3366 } 3367 3368 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev, 3369 struct amdgpu_irq_src *source, 3370 struct amdgpu_iv_entry *entry) 3371 { 3372 DRM_ERROR("Illegal register access in command stream\n"); 3373 gfx_v9_4_3_fault(adev, entry); 3374 return 0; 3375 } 3376 3377 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev, 3378 struct amdgpu_irq_src *source, 3379 struct amdgpu_iv_entry *entry) 3380 { 3381 DRM_ERROR("Illegal opcode in command stream\n"); 3382 gfx_v9_4_3_fault(adev, entry); 3383 return 0; 3384 } 3385 3386 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev, 3387 struct amdgpu_irq_src *source, 3388 struct amdgpu_iv_entry *entry) 3389 { 3390 DRM_ERROR("Illegal instruction in command stream\n"); 3391 gfx_v9_4_3_fault(adev, entry); 3392 return 0; 3393 } 3394 3395 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring) 3396 { 3397 const unsigned int cp_coher_cntl = 3398 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) | 3399 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) | 3400 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) | 3401 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) | 3402 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1); 3403 3404 /* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */ 3405 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); 3406 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */ 3407 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 3408 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 3409 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 3410 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 3411 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 3412 } 3413 3414 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring, 3415 uint32_t pipe, bool enable) 3416 { 3417 struct amdgpu_device *adev = ring->adev; 3418 uint32_t val; 3419 uint32_t wcl_cs_reg; 3420 3421 /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */ 3422 val = enable ? 
0x1 : 0x7f;
3423
3424 	switch (pipe) {
3425 	case 0:
3426 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3427 		break;
3428 	case 1:
3429 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3430 		break;
3431 	case 2:
3432 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3433 		break;
3434 	case 3:
3435 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3436 		break;
3437 	default:
3438 		DRM_DEBUG("invalid pipe %d\n", pipe);
3439 		return;
3440 	}
3441
3442 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3443
3444 }

3445 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3446 {
3447 	struct amdgpu_device *adev = ring->adev;
3448 	uint32_t val;
3449 	int i;
3450
3451 	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
3452 	 * the number of gfx waves. Programming the low 5 bits (0x1f) makes sure gfx
3453 	 * only gets around 25% of GPU resources.
3454 	 */
3455 	val = enable ? 0x1f : 0x07ffffff;
3456 	amdgpu_ring_emit_wreg(ring,
3457 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3458 			      val);
3459
3460 	/* Restrict waves for normal/low priority compute queues as well
3461 	 * to get the best QoS for high priority compute jobs.
3462 	 *
3463 	 * amdgpu controls only the 1st ME (CS pipes 0-3).
3464 	 */
3465 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3466 		if (i != ring->pipe)
3467 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3468
3469 	}
3470 }
3471
3472 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3473 				 uint32_t pipe, uint32_t queue,
3474 				 uint32_t xcc_id)
3475 {
3476 	int i, r;
3477 	/* make sure dequeue is complete */
3478 	gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3479 	mutex_lock(&adev->srbm_mutex);
3480 	soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3481 	for (i = 0; i < adev->usec_timeout; i++) {
3482 		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3483 			break;
3484 		udelay(1);
3485 	}
3486 	if (i >= adev->usec_timeout)
3487 		r = -ETIMEDOUT;
3488 	else
3489 		r = 0;
3490 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3491 	mutex_unlock(&adev->srbm_mutex);
3492 	gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3493
3494 	return r;
3495
3496 }
3497
3498 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3499 {
3500 	/* TODO: check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
3501 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
3502 	    adev->gfx.mec_fw_version >= 0x0000009b)
3503 		return true;
3504 	else
3505 		dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n");
3506
3507 	return false;
3508 }
3509
3510 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3511 {
3512 	struct amdgpu_device *adev = ring->adev;
3513 	uint32_t reset_pipe, clean_pipe;
3514 	int r;
3515
3516 	if (!gfx_v9_4_3_pipe_reset_support(adev))
3517 		return -EINVAL;
3518
3519 	gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3520 	mutex_lock(&adev->srbm_mutex);
3521
3522 	reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3523 	clean_pipe = reset_pipe;
3524
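	/* Writing reset_pipe (with the pipe's *_RESET field asserted) and then
	 * immediately writing back clean_pipe (the original CP_MEC_CNTL value)
	 * below pulses the reset for just the targeted MEC pipe.
	 */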
3525 	if (ring->me == 1) {
3526 		switch (ring->pipe) {
3527 		case 0:
3528 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3529 						   MEC_ME1_PIPE0_RESET, 1);
3530 			break;
3531 		case 1:
3532 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3533 						   MEC_ME1_PIPE1_RESET, 1);
3534 			break;
3535 		case 2:
3536 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3537 						   MEC_ME1_PIPE2_RESET, 1);
3538 			break;
3539 		case 3:
3540 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3541 						   MEC_ME1_PIPE3_RESET, 1);
3542 			break;
3543 		default:
3544 			break;
3545 		}
3546 	} else {
3547 		if (ring->pipe)
3548 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3549 						   MEC_ME2_PIPE1_RESET, 1);
3550 		else
3551 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3552 						   MEC_ME2_PIPE0_RESET, 1);
3553 	}
3554
3555 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3556 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3557 	mutex_unlock(&adev->srbm_mutex);
3558 	gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3559
3560 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3561 	return r;
3562 }
3563
3564 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3565 				unsigned int vmid)
3566 {
3567 	struct amdgpu_device *adev = ring->adev;
3568 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3569 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3570 	unsigned long flags;
3571 	int r;
3572
3573 	if (!adev->debug_exp_resets)
3574 		return -EINVAL;
3575
3576 	if (amdgpu_sriov_vf(adev))
3577 		return -EINVAL;
3578
3579 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3580 		return -EINVAL;
3581
3582 	spin_lock_irqsave(&kiq->ring_lock, flags);
3583
3584 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3585 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3586 		return -ENOMEM;
3587 	}
3588
3589 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3590 				   0, 0);
3591 	amdgpu_ring_commit(kiq_ring);
3592
3593 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3594
3595 	r = amdgpu_ring_test_ring(kiq_ring);
3596 	if (r) {
3597 		dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
3598 			ring->name);
3599 		goto pipe_reset;
3600 	}
3601
3602 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3603 	if (r)
3604 		dev_err(adev->dev, "failed to wait for hqd deactivate, will try pipe reset\n");
3605
3606 pipe_reset:
3607 	if (r) {
3608 		r = gfx_v9_4_3_reset_hw_pipe(ring);
3609 		dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name,
3610 			 r ?
"failed" : "successfully"); 3611 if (r) 3612 return r; 3613 } 3614 3615 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3616 if (unlikely(r != 0)){ 3617 dev_err(adev->dev, "fail to resv mqd_obj\n"); 3618 return r; 3619 } 3620 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3621 if (!r) { 3622 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3623 amdgpu_bo_kunmap(ring->mqd_obj); 3624 ring->mqd_ptr = NULL; 3625 } 3626 amdgpu_bo_unreserve(ring->mqd_obj); 3627 if (r) { 3628 dev_err(adev->dev, "fail to unresv mqd_obj\n"); 3629 return r; 3630 } 3631 spin_lock_irqsave(&kiq->ring_lock, flags); 3632 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); 3633 if (r) { 3634 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3635 return -ENOMEM; 3636 } 3637 kiq->pmf->kiq_map_queues(kiq_ring, ring); 3638 amdgpu_ring_commit(kiq_ring); 3639 spin_unlock_irqrestore(&kiq->ring_lock, flags); 3640 3641 r = amdgpu_ring_test_ring(kiq_ring); 3642 if (r) { 3643 dev_err(adev->dev, "fail to remap queue\n"); 3644 return r; 3645 } 3646 return amdgpu_ring_test_ring(ring); 3647 } 3648 3649 enum amdgpu_gfx_cp_ras_mem_id { 3650 AMDGPU_GFX_CP_MEM1 = 1, 3651 AMDGPU_GFX_CP_MEM2, 3652 AMDGPU_GFX_CP_MEM3, 3653 AMDGPU_GFX_CP_MEM4, 3654 AMDGPU_GFX_CP_MEM5, 3655 }; 3656 3657 enum amdgpu_gfx_gcea_ras_mem_id { 3658 AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4, 3659 AMDGPU_GFX_GCEA_IORD_CMDMEM, 3660 AMDGPU_GFX_GCEA_GMIWR_CMDMEM, 3661 AMDGPU_GFX_GCEA_GMIRD_CMDMEM, 3662 AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, 3663 AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, 3664 AMDGPU_GFX_GCEA_MAM_DMEM0, 3665 AMDGPU_GFX_GCEA_MAM_DMEM1, 3666 AMDGPU_GFX_GCEA_MAM_DMEM2, 3667 AMDGPU_GFX_GCEA_MAM_DMEM3, 3668 AMDGPU_GFX_GCEA_MAM_AMEM0, 3669 AMDGPU_GFX_GCEA_MAM_AMEM1, 3670 AMDGPU_GFX_GCEA_MAM_AMEM2, 3671 AMDGPU_GFX_GCEA_MAM_AMEM3, 3672 AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, 3673 AMDGPU_GFX_GCEA_WRET_TAGMEM, 3674 AMDGPU_GFX_GCEA_RRET_TAGMEM, 3675 AMDGPU_GFX_GCEA_IOWR_DATAMEM, 3676 AMDGPU_GFX_GCEA_GMIWR_DATAMEM, 3677 AMDGPU_GFX_GCEA_DRAM_DATAMEM, 3678 }; 3679 3680 enum amdgpu_gfx_gc_cane_ras_mem_id { 3681 AMDGPU_GFX_GC_CANE_MEM0 = 0, 3682 }; 3683 3684 enum amdgpu_gfx_gcutcl2_ras_mem_id { 3685 AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160, 3686 }; 3687 3688 enum amdgpu_gfx_gds_ras_mem_id { 3689 AMDGPU_GFX_GDS_MEM0 = 0, 3690 }; 3691 3692 enum amdgpu_gfx_lds_ras_mem_id { 3693 AMDGPU_GFX_LDS_BANK0 = 0, 3694 AMDGPU_GFX_LDS_BANK1, 3695 AMDGPU_GFX_LDS_BANK2, 3696 AMDGPU_GFX_LDS_BANK3, 3697 AMDGPU_GFX_LDS_BANK4, 3698 AMDGPU_GFX_LDS_BANK5, 3699 AMDGPU_GFX_LDS_BANK6, 3700 AMDGPU_GFX_LDS_BANK7, 3701 AMDGPU_GFX_LDS_BANK8, 3702 AMDGPU_GFX_LDS_BANK9, 3703 AMDGPU_GFX_LDS_BANK10, 3704 AMDGPU_GFX_LDS_BANK11, 3705 AMDGPU_GFX_LDS_BANK12, 3706 AMDGPU_GFX_LDS_BANK13, 3707 AMDGPU_GFX_LDS_BANK14, 3708 AMDGPU_GFX_LDS_BANK15, 3709 AMDGPU_GFX_LDS_BANK16, 3710 AMDGPU_GFX_LDS_BANK17, 3711 AMDGPU_GFX_LDS_BANK18, 3712 AMDGPU_GFX_LDS_BANK19, 3713 AMDGPU_GFX_LDS_BANK20, 3714 AMDGPU_GFX_LDS_BANK21, 3715 AMDGPU_GFX_LDS_BANK22, 3716 AMDGPU_GFX_LDS_BANK23, 3717 AMDGPU_GFX_LDS_BANK24, 3718 AMDGPU_GFX_LDS_BANK25, 3719 AMDGPU_GFX_LDS_BANK26, 3720 AMDGPU_GFX_LDS_BANK27, 3721 AMDGPU_GFX_LDS_BANK28, 3722 AMDGPU_GFX_LDS_BANK29, 3723 AMDGPU_GFX_LDS_BANK30, 3724 AMDGPU_GFX_LDS_BANK31, 3725 AMDGPU_GFX_LDS_SP_BUFFER_A, 3726 AMDGPU_GFX_LDS_SP_BUFFER_B, 3727 }; 3728 3729 enum amdgpu_gfx_rlc_ras_mem_id { 3730 AMDGPU_GFX_RLC_GPMF32 = 1, 3731 AMDGPU_GFX_RLC_RLCVF32, 3732 AMDGPU_GFX_RLC_SCRATCH, 3733 AMDGPU_GFX_RLC_SRM_ARAM, 3734 AMDGPU_GFX_RLC_SRM_DRAM, 3735 AMDGPU_GFX_RLC_TCTAG, 3736 AMDGPU_GFX_RLC_SPM_SE, 3737 
AMDGPU_GFX_RLC_SPM_GRBMT, 3738 }; 3739 3740 enum amdgpu_gfx_sp_ras_mem_id { 3741 AMDGPU_GFX_SP_SIMDID0 = 0, 3742 }; 3743 3744 enum amdgpu_gfx_spi_ras_mem_id { 3745 AMDGPU_GFX_SPI_MEM0 = 0, 3746 AMDGPU_GFX_SPI_MEM1, 3747 AMDGPU_GFX_SPI_MEM2, 3748 AMDGPU_GFX_SPI_MEM3, 3749 }; 3750 3751 enum amdgpu_gfx_sqc_ras_mem_id { 3752 AMDGPU_GFX_SQC_INST_CACHE_A = 100, 3753 AMDGPU_GFX_SQC_INST_CACHE_B = 101, 3754 AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102, 3755 AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103, 3756 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104, 3757 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105, 3758 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106, 3759 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107, 3760 AMDGPU_GFX_SQC_DATA_CACHE_A = 200, 3761 AMDGPU_GFX_SQC_DATA_CACHE_B = 201, 3762 AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202, 3763 AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203, 3764 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204, 3765 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205, 3766 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206, 3767 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207, 3768 AMDGPU_GFX_SQC_DIRTY_BIT_A = 208, 3769 AMDGPU_GFX_SQC_DIRTY_BIT_B = 209, 3770 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210, 3771 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211, 3772 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212, 3773 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213, 3774 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108, 3775 }; 3776 3777 enum amdgpu_gfx_sq_ras_mem_id { 3778 AMDGPU_GFX_SQ_SGPR_MEM0 = 0, 3779 AMDGPU_GFX_SQ_SGPR_MEM1, 3780 AMDGPU_GFX_SQ_SGPR_MEM2, 3781 AMDGPU_GFX_SQ_SGPR_MEM3, 3782 }; 3783 3784 enum amdgpu_gfx_ta_ras_mem_id { 3785 AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1, 3786 AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, 3787 AMDGPU_GFX_TA_FS_CFIFO_RAM, 3788 AMDGPU_GFX_TA_FSX_LFIFO, 3789 AMDGPU_GFX_TA_FS_DFIFO_RAM, 3790 }; 3791 3792 enum amdgpu_gfx_tcc_ras_mem_id { 3793 AMDGPU_GFX_TCC_MEM1 = 1, 3794 }; 3795 3796 enum amdgpu_gfx_tca_ras_mem_id { 3797 AMDGPU_GFX_TCA_MEM1 = 1, 3798 }; 3799 3800 enum amdgpu_gfx_tci_ras_mem_id { 3801 AMDGPU_GFX_TCIW_MEM = 1, 3802 }; 3803 3804 enum amdgpu_gfx_tcp_ras_mem_id { 3805 AMDGPU_GFX_TCP_LFIFO0 = 1, 3806 AMDGPU_GFX_TCP_SET0BANK0_RAM, 3807 AMDGPU_GFX_TCP_SET0BANK1_RAM, 3808 AMDGPU_GFX_TCP_SET0BANK2_RAM, 3809 AMDGPU_GFX_TCP_SET0BANK3_RAM, 3810 AMDGPU_GFX_TCP_SET1BANK0_RAM, 3811 AMDGPU_GFX_TCP_SET1BANK1_RAM, 3812 AMDGPU_GFX_TCP_SET1BANK2_RAM, 3813 AMDGPU_GFX_TCP_SET1BANK3_RAM, 3814 AMDGPU_GFX_TCP_SET2BANK0_RAM, 3815 AMDGPU_GFX_TCP_SET2BANK1_RAM, 3816 AMDGPU_GFX_TCP_SET2BANK2_RAM, 3817 AMDGPU_GFX_TCP_SET2BANK3_RAM, 3818 AMDGPU_GFX_TCP_SET3BANK0_RAM, 3819 AMDGPU_GFX_TCP_SET3BANK1_RAM, 3820 AMDGPU_GFX_TCP_SET3BANK2_RAM, 3821 AMDGPU_GFX_TCP_SET3BANK3_RAM, 3822 AMDGPU_GFX_TCP_VM_FIFO, 3823 AMDGPU_GFX_TCP_DB_TAGRAM0, 3824 AMDGPU_GFX_TCP_DB_TAGRAM1, 3825 AMDGPU_GFX_TCP_DB_TAGRAM2, 3826 AMDGPU_GFX_TCP_DB_TAGRAM3, 3827 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, 3828 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, 3829 AMDGPU_GFX_TCP_CMD_FIFO, 3830 }; 3831 3832 enum amdgpu_gfx_td_ras_mem_id { 3833 AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1, 3834 AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, 3835 AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, 3836 }; 3837 3838 enum amdgpu_gfx_tcx_ras_mem_id { 3839 AMDGPU_GFX_TCX_FIFOD0 = 0, 3840 AMDGPU_GFX_TCX_FIFOD1, 3841 AMDGPU_GFX_TCX_FIFOD2, 3842 AMDGPU_GFX_TCX_FIFOD3, 3843 AMDGPU_GFX_TCX_FIFOD4, 3844 AMDGPU_GFX_TCX_FIFOD5, 3845 AMDGPU_GFX_TCX_FIFOD6, 3846 AMDGPU_GFX_TCX_FIFOD7, 3847 AMDGPU_GFX_TCX_FIFOB0, 3848 AMDGPU_GFX_TCX_FIFOB1, 3849 AMDGPU_GFX_TCX_FIFOB2, 3850 AMDGPU_GFX_TCX_FIFOB3, 3851 AMDGPU_GFX_TCX_FIFOB4, 3852 
AMDGPU_GFX_TCX_FIFOB5, 3853 AMDGPU_GFX_TCX_FIFOB6, 3854 AMDGPU_GFX_TCX_FIFOB7, 3855 AMDGPU_GFX_TCX_FIFOA0, 3856 AMDGPU_GFX_TCX_FIFOA1, 3857 AMDGPU_GFX_TCX_FIFOA2, 3858 AMDGPU_GFX_TCX_FIFOA3, 3859 AMDGPU_GFX_TCX_FIFOA4, 3860 AMDGPU_GFX_TCX_FIFOA5, 3861 AMDGPU_GFX_TCX_FIFOA6, 3862 AMDGPU_GFX_TCX_FIFOA7, 3863 AMDGPU_GFX_TCX_CFIFO0, 3864 AMDGPU_GFX_TCX_CFIFO1, 3865 AMDGPU_GFX_TCX_CFIFO2, 3866 AMDGPU_GFX_TCX_CFIFO3, 3867 AMDGPU_GFX_TCX_CFIFO4, 3868 AMDGPU_GFX_TCX_CFIFO5, 3869 AMDGPU_GFX_TCX_CFIFO6, 3870 AMDGPU_GFX_TCX_CFIFO7, 3871 AMDGPU_GFX_TCX_FIFO_ACKB0, 3872 AMDGPU_GFX_TCX_FIFO_ACKB1, 3873 AMDGPU_GFX_TCX_FIFO_ACKB2, 3874 AMDGPU_GFX_TCX_FIFO_ACKB3, 3875 AMDGPU_GFX_TCX_FIFO_ACKB4, 3876 AMDGPU_GFX_TCX_FIFO_ACKB5, 3877 AMDGPU_GFX_TCX_FIFO_ACKB6, 3878 AMDGPU_GFX_TCX_FIFO_ACKB7, 3879 AMDGPU_GFX_TCX_FIFO_ACKD0, 3880 AMDGPU_GFX_TCX_FIFO_ACKD1, 3881 AMDGPU_GFX_TCX_FIFO_ACKD2, 3882 AMDGPU_GFX_TCX_FIFO_ACKD3, 3883 AMDGPU_GFX_TCX_FIFO_ACKD4, 3884 AMDGPU_GFX_TCX_FIFO_ACKD5, 3885 AMDGPU_GFX_TCX_FIFO_ACKD6, 3886 AMDGPU_GFX_TCX_FIFO_ACKD7, 3887 AMDGPU_GFX_TCX_DST_FIFOA0, 3888 AMDGPU_GFX_TCX_DST_FIFOA1, 3889 AMDGPU_GFX_TCX_DST_FIFOA2, 3890 AMDGPU_GFX_TCX_DST_FIFOA3, 3891 AMDGPU_GFX_TCX_DST_FIFOA4, 3892 AMDGPU_GFX_TCX_DST_FIFOA5, 3893 AMDGPU_GFX_TCX_DST_FIFOA6, 3894 AMDGPU_GFX_TCX_DST_FIFOA7, 3895 AMDGPU_GFX_TCX_DST_FIFOB0, 3896 AMDGPU_GFX_TCX_DST_FIFOB1, 3897 AMDGPU_GFX_TCX_DST_FIFOB2, 3898 AMDGPU_GFX_TCX_DST_FIFOB3, 3899 AMDGPU_GFX_TCX_DST_FIFOB4, 3900 AMDGPU_GFX_TCX_DST_FIFOB5, 3901 AMDGPU_GFX_TCX_DST_FIFOB6, 3902 AMDGPU_GFX_TCX_DST_FIFOB7, 3903 AMDGPU_GFX_TCX_DST_FIFOD0, 3904 AMDGPU_GFX_TCX_DST_FIFOD1, 3905 AMDGPU_GFX_TCX_DST_FIFOD2, 3906 AMDGPU_GFX_TCX_DST_FIFOD3, 3907 AMDGPU_GFX_TCX_DST_FIFOD4, 3908 AMDGPU_GFX_TCX_DST_FIFOD5, 3909 AMDGPU_GFX_TCX_DST_FIFOD6, 3910 AMDGPU_GFX_TCX_DST_FIFOD7, 3911 AMDGPU_GFX_TCX_DST_FIFO_ACKB0, 3912 AMDGPU_GFX_TCX_DST_FIFO_ACKB1, 3913 AMDGPU_GFX_TCX_DST_FIFO_ACKB2, 3914 AMDGPU_GFX_TCX_DST_FIFO_ACKB3, 3915 AMDGPU_GFX_TCX_DST_FIFO_ACKB4, 3916 AMDGPU_GFX_TCX_DST_FIFO_ACKB5, 3917 AMDGPU_GFX_TCX_DST_FIFO_ACKB6, 3918 AMDGPU_GFX_TCX_DST_FIFO_ACKB7, 3919 AMDGPU_GFX_TCX_DST_FIFO_ACKD0, 3920 AMDGPU_GFX_TCX_DST_FIFO_ACKD1, 3921 AMDGPU_GFX_TCX_DST_FIFO_ACKD2, 3922 AMDGPU_GFX_TCX_DST_FIFO_ACKD3, 3923 AMDGPU_GFX_TCX_DST_FIFO_ACKD4, 3924 AMDGPU_GFX_TCX_DST_FIFO_ACKD5, 3925 AMDGPU_GFX_TCX_DST_FIFO_ACKD6, 3926 AMDGPU_GFX_TCX_DST_FIFO_ACKD7, 3927 }; 3928 3929 enum amdgpu_gfx_atc_l2_ras_mem_id { 3930 AMDGPU_GFX_ATC_L2_MEM0 = 0, 3931 }; 3932 3933 enum amdgpu_gfx_utcl2_ras_mem_id { 3934 AMDGPU_GFX_UTCL2_MEM0 = 0, 3935 }; 3936 3937 enum amdgpu_gfx_vml2_ras_mem_id { 3938 AMDGPU_GFX_VML2_MEM0 = 0, 3939 }; 3940 3941 enum amdgpu_gfx_vml2_walker_ras_mem_id { 3942 AMDGPU_GFX_VML2_WALKER_MEM0 = 0, 3943 }; 3944 3945 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = { 3946 {AMDGPU_GFX_CP_MEM1, "CP_MEM1"}, 3947 {AMDGPU_GFX_CP_MEM2, "CP_MEM2"}, 3948 {AMDGPU_GFX_CP_MEM3, "CP_MEM3"}, 3949 {AMDGPU_GFX_CP_MEM4, "CP_MEM4"}, 3950 {AMDGPU_GFX_CP_MEM5, "CP_MEM5"}, 3951 }; 3952 3953 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = { 3954 {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"}, 3955 {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"}, 3956 {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"}, 3957 {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"}, 3958 {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"}, 3959 {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"}, 3960 {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"}, 3961 
{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"}, 3962 {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"}, 3963 {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"}, 3964 {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"}, 3965 {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"}, 3966 {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"}, 3967 {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"}, 3968 {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"}, 3969 {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"}, 3970 {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"}, 3971 {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"}, 3972 {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"}, 3973 {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"}, 3974 }; 3975 3976 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = { 3977 {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"}, 3978 }; 3979 3980 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = { 3981 {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"}, 3982 }; 3983 3984 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = { 3985 {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"}, 3986 }; 3987 3988 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = { 3989 {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"}, 3990 {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"}, 3991 {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"}, 3992 {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"}, 3993 {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"}, 3994 {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"}, 3995 {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"}, 3996 {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"}, 3997 {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"}, 3998 {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"}, 3999 {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"}, 4000 {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"}, 4001 {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"}, 4002 {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"}, 4003 {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"}, 4004 {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"}, 4005 {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"}, 4006 {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"}, 4007 {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"}, 4008 {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"}, 4009 {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"}, 4010 {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"}, 4011 {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"}, 4012 {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"}, 4013 {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"}, 4014 {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"}, 4015 {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"}, 4016 {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"}, 4017 {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"}, 4018 {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"}, 4019 {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"}, 4020 {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"}, 4021 {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"}, 4022 {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"}, 4023 }; 4024 4025 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = { 4026 {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"}, 4027 {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"}, 4028 {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"}, 4029 {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"}, 4030 {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"}, 4031 {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"}, 4032 {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"}, 4033 {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"}, 4034 }; 4035 4036 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = { 4037 {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"}, 4038 }; 4039 4040 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = { 4041 {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"}, 4042 {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"}, 
4043 {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"}, 4044 {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"}, 4045 }; 4046 4047 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = { 4048 {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"}, 4049 {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"}, 4050 {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"}, 4051 {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"}, 4052 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"}, 4053 {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"}, 4054 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"}, 4055 {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"}, 4056 {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"}, 4057 {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"}, 4058 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"}, 4059 {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"}, 4060 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"}, 4061 {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"}, 4062 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"}, 4063 {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"}, 4064 {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"}, 4065 {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"}, 4066 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"}, 4067 {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"}, 4068 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"}, 4069 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"}, 4070 {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"}, 4071 }; 4072 4073 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = { 4074 {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"}, 4075 {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"}, 4076 {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"}, 4077 {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"}, 4078 }; 4079 4080 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = { 4081 {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"}, 4082 {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"}, 4083 {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"}, 4084 {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"}, 4085 {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"}, 4086 }; 4087 4088 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = { 4089 {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"}, 4090 }; 4091 4092 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = { 4093 {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"}, 4094 }; 4095 4096 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = { 4097 {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"}, 4098 }; 4099 4100 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = { 4101 {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"}, 4102 {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"}, 4103 {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"}, 4104 {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"}, 4105 {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"}, 4106 {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"}, 4107 {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"}, 4108 {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"}, 4109 {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"}, 4110 {AMDGPU_GFX_TCP_SET2BANK0_RAM, 
"TCP_SET2BANK0_RAM"}, 4111 {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"}, 4112 {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"}, 4113 {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"}, 4114 {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"}, 4115 {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"}, 4116 {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"}, 4117 {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"}, 4118 {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"}, 4119 {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"}, 4120 {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"}, 4121 {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"}, 4122 {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"}, 4123 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"}, 4124 {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"}, 4125 {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"}, 4126 }; 4127 4128 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = { 4129 {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"}, 4130 {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"}, 4131 {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"}, 4132 }; 4133 4134 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = { 4135 {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"}, 4136 {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"}, 4137 {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"}, 4138 {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"}, 4139 {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"}, 4140 {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"}, 4141 {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"}, 4142 {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"}, 4143 {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"}, 4144 {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"}, 4145 {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"}, 4146 {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"}, 4147 {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"}, 4148 {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"}, 4149 {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"}, 4150 {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"}, 4151 {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"}, 4152 {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"}, 4153 {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"}, 4154 {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"}, 4155 {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"}, 4156 {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"}, 4157 {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"}, 4158 {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"}, 4159 {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"}, 4160 {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"}, 4161 {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"}, 4162 {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"}, 4163 {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"}, 4164 {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"}, 4165 {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"}, 4166 {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"}, 4167 {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"}, 4168 {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"}, 4169 {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"}, 4170 {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"}, 4171 {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"}, 4172 {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"}, 4173 {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"}, 4174 {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"}, 4175 {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"}, 4176 {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"}, 4177 {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"}, 4178 {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"}, 4179 {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"}, 4180 {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"}, 4181 {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"}, 4182 {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"}, 4183 {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"}, 4184 
{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4185 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4186 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4187 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4188 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4189 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4190 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4191 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4192 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4193 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4194 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4195 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4196 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4197 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4198 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4199 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4200 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4201 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4202 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4203 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4204 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4205 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4206 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4207 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4208 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4209 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4210 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4211 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4212 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4213 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4214 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4215 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4216 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4217 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4218 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4219 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4220 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4221 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4222 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4223 };
4224
4225 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4226 	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM"},
4227 };
4228
4229 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4230 	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM"},
4231 };
4232
4233 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4234 	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM"},
4235 };
4236
4237 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4238 	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM"},
4239 };
4240
4241 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4242 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4243 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4244 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4245 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4246 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4247 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4248 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4249 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4250 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4251 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4252 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4253 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4254
AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list) 4255 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list) 4256 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list) 4257 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list) 4258 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list) 4259 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list) 4260 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list) 4261 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list) 4262 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list) 4263 AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list) 4264 }; 4265 4266 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { 4267 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH), 4268 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, 4269 AMDGPU_GFX_RLC_MEM, 1}, 4270 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI), 4271 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, 4272 AMDGPU_GFX_CP_MEM, 1}, 4273 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI), 4274 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, 4275 AMDGPU_GFX_CP_MEM, 1}, 4276 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI), 4277 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, 4278 AMDGPU_GFX_CP_MEM, 1}, 4279 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI), 4280 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, 4281 AMDGPU_GFX_GDS_MEM, 1}, 4282 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI), 4283 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, 4284 AMDGPU_GFX_GC_CANE_MEM, 1}, 4285 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), 4286 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, 4287 AMDGPU_GFX_SPI_MEM, 1}, 4288 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), 4289 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, 4290 AMDGPU_GFX_SP_MEM, 4}, 4291 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), 4292 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, 4293 AMDGPU_GFX_SP_MEM, 4}, 4294 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), 4295 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, 4296 AMDGPU_GFX_SQ_MEM, 4}, 4297 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), 4298 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, 4299 AMDGPU_GFX_SQC_MEM, 4}, 4300 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), 4301 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, 4302 AMDGPU_GFX_TCX_MEM, 1}, 4303 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI), 4304 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, 4305 AMDGPU_GFX_TCC_MEM, 1}, 4306 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), 4307 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, 4308 AMDGPU_GFX_TA_MEM, 4}, 4309 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG), 4310 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, 4311 AMDGPU_GFX_TCI_MEM, 1}, 4312 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), 
4313 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, 4314 AMDGPU_GFX_TCP_MEM, 4}, 4315 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), 4316 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, 4317 AMDGPU_GFX_TD_MEM, 4}, 4318 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), 4319 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, 4320 AMDGPU_GFX_GCEA_MEM, 1}, 4321 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), 4322 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, 4323 AMDGPU_GFX_LDS_MEM, 4}, 4324 }; 4325 4326 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { 4327 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH), 4328 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"}, 4329 AMDGPU_GFX_RLC_MEM, 1}, 4330 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI), 4331 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"}, 4332 AMDGPU_GFX_CP_MEM, 1}, 4333 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI), 4334 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"}, 4335 AMDGPU_GFX_CP_MEM, 1}, 4336 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI), 4337 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"}, 4338 AMDGPU_GFX_CP_MEM, 1}, 4339 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI), 4340 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"}, 4341 AMDGPU_GFX_GDS_MEM, 1}, 4342 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI), 4343 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"}, 4344 AMDGPU_GFX_GC_CANE_MEM, 1}, 4345 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), 4346 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"}, 4347 AMDGPU_GFX_SPI_MEM, 1}, 4348 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), 4349 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"}, 4350 AMDGPU_GFX_SP_MEM, 4}, 4351 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), 4352 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"}, 4353 AMDGPU_GFX_SP_MEM, 4}, 4354 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), 4355 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"}, 4356 AMDGPU_GFX_SQ_MEM, 4}, 4357 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), 4358 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"}, 4359 AMDGPU_GFX_SQC_MEM, 4}, 4360 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), 4361 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"}, 4362 AMDGPU_GFX_TCX_MEM, 1}, 4363 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI), 4364 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"}, 4365 AMDGPU_GFX_TCC_MEM, 1}, 4366 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), 4367 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"}, 4368 AMDGPU_GFX_TA_MEM, 4}, 4369 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG), 4370 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"}, 
4371 AMDGPU_GFX_TCI_MEM, 1}, 4372 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), 4373 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"}, 4374 AMDGPU_GFX_TCP_MEM, 4}, 4375 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), 4376 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"}, 4377 AMDGPU_GFX_TD_MEM, 4}, 4378 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), 4379 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"}, 4380 AMDGPU_GFX_TCA_MEM, 1}, 4381 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI), 4382 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"}, 4383 AMDGPU_GFX_GCEA_MEM, 1}, 4384 {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), 4385 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"}, 4386 AMDGPU_GFX_LDS_MEM, 4}, 4387 }; 4388 4389 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, 4390 void *ras_error_status, int xcc_id) 4391 { 4392 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 4393 unsigned long ce_count = 0, ue_count = 0; 4394 uint32_t i, j, k; 4395 4396 /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */ 4397 struct amdgpu_smuio_mcm_config_info mcm_info = { 4398 .socket_id = adev->smuio.funcs->get_socket_id(adev), 4399 .die_id = xcc_id & 0x01 ? 1 : 0, 4400 }; 4401 4402 mutex_lock(&adev->grbm_idx_mutex); 4403 4404 for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) { 4405 for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) { 4406 for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) { 4407 /* no need to select if instance number is 1 */ 4408 if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 || 4409 gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1) 4410 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4411 4412 amdgpu_ras_inst_query_ras_error_count(adev, 4413 &(gfx_v9_4_3_ce_reg_list[i].reg_entry), 4414 1, 4415 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent, 4416 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size, 4417 GET_INST(GC, xcc_id), 4418 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 4419 &ce_count); 4420 4421 amdgpu_ras_inst_query_ras_error_count(adev, 4422 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4423 1, 4424 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, 4425 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, 4426 GET_INST(GC, xcc_id), 4427 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 4428 &ue_count); 4429 } 4430 } 4431 } 4432 4433 /* handle extra register entries of UE */ 4434 for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) { 4435 for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) { 4436 for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) { 4437 /* no need to select if instance number is 1 */ 4438 if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 || 4439 gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1) 4440 gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id); 4441 4442 amdgpu_ras_inst_query_ras_error_count(adev, 4443 &(gfx_v9_4_3_ue_reg_list[i].reg_entry), 4444 1, 4445 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent, 4446 gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size, 4447 GET_INST(GC, xcc_id), 4448 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 4449 &ue_count); 4450 } 4451 } 4452 } 4453 4454 
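	/* Select 0xffffffff for SE/SH/instance to return GRBM to broadcast
	 * mode, undoing the per-instance selection done in the loops above.
	 */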
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4455 				    xcc_id);
4456 	mutex_unlock(&adev->grbm_idx_mutex);
4457
4458 	/* the caller is expected to initialize err_data->ue_count and
4459 	 * err_data->ce_count before calling this function
4460 	 */
4461 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4462 	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4463 }
4464
4465 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4466 					void *ras_error_status, int xcc_id)
4467 {
4468 	uint32_t i, j, k;
4469
4470 	mutex_lock(&adev->grbm_idx_mutex);
4471
4472 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4473 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4474 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4475 				/* no need to select if instance number is 1 */
4476 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4477 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4478 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4479
4480 				amdgpu_ras_inst_reset_ras_error_count(adev,
4481 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4482 					1,
4483 					GET_INST(GC, xcc_id));
4484
4485 				amdgpu_ras_inst_reset_ras_error_count(adev,
4486 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4487 					1,
4488 					GET_INST(GC, xcc_id));
4489 			}
4490 		}
4491 	}
4492
4493 	/* handle extra register entries of UE */
4494 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4495 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4496 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4497 				/* no need to select if instance number is 1 */
4498 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4499 				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4500 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4501
4502 				amdgpu_ras_inst_reset_ras_error_count(adev,
4503 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4504 					1,
4505 					GET_INST(GC, xcc_id));
4506 			}
4507 		}
4508 	}
4509
4510 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4511 				    xcc_id);
4512 	mutex_unlock(&adev->grbm_idx_mutex);
4513 }
4514
4515 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
4516 					void *ras_error_status, int xcc_id)
4517 {
4518 	uint32_t i;
4519 	uint32_t data;
4520
4521 	if (amdgpu_sriov_vf(adev))
4522 		return;
4523
4524 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
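	/* TIMEOUT_FATAL_DISABLE suppresses the fatal SQ watchdog event when the
	 * amdgpu_watchdog_timer module parameter requests it; PERIOD_SEL below
	 * programs the watchdog period, clamped to the 1..0x23 range.
	 */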
4525 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
4526 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
4527
4528 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
4529 	    (amdgpu_watchdog_timer.period < 1 ||
4530 	     amdgpu_watchdog_timer.period > 0x23)) {
4531 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
4532 		amdgpu_watchdog_timer.period = 0x23;
4533 	}
4534 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
4535 			     amdgpu_watchdog_timer.period);
4536
4537 	mutex_lock(&adev->grbm_idx_mutex);
4538 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4539 		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
4540 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
4541 	}
4542 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4543 				    xcc_id);
4544 	mutex_unlock(&adev->grbm_idx_mutex);
4545 }
4546
4547 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4548 					void *ras_error_status)
4549 {
4550 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4551 				  gfx_v9_4_3_inst_query_ras_err_count);
4552 }
4553
4554 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4555 {
4556 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4557 }
4558
4559 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
4560 {
4561 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
4562 }
4563
4564 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
4565 {
4566 	int i;
4567
4568 	/* Header itself is a NOP packet */
4569 	if (num_nop == 1) {
4570 		amdgpu_ring_write(ring, ring->funcs->nop);
4571 		return;
4572 	}
4573
4574 	/* The HW can absorb up to 0x3ffe NOP dwords in a single packet; any remainder is emitted one NOP at a time */
4575 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
4576
4577 	/* Header is at index 0, followed by num_nop - 1 NOP packets */
4578 	for (i = 1; i < num_nop; i++)
4579 		amdgpu_ring_write(ring, ring->funcs->nop);
4580 }
4581
4582 static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
4583 {
4584 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4585 	uint32_t i, j, k;
4586 	uint32_t xcc_id, xcc_offset, inst_offset;
4587 	uint32_t num_xcc, reg, num_inst;
4588 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4589
4590 	if (!adev->gfx.ip_dump_core)
4591 		return;
4592
4593 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4594 	drm_printf(p, "Number of Instances:%d\n", num_xcc);
4595 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4596 		xcc_offset = xcc_id * reg_count;
4597 		drm_printf(p, "\nInstance id:%d\n", xcc_id);
4598 		for (i = 0; i < reg_count; i++)
4599 			drm_printf(p, "%-50s \t 0x%08x\n",
4600 				   gc_reg_list_9_4_3[i].reg_name,
4601 				   adev->gfx.ip_dump_core[xcc_offset + i]);
4602 	}
4603
4604 	/* print compute queue registers for all instances */
4605 	if (!adev->gfx.ip_dump_compute_queues)
4606 		return;
4607
4608 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4609 		   adev->gfx.mec.num_queue_per_pipe;
4610
4611 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4612 	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
4613 		   num_xcc,
4614 		   adev->gfx.mec.num_mec,
4615 		   adev->gfx.mec.num_pipe_per_mec,
4616 		   adev->gfx.mec.num_queue_per_pipe);
4617
4618 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4619 		xcc_offset = xcc_id * reg_count * num_inst;
4620 		inst_offset = 0;
4621 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4622 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4623 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4624 					drm_printf(p,
4625 						   "\nxcc:%d mec:%d,
pipe:%d, queue:%d\n", 4626 xcc_id, i, j, k); 4627 for (reg = 0; reg < reg_count; reg++) { 4628 drm_printf(p, 4629 "%-50s \t 0x%08x\n", 4630 gc_cp_reg_list_9_4_3[reg].reg_name, 4631 adev->gfx.ip_dump_compute_queues 4632 [xcc_offset + inst_offset + 4633 reg]); 4634 } 4635 inst_offset += reg_count; 4636 } 4637 } 4638 } 4639 } 4640 } 4641 4642 static void gfx_v9_4_3_ip_dump(void *handle) 4643 { 4644 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4645 uint32_t i, j, k; 4646 uint32_t num_xcc, reg, num_inst; 4647 uint32_t xcc_id, xcc_offset, inst_offset; 4648 uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3); 4649 4650 if (!adev->gfx.ip_dump_core) 4651 return; 4652 4653 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4654 4655 amdgpu_gfx_off_ctrl(adev, false); 4656 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4657 xcc_offset = xcc_id * reg_count; 4658 for (i = 0; i < reg_count; i++) 4659 adev->gfx.ip_dump_core[xcc_offset + i] = 4660 RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i], 4661 GET_INST(GC, xcc_id))); 4662 } 4663 amdgpu_gfx_off_ctrl(adev, true); 4664 4665 /* dump compute queue registers for all instances */ 4666 if (!adev->gfx.ip_dump_compute_queues) 4667 return; 4668 4669 num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * 4670 adev->gfx.mec.num_queue_per_pipe; 4671 reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3); 4672 amdgpu_gfx_off_ctrl(adev, false); 4673 mutex_lock(&adev->srbm_mutex); 4674 for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { 4675 xcc_offset = xcc_id * reg_count * num_inst; 4676 inst_offset = 0; 4677 for (i = 0; i < adev->gfx.mec.num_mec; i++) { 4678 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) { 4679 for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) { 4680 /* ME0 is for GFX so start from 1 for CP */ 4681 soc15_grbm_select(adev, 1 + i, j, k, 0, 4682 GET_INST(GC, xcc_id)); 4683 4684 for (reg = 0; reg < reg_count; reg++) { 4685 adev->gfx.ip_dump_compute_queues 4686 [xcc_offset + 4687 inst_offset + reg] = 4688 RREG32(SOC15_REG_ENTRY_OFFSET_INST( 4689 gc_cp_reg_list_9_4_3[reg], 4690 GET_INST(GC, xcc_id))); 4691 } 4692 inst_offset += reg_count; 4693 } 4694 } 4695 } 4696 } 4697 soc15_grbm_select(adev, 0, 0, 0, 0, 0); 4698 mutex_unlock(&adev->srbm_mutex); 4699 amdgpu_gfx_off_ctrl(adev, true); 4700 } 4701 4702 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring) 4703 { 4704 /* Emit the cleaner shader */ 4705 amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0)); 4706 amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */ 4707 } 4708 4709 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { 4710 .name = "gfx_v9_4_3", 4711 .early_init = gfx_v9_4_3_early_init, 4712 .late_init = gfx_v9_4_3_late_init, 4713 .sw_init = gfx_v9_4_3_sw_init, 4714 .sw_fini = gfx_v9_4_3_sw_fini, 4715 .hw_init = gfx_v9_4_3_hw_init, 4716 .hw_fini = gfx_v9_4_3_hw_fini, 4717 .suspend = gfx_v9_4_3_suspend, 4718 .resume = gfx_v9_4_3_resume, 4719 .is_idle = gfx_v9_4_3_is_idle, 4720 .wait_for_idle = gfx_v9_4_3_wait_for_idle, 4721 .soft_reset = gfx_v9_4_3_soft_reset, 4722 .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, 4723 .set_powergating_state = gfx_v9_4_3_set_powergating_state, 4724 .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, 4725 .dump_ip_state = gfx_v9_4_3_ip_dump, 4726 .print_ip_state = gfx_v9_4_3_ip_print, 4727 }; 4728 4729 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { 4730 .type = AMDGPU_RING_TYPE_COMPUTE, 4731 .align_mask = 0xff, 4732 .nop = PACKET3(PACKET3_NOP, 
0x3FFF), 4733 .support_64bit_ptrs = true, 4734 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, 4735 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, 4736 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, 4737 .emit_frame_size = 4738 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ 4739 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ 4740 5 + /* hdp invalidate */ 4741 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ 4742 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4743 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4744 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ 4745 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ 4746 7 + /* gfx_v9_4_3_emit_mem_sync */ 4747 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ 4748 15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ 4749 2, /* gfx_v9_4_3_ring_emit_cleaner_shader */ 4750 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ 4751 .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, 4752 .emit_fence = gfx_v9_4_3_ring_emit_fence, 4753 .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync, 4754 .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush, 4755 .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch, 4756 .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, 4757 .test_ring = gfx_v9_4_3_ring_test_ring, 4758 .test_ib = gfx_v9_4_3_ring_test_ib, 4759 .insert_nop = gfx_v9_4_3_ring_insert_nop, 4760 .pad_ib = amdgpu_ring_generic_pad_ib, 4761 .emit_wreg = gfx_v9_4_3_ring_emit_wreg, 4762 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, 4763 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 4764 .soft_recovery = gfx_v9_4_3_ring_soft_recovery, 4765 .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, 4766 .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, 4767 .reset = gfx_v9_4_3_reset_kcq, 4768 .emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader, 4769 .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, 4770 .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, 4771 }; 4772 4773 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { 4774 .type = AMDGPU_RING_TYPE_KIQ, 4775 .align_mask = 0xff, 4776 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4777 .support_64bit_ptrs = true, 4778 .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, 4779 .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, 4780 .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, 4781 .emit_frame_size = 4782 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ 4783 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ 4784 5 + /* hdp invalidate */ 4785 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ 4786 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4787 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4788 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ 4789 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */ 4790 .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ 4791 .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq, 4792 .test_ring = gfx_v9_4_3_ring_test_ring, 4793 .insert_nop = amdgpu_ring_insert_nop, 4794 .pad_ib = amdgpu_ring_generic_pad_ib, 4795 .emit_rreg = gfx_v9_4_3_ring_emit_rreg, 4796 .emit_wreg = gfx_v9_4_3_ring_emit_wreg, 4797 .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, 4798 .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 4799 }; 4800 4801 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) 4802 { 4803 int i, j, num_xcc; 4804 4805 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 4806 for (i = 0; i < num_xcc; i++) { 4807 adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; 4808 4809 for (j = 0; j < adev->gfx.num_compute_rings; j++) 4810 adev->gfx.compute_ring[j 
+ i * adev->gfx.num_compute_rings].funcs
4811 				= &gfx_v9_4_3_ring_funcs_compute;
4812 	}
4813 }
4814
4815 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4816 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4817 	.process = gfx_v9_4_3_eop_irq,
4818 };
4819
4820 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4821 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4822 	.process = gfx_v9_4_3_priv_reg_irq,
4823 };
4824
4825 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4826 	.set = gfx_v9_4_3_set_bad_op_fault_state,
4827 	.process = gfx_v9_4_3_bad_op_irq,
4828 };
4829
4830 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4831 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4832 	.process = gfx_v9_4_3_priv_inst_irq,
4833 };
4834
4835 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4836 {
4837 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4838 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4839
4840 	adev->gfx.priv_reg_irq.num_types = 1;
4841 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4842
4843 	adev->gfx.bad_op_irq.num_types = 1;
4844 	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4845
4846 	adev->gfx.priv_inst_irq.num_types = 1;
4847 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4848 }
4849
4850 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4851 {
4852 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4853 }
4854
4855
4856 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4857 {
4858 	/* init asic gds info */
4859 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4860 	case IP_VERSION(9, 4, 3):
4861 	case IP_VERSION(9, 4, 4):
4862 		/* 9.4.3 removed all the GDS internal memory;
4863 		 * only GWS opcodes used by the kernel, such as barrier
4864 		 * and semaphore, are supported */
4865 		adev->gds.gds_size = 0;
4866 		break;
4867 	default:
4868 		adev->gds.gds_size = 0x10000;
4869 		break;
4870 	}
4871
4872 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4873 	case IP_VERSION(9, 4, 3):
4874 	case IP_VERSION(9, 4, 4):
4875 		/* deprecated for 9.4.3, no usage at all */
4876 		adev->gds.gds_compute_max_wave_id = 0;
4877 		break;
4878 	default:
4879 		/* this really depends on the chip */
4880 		adev->gds.gds_compute_max_wave_id = 0x7ff;
4881 		break;
4882 	}
4883
4884 	adev->gds.gws_size = 64;
4885 	adev->gds.oa_size = 16;
4886 }
4887
4888 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4889 						 u32 bitmap, int xcc_id)
4890 {
4891 	u32 data;
4892
4893 	if (!bitmap)
4894 		return;
4895
4896 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4897 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4898
4899 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4900 }
4901
4902 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4903 {
4904 	u32 data, mask;
4905
4906 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4907 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4908
4909 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4910 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4911
4912 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4913
4914 	return (~data) & mask;
4915 }
4916
4917 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4918 				 struct amdgpu_cu_info *cu_info)
4919 {
4920 	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4921 	u32 mask, bitmap, ao_bitmap,
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info)
{
	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
	unsigned disable_masks[4 * 4];
	bool is_symmetric_cus;

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from the bitmap array size 4*4, which covers all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
	    adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		is_symmetric_cus = true;
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
				ao_bitmap = 0;
				counter = 0;
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask) {
						if (counter < adev->gfx.config.max_cu_per_sh)
							ao_bitmap |= mask;
						counter++;
					}
					mask <<= 1;
				}
				active_cu_number += counter;
				if (i < 2 && j < 2)
					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
			if (i && is_symmetric_cus && prev_counter != counter)
				is_symmetric_cus = false;
			prev_counter = counter;
		}
		if (is_symmetric_cus) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 4,
	.rev = 3,
	.funcs = &gfx_v9_4_3_ip_funcs,
};

static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO: Initialize golden regs */
	/* gfx_v9_4_3_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v9_4_3_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}
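
/*
 * Partition-level suspend/resume sketch: resume brings each XCC in
 * inst_mask up in full-resume order (constants, then RLC unless running
 * as an SRIOV VF, then CP). A hypothetical caller resuming a
 * two-instance partition might do (mask value illustrative only):
 *
 *	r = gfx_v9_4_3_xcp_resume(adev, BIT(0) | BIT(1));
 */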
struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
	.suspend = &gfx_v9_4_3_xcp_suspend,
	.resume = &gfx_v9_4_3_xcp_resume
};

struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};

static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
	.ras_block = {
		.hw_ops = &gfx_v9_4_3_ras_ops,
		.ras_late_init = &gfx_v9_4_3_ras_late_init,
	},
	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};
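
/*
 * Usage note (assumption): gfx_v9_4_3_ras is expected to be hooked into
 * the common GFX RAS path during early init, e.g.
 *
 *	adev->gfx.ras = &gfx_v9_4_3_ras;
 *
 * after which gfx_v9_4_3_ras_late_init() and the ACA bind above run from
 * the shared RAS late-init flow; the exact registration point is assumed
 * here rather than taken from this file.
 */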