/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_12_1_0_offset.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "soc_v1_0.h"
#include <uapi/linux/kfd_ioctl.h>

static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
		      uint32_t queue, uint32_t vmid, uint32_t inst)
{
	mutex_lock(&adev->srbm_mutex);
	amdgpu_gfx_select_me_pipe_q(adev, mec, pipe, queue, vmid, inst);
}

static void unlock_srbm(struct amdgpu_device *adev, uint32_t inst)
{
	amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, inst);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
			  uint32_t queue_id, uint32_t inst)
{
	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, queue_id, 0, inst);
}

static void release_queue(struct amdgpu_device *adev, uint32_t inst)
{
	unlock_srbm(adev, inst);
}

static int init_interrupts_v12_1(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t inst)
{
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(adev, mec, pipe, 0, 0, inst);

	WREG32_SOC15(GC, GET_INST(GC, inst), regCPC_INT_CNTL,
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(adev, inst);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;
	uint32_t dev_inst = GET_INST(SDMA0, engine_id);

	switch (dev_inst % adev->sdma.num_inst_per_xcc) {
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0,
				dev_inst / adev->sdma.num_inst_per_xcc,
				regSDMA0_SDMA_QUEUE0_RB_CNTL) - regSDMA0_SDMA_QUEUE0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1,
				dev_inst / adev->sdma.num_inst_per_xcc,
				regSDMA1_SDMA_QUEUE0_RB_CNTL) - regSDMA0_SDMA_QUEUE0_RB_CNTL;
		break;
	default:
		BUG();
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (regSDMA0_SDMA_QUEUE1_RB_CNTL - regSDMA0_SDMA_QUEUE0_RB_CNTL);

pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, 103 queue_id, sdma_rlc_reg_offset); 104 105 return sdma_rlc_reg_offset; 106 } 107 108 static int hqd_dump_v12_1(struct amdgpu_device *adev, 109 uint32_t pipe_id, uint32_t queue_id, 110 uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) 111 { 112 uint32_t i = 0, reg; 113 #define HQD_N_REGS 56 114 #define DUMP_REG(addr) do { \ 115 if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ 116 break; \ 117 (*dump)[i][0] = (addr) << 2; \ 118 (*dump)[i++][1] = RREG32(addr); \ 119 } while (0) 120 121 *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); 122 if (*dump == NULL) 123 return -ENOMEM; 124 125 acquire_queue(adev, pipe_id, queue_id, inst); 126 127 for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_MQD_BASE_ADDR); 128 reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI); reg++) 129 DUMP_REG(reg); 130 131 release_queue(adev, inst); 132 133 WARN_ON_ONCE(i != HQD_N_REGS); 134 *n_regs = i; 135 136 return 0; 137 } 138 139 static int hqd_sdma_dump_v12_1(struct amdgpu_device *adev, 140 uint32_t engine_id, uint32_t queue_id, 141 uint32_t (**dump)[2], uint32_t *n_regs) 142 { 143 uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, 144 engine_id, queue_id); 145 uint32_t i = 0, reg; 146 147 const uint32_t first_reg = regSDMA0_SDMA_QUEUE0_RB_CNTL; 148 const uint32_t last_reg = regSDMA0_SDMA_QUEUE0_CONTEXT_STATUS; 149 #undef HQD_N_REGS 150 #define HQD_N_REGS (last_reg - first_reg + 1) 151 152 *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); 153 if (*dump == NULL) 154 return -ENOMEM; 155 156 for (reg = first_reg; 157 reg <= last_reg; reg++) 158 DUMP_REG(sdma_rlc_reg_offset + reg); 159 160 WARN_ON_ONCE(i != HQD_N_REGS); 161 *n_regs = i; 162 163 return 0; 164 } 165 166 static int wave_control_execute_v12_1(struct amdgpu_device *adev, 167 uint32_t gfx_index_val, 168 uint32_t sq_cmd, uint32_t inst) 169 { 170 uint32_t data = 0; 171 172 mutex_lock(&adev->grbm_idx_mutex); 173 174 WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regGRBM_GFX_INDEX), gfx_index_val); 175 WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regSQ_CMD), sq_cmd); 176 177 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 178 INSTANCE_BROADCAST_WRITES, 1); 179 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 180 SA_BROADCAST_WRITES, 1); 181 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 182 SE_BROADCAST_WRITES, 1); 183 184 WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regGRBM_GFX_INDEX), data); 185 mutex_unlock(&adev->grbm_idx_mutex); 186 187 return 0; 188 } 189 190 /* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */ 191 static uint32_t kgd_gfx_v12_1_enable_debug_trap(struct amdgpu_device *adev, 192 bool restore_dbg_registers, 193 uint32_t vmid) 194 { 195 uint32_t data = 0; 196 197 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); 198 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); 199 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0); 200 201 return data; 202 } 203 204 /* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. 
static uint32_t kgd_gfx_v12_1_disable_debug_trap(struct amdgpu_device *adev,
					bool keep_trap_enabled,
					uint32_t vmid)
{
	uint32_t data = 0;

	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);

	return data;
}

static int kgd_gfx_v12_1_validate_trap_override_request(struct amdgpu_device *adev,
					uint32_t trap_override,
					uint32_t *trap_mask_supported)
{
	*trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
				KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
				KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
				KFD_DBG_TRAP_MASK_FP_OVERFLOW |
				KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
				KFD_DBG_TRAP_MASK_FP_INEXACT |
				KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
				KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
				KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION |
				KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
				KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;

	if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
			trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
		return -EPERM;

	return 0;
}

/* Translate KFD_DBG_TRAP_MASK_* software bits into SPI_GDBG_PER_VMID_CNTL fields. */
static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
{
	uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
	uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
	uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
			KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
			KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
			KFD_DBG_TRAP_MASK_FP_OVERFLOW |
			KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
			KFD_DBG_TRAP_MASK_FP_INEXACT |
			KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
			KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
			KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
	uint32_t ret;

	ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
	ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
	ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);

	return ret;
}

/* Translate SPI_GDBG_PER_VMID_CNTL fields back into KFD_DBG_TRAP_MASK_* bits. */
static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
{
	uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);

	if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
		ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;

	if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
		ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;

	return ret;
}

/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
static uint32_t kgd_gfx_v12_1_set_wave_launch_trap_override(struct amdgpu_device *adev,
					uint32_t vmid,
					uint32_t trap_override,
					uint32_t trap_mask_bits,
					uint32_t trap_mask_request,
					uint32_t *trap_mask_prev,
					uint32_t kfd_dbg_trap_cntl_prev)
{
	uint32_t data = 0;

	*trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);

	data = (trap_mask_bits & trap_mask_request) | (*trap_mask_prev & ~trap_mask_request);
	data = trap_mask_map_sw_to_hw(data);

	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
	data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);

	return data;
}

/* returns STALL_VMID or LAUNCH_MODE. */
static uint32_t kgd_gfx_v12_1_set_wave_launch_mode(struct amdgpu_device *adev,
					uint8_t wave_launch_mode,
					uint32_t vmid)
{
	uint32_t data = 0;
	bool is_stall_mode = wave_launch_mode == 4;

	if (is_stall_mode)
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, STALL_VMID,
				     1);
	else
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE,
				     wave_launch_mode);

	return data;
}

#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
static uint32_t kgd_gfx_v12_1_set_address_watch(struct amdgpu_device *adev,
					uint64_t watch_address,
					uint32_t watch_address_mask,
					uint32_t watch_id,
					uint32_t watch_mode,
					uint32_t debug_vmid,
					uint32_t inst)
{
	uint32_t watch_address_high;
	uint32_t watch_address_low;
	uint32_t watch_address_cntl;

	watch_address_cntl = 0;
	watch_address_low = lower_32_bits(watch_address);
	watch_address_high = upper_32_bits(watch_address) & 0xffff;

	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
			TCP_WATCH0_CNTL,
			MODE,
			watch_mode);

	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
			TCP_WATCH0_CNTL,
			MASK,
			watch_address_mask >> 7);

	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
			TCP_WATCH0_CNTL,
			VALID,
			1);

	WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regTCP_WATCH0_ADDR_H) +
			(watch_id * TCP_WATCH_STRIDE)),
			watch_address_high, inst);

	WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regTCP_WATCH0_ADDR_L) +
			(watch_id * TCP_WATCH_STRIDE)),
			watch_address_low, inst);

	return watch_address_cntl;
}

static uint32_t kgd_gfx_v12_1_clear_address_watch(struct amdgpu_device *adev,
					uint32_t watch_id)
{
	return 0;
}

static uint32_t kgd_gfx_v12_1_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
					int engine, int queue)
{
	return 0;
}

const struct kfd2kgd_calls gfx_v12_1_kfd2kgd = {
	.init_interrupts = init_interrupts_v12_1,
	.hqd_dump = hqd_dump_v12_1,
	.hqd_sdma_dump = hqd_sdma_dump_v12_1,
	.wave_control_execute = wave_control_execute_v12_1,
	.get_atc_vmid_pasid_mapping_info = NULL,
	.enable_debug_trap = kgd_gfx_v12_1_enable_debug_trap,
	.disable_debug_trap = kgd_gfx_v12_1_disable_debug_trap,
	.validate_trap_override_request = kgd_gfx_v12_1_validate_trap_override_request,
	.set_wave_launch_trap_override = kgd_gfx_v12_1_set_wave_launch_trap_override,
	.set_wave_launch_mode = kgd_gfx_v12_1_set_wave_launch_mode,
	.set_address_watch = kgd_gfx_v12_1_set_address_watch,
	.clear_address_watch = kgd_gfx_v12_1_clear_address_watch,
	.hqd_sdma_get_doorbell = kgd_gfx_v12_1_hqd_sdma_get_doorbell
};
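
/*
 * Usage note (illustrative sketch, not part of this file): KFD picks a
 * kfd2kgd_calls table at probe time based on the device's GC IP version.
 * The snippet below shows how gfx_v12_1_kfd2kgd might be wired into such a
 * switch; the IP_VERSION(12, 1, 0) case, the gfx_target_version value and
 * the surrounding variable names are assumptions for illustration only.
 *
 *	switch (gc_version) {
 *	case IP_VERSION(12, 1, 0):
 *		gfx_target_version = 120100;	hypothetical value
 *		f2g = &gfx_v12_1_kfd2kgd;
 *		break;
 *	default:
 *		break;
 *	}
 */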