1 /* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <linux/module.h> 26 #include "amdgpu.h" 27 #include "gfx_v12_0.h" 28 #include "soc15_common.h" 29 #include "soc21.h" 30 #include "gc/gc_12_0_0_offset.h" 31 #include "gc/gc_12_0_0_sh_mask.h" 32 #include "gc/gc_11_0_0_default.h" 33 #include "v12_structs.h" 34 #include "mes_v12_api_def.h" 35 36 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mes.bin"); 37 MODULE_FIRMWARE("amdgpu/gc_12_0_0_mes1.bin"); 38 MODULE_FIRMWARE("amdgpu/gc_12_0_0_uni_mes.bin"); 39 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin"); 40 MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin"); 41 MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin"); 42 43 static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block); 44 static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block); 45 static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev); 46 static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); 47 48 #define MES_EOP_SIZE 2048 49 50 #define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */ 51 #define MES12_HUNG_HQD_INFO_OFFSET 4 52 53 static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring) 54 { 55 struct amdgpu_device *adev = ring->adev; 56 57 if (ring->use_doorbell) { 58 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 59 ring->wptr); 60 WDOORBELL64(ring->doorbell_index, ring->wptr); 61 } else { 62 BUG(); 63 } 64 } 65 66 static u64 mes_v12_0_ring_get_rptr(struct amdgpu_ring *ring) 67 { 68 return *ring->rptr_cpu_addr; 69 } 70 71 static u64 mes_v12_0_ring_get_wptr(struct amdgpu_ring *ring) 72 { 73 u64 wptr; 74 75 if (ring->use_doorbell) 76 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 77 else 78 BUG(); 79 return wptr; 80 } 81 82 static const struct amdgpu_ring_funcs mes_v12_0_ring_funcs = { 83 .type = AMDGPU_RING_TYPE_MES, 84 .align_mask = 1, 85 .nop = 0, 86 .support_64bit_ptrs = true, 87 .get_rptr = mes_v12_0_ring_get_rptr, 88 .get_wptr = mes_v12_0_ring_get_wptr, 89 .set_wptr = mes_v12_0_ring_set_wptr, 90 .insert_nop = amdgpu_ring_insert_nop, 91 }; 92 93 static const char *mes_v12_0_opcodes[] = { 94 "SET_HW_RSRC", 95 "SET_SCHEDULING_CONFIG", 96 "ADD_QUEUE", 97 "REMOVE_QUEUE", 98 "PERFORM_YIELD", 99 "SET_GANG_PRIORITY_LEVEL", 100 "SUSPEND", 101 "RESUME", 102 "RESET", 103 "SET_LOG_BUFFER", 104 "CHANGE_GANG_PRORITY", 105 "QUERY_SCHEDULER_STATUS", 106 "unused", 107 "SET_DEBUG_VMID", 108 "MISC", 109 "UPDATE_ROOT_PAGE_TABLE", 110 "AMD_LOG", 111 "SET_SE_MODE", 112 "SET_GANG_SUBMIT", 113 "SET_HW_RSRC_1", 114 "INVALIDATE_TLBS", 115 }; 116 117 static const char *mes_v12_0_misc_opcodes[] = { 118 "WRITE_REG", 119 "INV_GART", 120 "QUERY_STATUS", 121 "READ_REG", 122 "WAIT_REG_MEM", 123 "SET_SHADER_DEBUGGER", 124 "NOTIFY_WORK_ON_UNMAPPED_QUEUE", 125 "NOTIFY_TO_UNMAP_PROCESSES", 126 }; 127 128 static const char *mes_v12_0_get_op_string(union MESAPI__MISC *x_pkt) 129 { 130 const char *op_str = NULL; 131 132 if (x_pkt->header.opcode < ARRAY_SIZE(mes_v12_0_opcodes)) 133 op_str = mes_v12_0_opcodes[x_pkt->header.opcode]; 134 135 return op_str; 136 } 137 138 static const char *mes_v12_0_get_misc_op_string(union MESAPI__MISC *x_pkt) 139 { 140 const char *op_str = NULL; 141 142 if ((x_pkt->header.opcode == MES_SCH_API_MISC) && 143 (x_pkt->opcode < ARRAY_SIZE(mes_v12_0_misc_opcodes))) 144 op_str = mes_v12_0_misc_opcodes[x_pkt->opcode]; 145 146 return op_str; 147 } 148 149 static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, 150 int pipe, void *pkt, int size, 151 int api_status_off) 152 { 153 union MESAPI__QUERY_MES_STATUS mes_status_pkt; 154 signed long timeout = 2100000; /* 2100 ms */ 155 struct amdgpu_device *adev = mes->adev; 156 struct amdgpu_ring *ring = &mes->ring[pipe]; 157 spinlock_t *ring_lock = &mes->ring_lock[pipe]; 158 struct MES_API_STATUS *api_status; 159 union MESAPI__MISC *x_pkt = pkt; 160 const char *op_str, *misc_op_str; 161 unsigned long flags; 162 u64 status_gpu_addr; 163 u32 seq, status_offset; 164 u64 *status_ptr; 165 signed long r; 166 int ret; 167 168 if (x_pkt->header.opcode >= MES_SCH_API_MAX) 169 return -EINVAL; 170 171 if (amdgpu_emu_mode) { 172 timeout *= 100; 173 } else if (amdgpu_sriov_vf(adev)) { 174 /* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */ 175 timeout = 15 * 600 * 1000; 176 } 177 178 ret = amdgpu_device_wb_get(adev, &status_offset); 179 if (ret) 180 return ret; 181 182 status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4); 183 status_ptr = (u64 *)&adev->wb.wb[status_offset]; 184 *status_ptr = 0; 185 186 spin_lock_irqsave(ring_lock, flags); 187 r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4); 188 if (r) 189 goto error_unlock_free; 190 191 seq = ++ring->fence_drv.sync_seq; 192 r = amdgpu_fence_wait_polling(ring, 193 seq - ring->fence_drv.num_fences_mask, 194 timeout); 195 if (r < 1) 196 goto error_undo; 197 198 api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off); 199 api_status->api_completion_fence_addr = status_gpu_addr; 200 api_status->api_completion_fence_value = 1; 201 202 amdgpu_ring_write_multiple(ring, pkt, size / 4); 203 204 memset(&mes_status_pkt, 0, sizeof(mes_status_pkt)); 205 mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER; 206 mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS; 207 mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 208 mes_status_pkt.api_status.api_completion_fence_addr = 209 ring->fence_drv.gpu_addr; 210 mes_status_pkt.api_status.api_completion_fence_value = seq; 211 212 amdgpu_ring_write_multiple(ring, &mes_status_pkt, 213 sizeof(mes_status_pkt) / 4); 214 215 amdgpu_ring_commit(ring); 216 spin_unlock_irqrestore(ring_lock, flags); 217 218 op_str = mes_v12_0_get_op_string(x_pkt); 219 misc_op_str = mes_v12_0_get_misc_op_string(x_pkt); 220 221 if (misc_op_str) 222 dev_dbg(adev->dev, "MES(%d) msg=%s (%s) was emitted\n", 223 pipe, op_str, misc_op_str); 224 else if (op_str) 225 dev_dbg(adev->dev, "MES(%d) msg=%s was emitted\n", 226 pipe, op_str); 227 else 228 dev_dbg(adev->dev, "MES(%d) msg=%d was emitted\n", 229 pipe, x_pkt->header.opcode); 230 231 r = amdgpu_fence_wait_polling(ring, seq, timeout); 232 233 /* 234 * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success). 235 * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information. 236 */ 237 if (r < 1 || !(lower_32_bits(*status_ptr))) { 238 239 if (misc_op_str) 240 dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", 241 pipe, op_str, misc_op_str); 242 else if (op_str) 243 dev_err(adev->dev, "MES(%d) failed to respond to msg=%s\n", 244 pipe, op_str); 245 else 246 dev_err(adev->dev, "MES(%d) failed to respond to msg=%d\n", 247 pipe, x_pkt->header.opcode); 248 249 while (halt_if_hws_hang) 250 schedule(); 251 252 r = -ETIMEDOUT; 253 goto error_wb_free; 254 } 255 256 amdgpu_device_wb_free(adev, status_offset); 257 return 0; 258 259 error_undo: 260 dev_err(adev->dev, "MES ring buffer is full.\n"); 261 amdgpu_ring_undo(ring); 262 263 error_unlock_free: 264 spin_unlock_irqrestore(ring_lock, flags); 265 266 error_wb_free: 267 amdgpu_device_wb_free(adev, status_offset); 268 return r; 269 } 270 271 static int convert_to_mes_queue_type(int queue_type) 272 { 273 if (queue_type == AMDGPU_RING_TYPE_GFX) 274 return MES_QUEUE_TYPE_GFX; 275 else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) 276 return MES_QUEUE_TYPE_COMPUTE; 277 else if (queue_type == AMDGPU_RING_TYPE_SDMA) 278 return MES_QUEUE_TYPE_SDMA; 279 else if (queue_type == AMDGPU_RING_TYPE_MES) 280 return MES_QUEUE_TYPE_SCHQ; 281 else 282 BUG(); 283 return -1; 284 } 285 286 static int convert_to_mes_priority_level(int priority_level) 287 { 288 switch (priority_level) { 289 case AMDGPU_MES_PRIORITY_LEVEL_LOW: 290 return AMD_PRIORITY_LEVEL_LOW; 291 case AMDGPU_MES_PRIORITY_LEVEL_NORMAL: 292 default: 293 return AMD_PRIORITY_LEVEL_NORMAL; 294 case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM: 295 return AMD_PRIORITY_LEVEL_MEDIUM; 296 case AMDGPU_MES_PRIORITY_LEVEL_HIGH: 297 return AMD_PRIORITY_LEVEL_HIGH; 298 case AMDGPU_MES_PRIORITY_LEVEL_REALTIME: 299 return AMD_PRIORITY_LEVEL_REALTIME; 300 } 301 } 302 303 static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes, 304 struct mes_add_queue_input *input) 305 { 306 struct amdgpu_device *adev = mes->adev; 307 union MESAPI__ADD_QUEUE mes_add_queue_pkt; 308 struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; 309 uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; 310 311 memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); 312 313 mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; 314 mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE; 315 mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 316 317 mes_add_queue_pkt.process_id = input->process_id; 318 mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr; 319 mes_add_queue_pkt.process_va_start = input->process_va_start; 320 mes_add_queue_pkt.process_va_end = input->process_va_end; 321 mes_add_queue_pkt.process_quantum = input->process_quantum; 322 mes_add_queue_pkt.process_context_addr = input->process_context_addr; 323 mes_add_queue_pkt.gang_quantum = input->gang_quantum; 324 mes_add_queue_pkt.gang_context_addr = input->gang_context_addr; 325 mes_add_queue_pkt.inprocess_gang_priority = 326 convert_to_mes_priority_level(input->inprocess_gang_priority); 327 mes_add_queue_pkt.gang_global_priority_level = 328 convert_to_mes_priority_level(input->gang_global_priority_level); 329 mes_add_queue_pkt.doorbell_offset = input->doorbell_offset; 330 mes_add_queue_pkt.mqd_addr = input->mqd_addr; 331 332 mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr; 333 334 mes_add_queue_pkt.queue_type = 335 convert_to_mes_queue_type(input->queue_type); 336 mes_add_queue_pkt.paging = input->paging; 337 mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl; 338 mes_add_queue_pkt.gws_base = input->gws_base; 339 mes_add_queue_pkt.gws_size = input->gws_size; 340 mes_add_queue_pkt.trap_handler_addr = input->tba_addr; 341 mes_add_queue_pkt.tma_addr = input->tma_addr; 342 mes_add_queue_pkt.trap_en = input->trap_en; 343 mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear; 344 mes_add_queue_pkt.is_kfd_process = input->is_kfd_process; 345 346 /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ 347 mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; 348 mes_add_queue_pkt.gds_size = input->queue_size; 349 350 /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ 351 mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; 352 mes_add_queue_pkt.gds_size = input->queue_size; 353 354 return mes_v12_0_submit_pkt_and_poll_completion(mes, 355 AMDGPU_MES_SCHED_PIPE, 356 &mes_add_queue_pkt, sizeof(mes_add_queue_pkt), 357 offsetof(union MESAPI__ADD_QUEUE, api_status)); 358 } 359 360 static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, 361 struct mes_remove_queue_input *input) 362 { 363 union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; 364 uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; 365 366 memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); 367 368 mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; 369 mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE; 370 mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 371 372 mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; 373 mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; 374 375 if (mes_rev >= 0x5a) 376 mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; 377 378 return mes_v12_0_submit_pkt_and_poll_completion(mes, 379 AMDGPU_MES_SCHED_PIPE, 380 &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), 381 offsetof(union MESAPI__REMOVE_QUEUE, api_status)); 382 } 383 384 int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev, 385 bool req) 386 { 387 u32 i, tmp, val; 388 389 for (i = 0; i < adev->usec_timeout; i++) { 390 /* Request with MeId=2, PipeId=0 */ 391 tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req); 392 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4); 393 WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp); 394 395 val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX); 396 if (req) { 397 if (val == tmp) 398 break; 399 } else { 400 tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, 401 REQUEST, 1); 402 403 /* unlocked or locked by firmware */ 404 if (val != tmp) 405 break; 406 } 407 udelay(1); 408 } 409 410 if (i >= adev->usec_timeout) 411 return -EINVAL; 412 413 return 0; 414 } 415 416 static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type, 417 uint32_t me_id, uint32_t pipe_id, 418 uint32_t queue_id, uint32_t vmid) 419 { 420 struct amdgpu_device *adev = mes->adev; 421 uint32_t value, reg; 422 int i, r = 0; 423 424 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 425 426 if (queue_type == AMDGPU_RING_TYPE_GFX) { 427 dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n", 428 me_id, pipe_id, queue_id, vmid); 429 430 mutex_lock(&adev->gfx.reset_sem_mutex); 431 gfx_v12_0_request_gfx_index_mutex(adev, true); 432 /* all se allow writes */ 433 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, 434 (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT)); 435 value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); 436 if (pipe_id == 0) 437 value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id); 438 else 439 value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id); 440 WREG32_SOC15(GC, 0, regCP_VMID_RESET, value); 441 gfx_v12_0_request_gfx_index_mutex(adev, false); 442 mutex_unlock(&adev->gfx.reset_sem_mutex); 443 444 mutex_lock(&adev->srbm_mutex); 445 soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0); 446 /* wait till dequeue take effects */ 447 for (i = 0; i < adev->usec_timeout; i++) { 448 if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1)) 449 break; 450 udelay(1); 451 } 452 if (i >= adev->usec_timeout) { 453 dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n"); 454 r = -ETIMEDOUT; 455 } 456 457 soc21_grbm_select(adev, 0, 0, 0, 0); 458 mutex_unlock(&adev->srbm_mutex); 459 } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { 460 dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n", 461 me_id, pipe_id, queue_id); 462 mutex_lock(&adev->srbm_mutex); 463 soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0); 464 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 465 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); 466 467 /* wait till dequeue take effects */ 468 for (i = 0; i < adev->usec_timeout; i++) { 469 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 470 break; 471 udelay(1); 472 } 473 if (i >= adev->usec_timeout) { 474 dev_err(adev->dev, "failed to wait on hqd deactivate\n"); 475 r = -ETIMEDOUT; 476 } 477 soc21_grbm_select(adev, 0, 0, 0, 0); 478 mutex_unlock(&adev->srbm_mutex); 479 } else if (queue_type == AMDGPU_RING_TYPE_SDMA) { 480 dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n", 481 me_id, pipe_id, queue_id); 482 switch (me_id) { 483 case 1: 484 reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ); 485 break; 486 case 0: 487 default: 488 reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ); 489 break; 490 } 491 492 value = 1 << queue_id; 493 WREG32(reg, value); 494 /* wait for queue reset done */ 495 for (i = 0; i < adev->usec_timeout; i++) { 496 if (!(RREG32(reg) & value)) 497 break; 498 udelay(1); 499 } 500 if (i >= adev->usec_timeout) { 501 dev_err(adev->dev, "failed to wait on sdma queue reset done\n"); 502 r = -ETIMEDOUT; 503 } 504 } 505 506 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 507 return r; 508 } 509 510 static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes, 511 struct mes_map_legacy_queue_input *input) 512 { 513 union MESAPI__ADD_QUEUE mes_add_queue_pkt; 514 int pipe; 515 516 memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); 517 518 mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; 519 mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE; 520 mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 521 522 mes_add_queue_pkt.pipe_id = input->pipe_id; 523 mes_add_queue_pkt.queue_id = input->queue_id; 524 mes_add_queue_pkt.doorbell_offset = input->doorbell_offset; 525 mes_add_queue_pkt.mqd_addr = input->mqd_addr; 526 mes_add_queue_pkt.wptr_addr = input->wptr_addr; 527 mes_add_queue_pkt.queue_type = 528 convert_to_mes_queue_type(input->queue_type); 529 mes_add_queue_pkt.map_legacy_kq = 1; 530 531 if (mes->adev->enable_uni_mes) 532 pipe = AMDGPU_MES_KIQ_PIPE; 533 else 534 pipe = AMDGPU_MES_SCHED_PIPE; 535 536 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 537 &mes_add_queue_pkt, sizeof(mes_add_queue_pkt), 538 offsetof(union MESAPI__ADD_QUEUE, api_status)); 539 } 540 541 static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes, 542 struct mes_unmap_legacy_queue_input *input) 543 { 544 union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; 545 int pipe; 546 547 memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); 548 549 mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; 550 mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE; 551 mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 552 553 mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; 554 mes_remove_queue_pkt.gang_context_addr = 0; 555 556 mes_remove_queue_pkt.pipe_id = input->pipe_id; 557 mes_remove_queue_pkt.queue_id = input->queue_id; 558 559 if (input->action == PREEMPT_QUEUES_NO_UNMAP) { 560 mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1; 561 mes_remove_queue_pkt.tf_addr = input->trail_fence_addr; 562 mes_remove_queue_pkt.tf_data = 563 lower_32_bits(input->trail_fence_data); 564 } else { 565 mes_remove_queue_pkt.unmap_legacy_queue = 1; 566 mes_remove_queue_pkt.queue_type = 567 convert_to_mes_queue_type(input->queue_type); 568 } 569 570 if (mes->adev->enable_uni_mes) 571 pipe = AMDGPU_MES_KIQ_PIPE; 572 else 573 pipe = AMDGPU_MES_SCHED_PIPE; 574 575 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 576 &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), 577 offsetof(union MESAPI__REMOVE_QUEUE, api_status)); 578 } 579 580 static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes, 581 struct mes_suspend_gang_input *input) 582 { 583 union MESAPI__SUSPEND mes_suspend_gang_pkt; 584 585 memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt)); 586 587 mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER; 588 mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND; 589 mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 590 591 mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs; 592 mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr; 593 mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr; 594 mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value; 595 596 return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, 597 &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt), 598 offsetof(union MESAPI__SUSPEND, api_status)); 599 } 600 601 static int mes_v12_0_resume_gang(struct amdgpu_mes *mes, 602 struct mes_resume_gang_input *input) 603 { 604 union MESAPI__RESUME mes_resume_gang_pkt; 605 606 memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt)); 607 608 mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER; 609 mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME; 610 mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 611 612 mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs; 613 mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr; 614 615 return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, 616 &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt), 617 offsetof(union MESAPI__RESUME, api_status)); 618 } 619 620 static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe) 621 { 622 union MESAPI__QUERY_MES_STATUS mes_status_pkt; 623 624 memset(&mes_status_pkt, 0, sizeof(mes_status_pkt)); 625 626 mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER; 627 mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS; 628 mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 629 630 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 631 &mes_status_pkt, sizeof(mes_status_pkt), 632 offsetof(union MESAPI__QUERY_MES_STATUS, api_status)); 633 } 634 635 static int mes_v12_0_misc_op(struct amdgpu_mes *mes, 636 struct mes_misc_op_input *input) 637 { 638 union MESAPI__MISC misc_pkt; 639 int pipe; 640 641 if (mes->adev->enable_uni_mes) 642 pipe = AMDGPU_MES_KIQ_PIPE; 643 else 644 pipe = AMDGPU_MES_SCHED_PIPE; 645 646 memset(&misc_pkt, 0, sizeof(misc_pkt)); 647 648 misc_pkt.header.type = MES_API_TYPE_SCHEDULER; 649 misc_pkt.header.opcode = MES_SCH_API_MISC; 650 misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 651 652 switch (input->op) { 653 case MES_MISC_OP_READ_REG: 654 misc_pkt.opcode = MESAPI_MISC__READ_REG; 655 misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset; 656 misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr; 657 break; 658 case MES_MISC_OP_WRITE_REG: 659 misc_pkt.opcode = MESAPI_MISC__WRITE_REG; 660 misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset; 661 misc_pkt.write_reg.reg_value = input->write_reg.reg_value; 662 break; 663 case MES_MISC_OP_WRM_REG_WAIT: 664 misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM; 665 misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM; 666 misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref; 667 misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask; 668 misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0; 669 misc_pkt.wait_reg_mem.reg_offset2 = 0; 670 break; 671 case MES_MISC_OP_WRM_REG_WR_WAIT: 672 misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM; 673 misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG; 674 misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref; 675 misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask; 676 misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0; 677 misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1; 678 break; 679 case MES_MISC_OP_SET_SHADER_DEBUGGER: 680 pipe = AMDGPU_MES_SCHED_PIPE; 681 misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER; 682 misc_pkt.set_shader_debugger.process_context_addr = 683 input->set_shader_debugger.process_context_addr; 684 misc_pkt.set_shader_debugger.flags.u32all = 685 input->set_shader_debugger.flags.u32all; 686 misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl = 687 input->set_shader_debugger.spi_gdbg_per_vmid_cntl; 688 memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl, 689 input->set_shader_debugger.tcp_watch_cntl, 690 sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); 691 misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; 692 break; 693 case MES_MISC_OP_CHANGE_CONFIG: 694 misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG; 695 misc_pkt.change_config.opcode = 696 MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS; 697 misc_pkt.change_config.option.bits.limit_single_process = 698 input->change_config.option.limit_single_process; 699 break; 700 701 default: 702 DRM_ERROR("unsupported misc op (%d) \n", input->op); 703 return -EINVAL; 704 } 705 706 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 707 &misc_pkt, sizeof(misc_pkt), 708 offsetof(union MESAPI__MISC, api_status)); 709 } 710 711 static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe) 712 { 713 union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_1_pkt; 714 715 memset(&mes_set_hw_res_1_pkt, 0, sizeof(mes_set_hw_res_1_pkt)); 716 717 mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER; 718 mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1; 719 mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 720 mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa; 721 mes_set_hw_res_1_pkt.cleaner_shader_fence_mc_addr = 722 mes->resource_1_gpu_addr[pipe]; 723 724 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 725 &mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt), 726 offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status)); 727 } 728 729 static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) 730 { 731 int i; 732 struct amdgpu_device *adev = mes->adev; 733 union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt; 734 735 memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt)); 736 737 mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER; 738 mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC; 739 mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 740 741 if (pipe == AMDGPU_MES_SCHED_PIPE) { 742 mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub; 743 mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub; 744 mes_set_hw_res_pkt.gds_size = adev->gds.gds_size; 745 mes_set_hw_res_pkt.paging_vmid = 0; 746 747 for (i = 0; i < MAX_COMPUTE_PIPES; i++) 748 mes_set_hw_res_pkt.compute_hqd_mask[i] = 749 mes->compute_hqd_mask[i]; 750 751 for (i = 0; i < MAX_GFX_PIPES; i++) 752 mes_set_hw_res_pkt.gfx_hqd_mask[i] = 753 mes->gfx_hqd_mask[i]; 754 755 for (i = 0; i < MAX_SDMA_PIPES; i++) 756 mes_set_hw_res_pkt.sdma_hqd_mask[i] = 757 mes->sdma_hqd_mask[i]; 758 759 for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++) 760 mes_set_hw_res_pkt.aggregated_doorbells[i] = 761 mes->aggregated_doorbells[i]; 762 } 763 764 mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = 765 mes->sch_ctx_gpu_addr[pipe]; 766 mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr = 767 mes->query_status_fence_gpu_addr[pipe]; 768 769 for (i = 0; i < 5; i++) { 770 mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i]; 771 mes_set_hw_res_pkt.mmhub_base[i] = 772 adev->reg_offset[MMHUB_HWIP][0][i]; 773 mes_set_hw_res_pkt.osssys_base[i] = 774 adev->reg_offset[OSSSYS_HWIP][0][i]; 775 } 776 777 mes_set_hw_res_pkt.disable_reset = 1; 778 mes_set_hw_res_pkt.disable_mes_log = 1; 779 mes_set_hw_res_pkt.use_different_vmid_compute = 1; 780 mes_set_hw_res_pkt.enable_reg_active_poll = 1; 781 mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; 782 if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82) 783 mes_set_hw_res_pkt.enable_lr_compute_wa = 1; 784 else 785 dev_info_once(adev->dev, 786 "MES FW version must be >= 0x82 to enable LR compute workaround.\n"); 787 788 /* 789 * Keep oversubscribe timer for sdma . When we have unmapped doorbell 790 * handling support, other queue will not use the oversubscribe timer. 791 * handling mode - 0: disabled; 1: basic version; 2: basic+ version 792 */ 793 mes_set_hw_res_pkt.oversubscription_timer = 50; 794 mes_set_hw_res_pkt.unmapped_doorbell_handling = 1; 795 796 if (amdgpu_mes_log_enable) { 797 mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; 798 mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + 799 pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE); 800 } 801 802 if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE) 803 mes_set_hw_res_pkt.limit_single_process = 1; 804 805 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 806 &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), 807 offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); 808 } 809 810 static void mes_v12_0_init_aggregated_doorbell(struct amdgpu_mes *mes) 811 { 812 struct amdgpu_device *adev = mes->adev; 813 uint32_t data; 814 815 data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1); 816 data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK | 817 CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK | 818 CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK); 819 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] << 820 CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT; 821 data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT; 822 WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data); 823 824 data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2); 825 data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK | 826 CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK | 827 CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK); 828 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] << 829 CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT; 830 data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT; 831 WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data); 832 833 data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3); 834 data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK | 835 CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK | 836 CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK); 837 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] << 838 CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT; 839 data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT; 840 WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data); 841 842 data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4); 843 data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK | 844 CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK | 845 CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK); 846 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] << 847 CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT; 848 data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT; 849 WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data); 850 851 data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5); 852 data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK | 853 CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK | 854 CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK); 855 data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] << 856 CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT; 857 data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT; 858 WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data); 859 860 data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT; 861 WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data); 862 } 863 864 865 static void mes_v12_0_enable_unmapped_doorbell_handling( 866 struct amdgpu_mes *mes, bool enable) 867 { 868 struct amdgpu_device *adev = mes->adev; 869 uint32_t data = RREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL); 870 871 /* 872 * The default PROC_LSB settng is 0xc which means doorbell 873 * addr[16:12] gives the doorbell page number. For kfd, each 874 * process will use 2 pages of doorbell, we need to change the 875 * setting to 0xd 876 */ 877 data &= ~CP_UNMAPPED_DOORBELL__PROC_LSB_MASK; 878 data |= 0xd << CP_UNMAPPED_DOORBELL__PROC_LSB__SHIFT; 879 880 data |= (enable ? 1 : 0) << CP_UNMAPPED_DOORBELL__ENABLE__SHIFT; 881 882 WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data); 883 } 884 885 static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes, 886 struct mes_reset_queue_input *input) 887 { 888 union MESAPI__RESET mes_reset_queue_pkt; 889 int pipe; 890 891 if (input->use_mmio) 892 return mes_v12_0_reset_queue_mmio(mes, input->queue_type, 893 input->me_id, input->pipe_id, 894 input->queue_id, input->vmid); 895 896 memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); 897 898 mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; 899 mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; 900 mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 901 902 mes_reset_queue_pkt.queue_type = 903 convert_to_mes_queue_type(input->queue_type); 904 905 if (input->legacy_gfx) { 906 mes_reset_queue_pkt.reset_legacy_gfx = 1; 907 mes_reset_queue_pkt.pipe_id_lp = input->pipe_id; 908 mes_reset_queue_pkt.queue_id_lp = input->queue_id; 909 mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr; 910 mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset; 911 mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr; 912 mes_reset_queue_pkt.vmid_id_lp = input->vmid; 913 } else { 914 mes_reset_queue_pkt.reset_queue_only = 1; 915 mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset; 916 } 917 918 if (input->is_kq) 919 pipe = AMDGPU_MES_KIQ_PIPE; 920 else 921 pipe = AMDGPU_MES_SCHED_PIPE; 922 923 return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, 924 &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), 925 offsetof(union MESAPI__RESET, api_status)); 926 } 927 928 static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes, 929 struct mes_detect_and_reset_queue_input *input) 930 { 931 union MESAPI__RESET mes_reset_queue_pkt; 932 933 memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt)); 934 935 mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER; 936 mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET; 937 mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 938 939 mes_reset_queue_pkt.queue_type = 940 convert_to_mes_queue_type(input->queue_type); 941 mes_reset_queue_pkt.doorbell_offset_addr = 942 mes->hung_queue_db_array_gpu_addr; 943 944 if (input->detect_only) 945 mes_reset_queue_pkt.hang_detect_only = 1; 946 else 947 mes_reset_queue_pkt.hang_detect_then_reset = 1; 948 949 return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, 950 &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt), 951 offsetof(union MESAPI__RESET, api_status)); 952 } 953 954 static int mes_v12_inv_tlb_convert_hub_id(uint8_t id) 955 { 956 /* 957 * MES doesn't support invalidate gc_hub on slave xcc individually 958 * master xcc will invalidate all gc_hub for the partition 959 */ 960 if (AMDGPU_IS_GFXHUB(id)) 961 return 0; 962 else if (AMDGPU_IS_MMHUB0(id)) 963 return 1; 964 else 965 return -EINVAL; 966 967 } 968 969 static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes, 970 struct mes_inv_tlbs_pasid_input *input) 971 { 972 union MESAPI__INV_TLBS mes_inv_tlbs; 973 int ret; 974 975 memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs)); 976 977 mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER; 978 mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS; 979 mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS; 980 981 mes_inv_tlbs.invalidate_tlbs.inv_sel = 0; 982 mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type; 983 mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid; 984 985 /*convert amdgpu_mes_hub_id to mes expected hub_id */ 986 ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id); 987 if (ret < 0) 988 return -EINVAL; 989 mes_inv_tlbs.invalidate_tlbs.hub_id = ret; 990 return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE, 991 &mes_inv_tlbs, sizeof(mes_inv_tlbs), 992 offsetof(union MESAPI__INV_TLBS, api_status)); 993 994 } 995 996 static const struct amdgpu_mes_funcs mes_v12_0_funcs = { 997 .add_hw_queue = mes_v12_0_add_hw_queue, 998 .remove_hw_queue = mes_v12_0_remove_hw_queue, 999 .map_legacy_queue = mes_v12_0_map_legacy_queue, 1000 .unmap_legacy_queue = mes_v12_0_unmap_legacy_queue, 1001 .suspend_gang = mes_v12_0_suspend_gang, 1002 .resume_gang = mes_v12_0_resume_gang, 1003 .misc_op = mes_v12_0_misc_op, 1004 .reset_hw_queue = mes_v12_0_reset_hw_queue, 1005 .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid, 1006 .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues, 1007 }; 1008 1009 static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev, 1010 enum amdgpu_mes_pipe pipe) 1011 { 1012 int r; 1013 const struct mes_firmware_header_v1_0 *mes_hdr; 1014 const __le32 *fw_data; 1015 unsigned fw_size; 1016 1017 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1018 adev->mes.fw[pipe]->data; 1019 1020 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1021 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1022 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1023 1024 r = amdgpu_bo_create_reserved(adev, fw_size, 1025 PAGE_SIZE, 1026 AMDGPU_GEM_DOMAIN_VRAM, 1027 &adev->mes.ucode_fw_obj[pipe], 1028 &adev->mes.ucode_fw_gpu_addr[pipe], 1029 (void **)&adev->mes.ucode_fw_ptr[pipe]); 1030 if (r) { 1031 dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r); 1032 return r; 1033 } 1034 1035 memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size); 1036 1037 amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]); 1038 amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]); 1039 1040 return 0; 1041 } 1042 1043 static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev, 1044 enum amdgpu_mes_pipe pipe) 1045 { 1046 int r; 1047 const struct mes_firmware_header_v1_0 *mes_hdr; 1048 const __le32 *fw_data; 1049 unsigned fw_size; 1050 1051 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1052 adev->mes.fw[pipe]->data; 1053 1054 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1055 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1056 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1057 1058 r = amdgpu_bo_create_reserved(adev, fw_size, 1059 64 * 1024, 1060 AMDGPU_GEM_DOMAIN_VRAM, 1061 &adev->mes.data_fw_obj[pipe], 1062 &adev->mes.data_fw_gpu_addr[pipe], 1063 (void **)&adev->mes.data_fw_ptr[pipe]); 1064 if (r) { 1065 dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r); 1066 return r; 1067 } 1068 1069 memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size); 1070 1071 amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]); 1072 amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]); 1073 1074 return 0; 1075 } 1076 1077 static void mes_v12_0_free_ucode_buffers(struct amdgpu_device *adev, 1078 enum amdgpu_mes_pipe pipe) 1079 { 1080 amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe], 1081 &adev->mes.data_fw_gpu_addr[pipe], 1082 (void **)&adev->mes.data_fw_ptr[pipe]); 1083 1084 amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe], 1085 &adev->mes.ucode_fw_gpu_addr[pipe], 1086 (void **)&adev->mes.ucode_fw_ptr[pipe]); 1087 } 1088 1089 static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable) 1090 { 1091 uint64_t ucode_addr; 1092 uint32_t pipe, data = 0; 1093 1094 if (enable) { 1095 mutex_lock(&adev->srbm_mutex); 1096 for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { 1097 soc21_grbm_select(adev, 3, pipe, 0, 0); 1098 if (amdgpu_mes_log_enable) { 1099 u32 log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE; 1100 /* In case uni mes is not enabled, only program for pipe 0 */ 1101 if (adev->mes.event_log_size >= (pipe + 1) * log_size) { 1102 WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO, 1103 lower_32_bits(adev->mes.event_log_gpu_addr + 1104 pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE)); 1105 WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI, 1106 upper_32_bits(adev->mes.event_log_gpu_addr + 1107 pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE)); 1108 dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n", 1109 RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI), 1110 RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO)); 1111 } 1112 } 1113 1114 data = RREG32_SOC15(GC, 0, regCP_MES_CNTL); 1115 if (pipe == 0) 1116 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1); 1117 else 1118 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1); 1119 WREG32_SOC15(GC, 0, regCP_MES_CNTL, data); 1120 1121 ucode_addr = adev->mes.uc_start_addr[pipe] >> 2; 1122 WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START, 1123 lower_32_bits(ucode_addr)); 1124 WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI, 1125 upper_32_bits(ucode_addr)); 1126 1127 /* unhalt MES and activate one pipe each loop */ 1128 data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1); 1129 if (pipe) 1130 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1); 1131 dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data); 1132 1133 WREG32_SOC15(GC, 0, regCP_MES_CNTL, data); 1134 1135 } 1136 soc21_grbm_select(adev, 0, 0, 0, 0); 1137 mutex_unlock(&adev->srbm_mutex); 1138 1139 if (amdgpu_emu_mode) 1140 msleep(100); 1141 else if (adev->enable_uni_mes) 1142 udelay(500); 1143 else 1144 udelay(50); 1145 } else { 1146 data = RREG32_SOC15(GC, 0, regCP_MES_CNTL); 1147 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0); 1148 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0); 1149 data = REG_SET_FIELD(data, CP_MES_CNTL, 1150 MES_INVALIDATE_ICACHE, 1); 1151 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1); 1152 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1); 1153 data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1); 1154 WREG32_SOC15(GC, 0, regCP_MES_CNTL, data); 1155 } 1156 } 1157 1158 static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev) 1159 { 1160 uint64_t ucode_addr; 1161 int pipe; 1162 1163 mes_v12_0_enable(adev, false); 1164 1165 mutex_lock(&adev->srbm_mutex); 1166 for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { 1167 /* me=3, queue=0 */ 1168 soc21_grbm_select(adev, 3, pipe, 0, 0); 1169 1170 /* set ucode start address */ 1171 ucode_addr = adev->mes.uc_start_addr[pipe] >> 2; 1172 WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START, 1173 lower_32_bits(ucode_addr)); 1174 WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI, 1175 upper_32_bits(ucode_addr)); 1176 1177 soc21_grbm_select(adev, 0, 0, 0, 0); 1178 } 1179 mutex_unlock(&adev->srbm_mutex); 1180 } 1181 1182 /* This function is for backdoor MES firmware */ 1183 static int mes_v12_0_load_microcode(struct amdgpu_device *adev, 1184 enum amdgpu_mes_pipe pipe, bool prime_icache) 1185 { 1186 int r; 1187 uint32_t data; 1188 1189 mes_v12_0_enable(adev, false); 1190 1191 if (!adev->mes.fw[pipe]) 1192 return -EINVAL; 1193 1194 r = mes_v12_0_allocate_ucode_buffer(adev, pipe); 1195 if (r) 1196 return r; 1197 1198 r = mes_v12_0_allocate_ucode_data_buffer(adev, pipe); 1199 if (r) { 1200 mes_v12_0_free_ucode_buffers(adev, pipe); 1201 return r; 1202 } 1203 1204 mutex_lock(&adev->srbm_mutex); 1205 /* me=3, pipe=0, queue=0 */ 1206 soc21_grbm_select(adev, 3, pipe, 0, 0); 1207 1208 WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0); 1209 1210 /* set ucode fimrware address */ 1211 WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO, 1212 lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe])); 1213 WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI, 1214 upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe])); 1215 1216 /* set ucode instruction cache boundary to 2M-1 */ 1217 WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF); 1218 1219 /* set ucode data firmware address */ 1220 WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO, 1221 lower_32_bits(adev->mes.data_fw_gpu_addr[pipe])); 1222 WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI, 1223 upper_32_bits(adev->mes.data_fw_gpu_addr[pipe])); 1224 1225 /* Set data cache boundary CP_MES_MDBOUND_LO */ 1226 WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF); 1227 1228 if (prime_icache) { 1229 /* invalidate ICACHE */ 1230 data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); 1231 data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); 1232 data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); 1233 WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); 1234 1235 /* prime the ICACHE. */ 1236 data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); 1237 data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); 1238 WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); 1239 } 1240 1241 soc21_grbm_select(adev, 0, 0, 0, 0); 1242 mutex_unlock(&adev->srbm_mutex); 1243 1244 return 0; 1245 } 1246 1247 static int mes_v12_0_allocate_eop_buf(struct amdgpu_device *adev, 1248 enum amdgpu_mes_pipe pipe) 1249 { 1250 int r; 1251 u32 *eop; 1252 1253 r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE, 1254 AMDGPU_GEM_DOMAIN_GTT, 1255 &adev->mes.eop_gpu_obj[pipe], 1256 &adev->mes.eop_gpu_addr[pipe], 1257 (void **)&eop); 1258 if (r) { 1259 dev_warn(adev->dev, "(%d) create EOP bo failed\n", r); 1260 return r; 1261 } 1262 1263 memset(eop, 0, 1264 adev->mes.eop_gpu_obj[pipe]->tbo.base.size); 1265 1266 amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]); 1267 amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]); 1268 1269 return 0; 1270 } 1271 1272 static int mes_v12_0_mqd_init(struct amdgpu_ring *ring) 1273 { 1274 struct v12_compute_mqd *mqd = ring->mqd_ptr; 1275 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 1276 uint32_t tmp; 1277 1278 mqd->header = 0xC0310800; 1279 mqd->compute_pipelinestat_enable = 0x00000001; 1280 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 1281 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 1282 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 1283 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 1284 mqd->compute_misc_reserved = 0x00000007; 1285 1286 eop_base_addr = ring->eop_gpu_addr >> 8; 1287 1288 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 1289 tmp = regCP_HQD_EOP_CONTROL_DEFAULT; 1290 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 1291 (order_base_2(MES_EOP_SIZE / 4) - 1)); 1292 1293 mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr); 1294 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 1295 mqd->cp_hqd_eop_control = tmp; 1296 1297 /* disable the queue if it's active */ 1298 ring->wptr = 0; 1299 mqd->cp_hqd_pq_rptr = 0; 1300 mqd->cp_hqd_pq_wptr_lo = 0; 1301 mqd->cp_hqd_pq_wptr_hi = 0; 1302 1303 /* set the pointer to the MQD */ 1304 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 1305 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 1306 1307 /* set MQD vmid to 0 */ 1308 tmp = regCP_MQD_CONTROL_DEFAULT; 1309 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 1310 mqd->cp_mqd_control = tmp; 1311 1312 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 1313 hqd_gpu_addr = ring->gpu_addr >> 8; 1314 mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr); 1315 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 1316 1317 /* set the wb address whether it's enabled or not */ 1318 wb_gpu_addr = ring->rptr_gpu_addr; 1319 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 1320 mqd->cp_hqd_pq_rptr_report_addr_hi = 1321 upper_32_bits(wb_gpu_addr) & 0xffff; 1322 1323 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 1324 wb_gpu_addr = ring->wptr_gpu_addr; 1325 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8; 1326 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 1327 1328 /* set up the HQD, this is similar to CP_RB0_CNTL */ 1329 tmp = regCP_HQD_PQ_CONTROL_DEFAULT; 1330 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 1331 (order_base_2(ring->ring_size / 4) - 1)); 1332 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 1333 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 1334 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); 1335 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 1336 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 1337 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 1338 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1); 1339 mqd->cp_hqd_pq_control = tmp; 1340 1341 /* enable doorbell */ 1342 tmp = 0; 1343 if (ring->use_doorbell) { 1344 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1345 DOORBELL_OFFSET, ring->doorbell_index); 1346 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1347 DOORBELL_EN, 1); 1348 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1349 DOORBELL_SOURCE, 0); 1350 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1351 DOORBELL_HIT, 0); 1352 } else { 1353 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 1354 DOORBELL_EN, 0); 1355 } 1356 mqd->cp_hqd_pq_doorbell_control = tmp; 1357 1358 mqd->cp_hqd_vmid = 0; 1359 /* activate the queue */ 1360 mqd->cp_hqd_active = 1; 1361 1362 tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT; 1363 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, 1364 PRELOAD_SIZE, 0x55); 1365 mqd->cp_hqd_persistent_state = tmp; 1366 1367 mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT; 1368 mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT; 1369 mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT; 1370 1371 /* 1372 * Set CP_HQD_GFX_CONTROL.DB_UPDATED_MSG_EN[15] to enable unmapped 1373 * doorbell handling. This is a reserved CP internal register can 1374 * not be accesss by others 1375 */ 1376 mqd->reserved_184 = BIT(15); 1377 1378 return 0; 1379 } 1380 1381 static void mes_v12_0_queue_init_register(struct amdgpu_ring *ring) 1382 { 1383 struct v12_compute_mqd *mqd = ring->mqd_ptr; 1384 struct amdgpu_device *adev = ring->adev; 1385 uint32_t data = 0; 1386 1387 mutex_lock(&adev->srbm_mutex); 1388 soc21_grbm_select(adev, 3, ring->pipe, 0, 0); 1389 1390 /* set CP_HQD_VMID.VMID = 0. */ 1391 data = RREG32_SOC15(GC, 0, regCP_HQD_VMID); 1392 data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0); 1393 WREG32_SOC15(GC, 0, regCP_HQD_VMID, data); 1394 1395 /* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */ 1396 data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 1397 data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, 1398 DOORBELL_EN, 0); 1399 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data); 1400 1401 /* set CP_MQD_BASE_ADDR/HI with the MQD base address */ 1402 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); 1403 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); 1404 1405 /* set CP_MQD_CONTROL.VMID=0 */ 1406 data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); 1407 data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0); 1408 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 0); 1409 1410 /* set CP_HQD_PQ_BASE/HI with the ring buffer base address */ 1411 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); 1412 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); 1413 1414 /* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */ 1415 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 1416 mqd->cp_hqd_pq_rptr_report_addr_lo); 1417 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 1418 mqd->cp_hqd_pq_rptr_report_addr_hi); 1419 1420 /* set CP_HQD_PQ_CONTROL */ 1421 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); 1422 1423 /* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */ 1424 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 1425 mqd->cp_hqd_pq_wptr_poll_addr_lo); 1426 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 1427 mqd->cp_hqd_pq_wptr_poll_addr_hi); 1428 1429 /* set CP_HQD_PQ_DOORBELL_CONTROL */ 1430 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 1431 mqd->cp_hqd_pq_doorbell_control); 1432 1433 /* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x53 */ 1434 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); 1435 1436 /* set CP_HQD_ACTIVE.ACTIVE=1 */ 1437 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active); 1438 1439 soc21_grbm_select(adev, 0, 0, 0, 0); 1440 mutex_unlock(&adev->srbm_mutex); 1441 } 1442 1443 static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev) 1444 { 1445 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 1446 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; 1447 int r; 1448 1449 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) 1450 return -EINVAL; 1451 1452 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); 1453 if (r) { 1454 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 1455 return r; 1456 } 1457 1458 kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]); 1459 1460 r = amdgpu_ring_test_ring(kiq_ring); 1461 if (r) { 1462 DRM_ERROR("kfq enable failed\n"); 1463 kiq_ring->sched.ready = false; 1464 } 1465 return r; 1466 } 1467 1468 static int mes_v12_0_queue_init(struct amdgpu_device *adev, 1469 enum amdgpu_mes_pipe pipe) 1470 { 1471 struct amdgpu_ring *ring; 1472 int r; 1473 1474 if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) 1475 ring = &adev->gfx.kiq[0].ring; 1476 else 1477 ring = &adev->mes.ring[pipe]; 1478 1479 if ((adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) && 1480 (amdgpu_in_reset(adev) || adev->in_suspend)) { 1481 *(ring->wptr_cpu_addr) = 0; 1482 *(ring->rptr_cpu_addr) = 0; 1483 amdgpu_ring_clear_ring(ring); 1484 } 1485 1486 r = mes_v12_0_mqd_init(ring); 1487 if (r) 1488 return r; 1489 1490 if (pipe == AMDGPU_MES_SCHED_PIPE) { 1491 if (adev->enable_uni_mes) 1492 r = amdgpu_mes_map_legacy_queue(adev, ring); 1493 else 1494 r = mes_v12_0_kiq_enable_queue(adev); 1495 if (r) 1496 return r; 1497 } else { 1498 mes_v12_0_queue_init_register(ring); 1499 } 1500 1501 if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) || 1502 ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) { 1503 /* get MES scheduler/KIQ versions */ 1504 mutex_lock(&adev->srbm_mutex); 1505 soc21_grbm_select(adev, 3, pipe, 0, 0); 1506 1507 if (pipe == AMDGPU_MES_SCHED_PIPE) 1508 adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); 1509 else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq) 1510 adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO); 1511 1512 soc21_grbm_select(adev, 0, 0, 0, 0); 1513 mutex_unlock(&adev->srbm_mutex); 1514 } 1515 1516 return 0; 1517 } 1518 1519 static int mes_v12_0_ring_init(struct amdgpu_device *adev, int pipe) 1520 { 1521 struct amdgpu_ring *ring; 1522 1523 ring = &adev->mes.ring[pipe]; 1524 1525 ring->funcs = &mes_v12_0_ring_funcs; 1526 1527 ring->me = 3; 1528 ring->pipe = pipe; 1529 ring->queue = 0; 1530 1531 ring->ring_obj = NULL; 1532 ring->use_doorbell = true; 1533 ring->eop_gpu_addr = adev->mes.eop_gpu_addr[pipe]; 1534 ring->no_scheduler = true; 1535 sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1536 1537 if (pipe == AMDGPU_MES_SCHED_PIPE) 1538 ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1; 1539 else 1540 ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1; 1541 1542 return amdgpu_ring_init(adev, ring, 1024, NULL, 0, 1543 AMDGPU_RING_PRIO_DEFAULT, NULL); 1544 } 1545 1546 static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev) 1547 { 1548 struct amdgpu_ring *ring; 1549 1550 spin_lock_init(&adev->gfx.kiq[0].ring_lock); 1551 1552 ring = &adev->gfx.kiq[0].ring; 1553 1554 ring->me = 3; 1555 ring->pipe = 1; 1556 ring->queue = 0; 1557 1558 ring->adev = NULL; 1559 ring->ring_obj = NULL; 1560 ring->use_doorbell = true; 1561 ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1; 1562 ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE]; 1563 ring->no_scheduler = true; 1564 sprintf(ring->name, "mes_kiq_%d.%d.%d", 1565 ring->me, ring->pipe, ring->queue); 1566 1567 return amdgpu_ring_init(adev, ring, 1024, NULL, 0, 1568 AMDGPU_RING_PRIO_DEFAULT, NULL); 1569 } 1570 1571 static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev, 1572 enum amdgpu_mes_pipe pipe) 1573 { 1574 int r, mqd_size = sizeof(struct v12_compute_mqd); 1575 struct amdgpu_ring *ring; 1576 1577 if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) 1578 ring = &adev->gfx.kiq[0].ring; 1579 else 1580 ring = &adev->mes.ring[pipe]; 1581 1582 if (ring->mqd_obj) 1583 return 0; 1584 1585 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, 1586 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, 1587 &ring->mqd_gpu_addr, &ring->mqd_ptr); 1588 if (r) { 1589 dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r); 1590 return r; 1591 } 1592 1593 memset(ring->mqd_ptr, 0, mqd_size); 1594 1595 /* prepare MQD backup */ 1596 adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL); 1597 if (!adev->mes.mqd_backup[pipe]) 1598 dev_warn(adev->dev, 1599 "no memory to create MQD backup for ring %s\n", 1600 ring->name); 1601 1602 return 0; 1603 } 1604 1605 static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block) 1606 { 1607 struct amdgpu_device *adev = ip_block->adev; 1608 int pipe, r; 1609 1610 adev->mes.funcs = &mes_v12_0_funcs; 1611 adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init; 1612 adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini; 1613 adev->mes.enable_legacy_queue_map = true; 1614 1615 adev->mes.event_log_size = adev->enable_uni_mes ? 1616 (AMDGPU_MAX_MES_PIPES * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE)) : 1617 (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE); 1618 r = amdgpu_mes_init(adev); 1619 if (r) 1620 return r; 1621 1622 for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { 1623 r = mes_v12_0_allocate_eop_buf(adev, pipe); 1624 if (r) 1625 return r; 1626 1627 r = mes_v12_0_mqd_sw_init(adev, pipe); 1628 if (r) 1629 return r; 1630 1631 if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) { 1632 r = mes_v12_0_kiq_ring_init(adev); 1633 } 1634 else { 1635 r = mes_v12_0_ring_init(adev, pipe); 1636 if (r) 1637 return r; 1638 r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE, 1639 AMDGPU_GEM_DOMAIN_VRAM, 1640 &adev->mes.resource_1[pipe], 1641 &adev->mes.resource_1_gpu_addr[pipe], 1642 &adev->mes.resource_1_addr[pipe]); 1643 if (r) { 1644 dev_err(adev->dev, "(%d) failed to create mes resource_1 bo pipe[%d]\n", r, pipe); 1645 return r; 1646 } 1647 } 1648 } 1649 1650 return 0; 1651 } 1652 1653 static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block) 1654 { 1655 struct amdgpu_device *adev = ip_block->adev; 1656 int pipe; 1657 1658 for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { 1659 amdgpu_bo_free_kernel(&adev->mes.resource_1[pipe], 1660 &adev->mes.resource_1_gpu_addr[pipe], 1661 &adev->mes.resource_1_addr[pipe]); 1662 1663 kfree(adev->mes.mqd_backup[pipe]); 1664 1665 amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe], 1666 &adev->mes.eop_gpu_addr[pipe], 1667 NULL); 1668 amdgpu_ucode_release(&adev->mes.fw[pipe]); 1669 1670 if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) { 1671 amdgpu_bo_free_kernel(&adev->mes.ring[pipe].mqd_obj, 1672 &adev->mes.ring[pipe].mqd_gpu_addr, 1673 &adev->mes.ring[pipe].mqd_ptr); 1674 amdgpu_ring_fini(&adev->mes.ring[pipe]); 1675 } 1676 } 1677 1678 if (!adev->enable_uni_mes) { 1679 amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj, 1680 &adev->gfx.kiq[0].ring.mqd_gpu_addr, 1681 &adev->gfx.kiq[0].ring.mqd_ptr); 1682 amdgpu_ring_fini(&adev->gfx.kiq[0].ring); 1683 } 1684 1685 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 1686 mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE); 1687 mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE); 1688 } 1689 1690 amdgpu_mes_fini(adev); 1691 return 0; 1692 } 1693 1694 static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev) 1695 { 1696 uint32_t data; 1697 int i; 1698 1699 mutex_lock(&adev->srbm_mutex); 1700 soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0); 1701 1702 /* disable the queue if it's active */ 1703 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 1704 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 1705 for (i = 0; i < adev->usec_timeout; i++) { 1706 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 1707 break; 1708 udelay(1); 1709 } 1710 } 1711 data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 1712 data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, 1713 DOORBELL_EN, 0); 1714 data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, 1715 DOORBELL_HIT, 1); 1716 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data); 1717 1718 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0); 1719 1720 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0); 1721 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0); 1722 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0); 1723 1724 soc21_grbm_select(adev, 0, 0, 0, 0); 1725 mutex_unlock(&adev->srbm_mutex); 1726 1727 adev->mes.ring[0].sched.ready = false; 1728 } 1729 1730 static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring) 1731 { 1732 uint32_t tmp; 1733 struct amdgpu_device *adev = ring->adev; 1734 1735 /* tell RLC which is KIQ queue */ 1736 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 1737 tmp &= 0xffffff00; 1738 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 1739 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80); 1740 } 1741 1742 static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev) 1743 { 1744 int r = 0; 1745 struct amdgpu_ip_block *ip_block; 1746 1747 if (adev->enable_uni_mes) 1748 mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]); 1749 else 1750 mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring); 1751 1752 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 1753 1754 r = mes_v12_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false); 1755 if (r) { 1756 DRM_ERROR("failed to load MES fw, r=%d\n", r); 1757 return r; 1758 } 1759 1760 r = mes_v12_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true); 1761 if (r) { 1762 DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); 1763 return r; 1764 } 1765 1766 mes_v12_0_set_ucode_start_addr(adev); 1767 1768 } else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1769 mes_v12_0_set_ucode_start_addr(adev); 1770 1771 mes_v12_0_enable(adev, true); 1772 1773 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES); 1774 if (unlikely(!ip_block)) { 1775 dev_err(adev->dev, "Failed to get MES handle\n"); 1776 return -EINVAL; 1777 } 1778 1779 r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE); 1780 if (r) 1781 goto failure; 1782 1783 if (adev->enable_uni_mes) { 1784 r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_KIQ_PIPE); 1785 if (r) 1786 goto failure; 1787 1788 mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE); 1789 } 1790 1791 if (adev->mes.enable_legacy_queue_map) { 1792 r = mes_v12_0_hw_init(ip_block); 1793 if (r) 1794 goto failure; 1795 } 1796 1797 return r; 1798 1799 failure: 1800 mes_v12_0_hw_fini(ip_block); 1801 return r; 1802 } 1803 1804 static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev) 1805 { 1806 if (adev->mes.ring[0].sched.ready) { 1807 if (adev->enable_uni_mes) 1808 amdgpu_mes_unmap_legacy_queue(adev, 1809 &adev->mes.ring[AMDGPU_MES_SCHED_PIPE], 1810 RESET_QUEUES, 0, 0); 1811 else 1812 mes_v12_0_kiq_dequeue_sched(adev); 1813 1814 adev->mes.ring[0].sched.ready = false; 1815 } 1816 1817 mes_v12_0_enable(adev, false); 1818 1819 return 0; 1820 } 1821 1822 static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) 1823 { 1824 int r; 1825 struct amdgpu_device *adev = ip_block->adev; 1826 1827 if (adev->mes.ring[0].sched.ready) 1828 goto out; 1829 1830 if (!adev->enable_mes_kiq) { 1831 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 1832 r = mes_v12_0_load_microcode(adev, 1833 AMDGPU_MES_SCHED_PIPE, true); 1834 if (r) { 1835 DRM_ERROR("failed to MES fw, r=%d\n", r); 1836 return r; 1837 } 1838 1839 mes_v12_0_set_ucode_start_addr(adev); 1840 1841 } else if (adev->firmware.load_type == 1842 AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1843 1844 mes_v12_0_set_ucode_start_addr(adev); 1845 } 1846 1847 mes_v12_0_enable(adev, true); 1848 } 1849 1850 /* Enable the MES to handle doorbell ring on unmapped queue */ 1851 mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true); 1852 1853 r = mes_v12_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE); 1854 if (r) 1855 goto failure; 1856 1857 r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_SCHED_PIPE); 1858 if (r) 1859 goto failure; 1860 1861 if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b) 1862 mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE); 1863 1864 mes_v12_0_init_aggregated_doorbell(&adev->mes); 1865 1866 r = mes_v12_0_query_sched_status(&adev->mes, AMDGPU_MES_SCHED_PIPE); 1867 if (r) { 1868 DRM_ERROR("MES is busy\n"); 1869 goto failure; 1870 } 1871 1872 r = amdgpu_mes_update_enforce_isolation(adev); 1873 if (r) 1874 goto failure; 1875 1876 out: 1877 /* 1878 * Disable KIQ ring usage from the driver once MES is enabled. 1879 * MES uses KIQ ring exclusively so driver cannot access KIQ ring 1880 * with MES enabled. 1881 */ 1882 adev->gfx.kiq[0].ring.sched.ready = false; 1883 adev->mes.ring[0].sched.ready = true; 1884 1885 return 0; 1886 1887 failure: 1888 mes_v12_0_hw_fini(ip_block); 1889 return r; 1890 } 1891 1892 static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block) 1893 { 1894 return 0; 1895 } 1896 1897 static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block) 1898 { 1899 return mes_v12_0_hw_fini(ip_block); 1900 } 1901 1902 static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block) 1903 { 1904 return mes_v12_0_hw_init(ip_block); 1905 } 1906 1907 static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) 1908 { 1909 struct amdgpu_device *adev = ip_block->adev; 1910 int pipe, r; 1911 1912 adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE; 1913 adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET; 1914 1915 for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { 1916 r = amdgpu_mes_init_microcode(adev, pipe); 1917 if (r) 1918 return r; 1919 } 1920 1921 return 0; 1922 } 1923 1924 static const struct amd_ip_funcs mes_v12_0_ip_funcs = { 1925 .name = "mes_v12_0", 1926 .early_init = mes_v12_0_early_init, 1927 .late_init = NULL, 1928 .sw_init = mes_v12_0_sw_init, 1929 .sw_fini = mes_v12_0_sw_fini, 1930 .hw_init = mes_v12_0_hw_init, 1931 .hw_fini = mes_v12_0_hw_fini, 1932 .suspend = mes_v12_0_suspend, 1933 .resume = mes_v12_0_resume, 1934 }; 1935 1936 const struct amdgpu_ip_block_version mes_v12_0_ip_block = { 1937 .type = AMD_IP_BLOCK_TYPE_MES, 1938 .major = 12, 1939 .minor = 0, 1940 .rev = 0, 1941 .funcs = &mes_v12_0_ip_funcs, 1942 }; 1943