1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <drm/drm_exec.h> 26 27 #include "amdgpu_mes.h" 28 #include "amdgpu.h" 29 #include "soc15_common.h" 30 #include "amdgpu_mes_ctx.h" 31 32 #define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 33 #define AMDGPU_ONE_DOORBELL_SIZE 8 34 35 int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) 36 { 37 return roundup(AMDGPU_ONE_DOORBELL_SIZE * 38 AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS, 39 PAGE_SIZE); 40 } 41 42 static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, 43 int ip_type, uint64_t *doorbell_index) 44 { 45 unsigned int offset, found; 46 struct amdgpu_mes *mes = &adev->mes; 47 48 if (ip_type == AMDGPU_RING_TYPE_SDMA) 49 offset = adev->doorbell_index.sdma_engine[0]; 50 else 51 offset = 0; 52 53 found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset); 54 if (found >= mes->num_mes_dbs) { 55 DRM_WARN("No doorbell available\n"); 56 return -ENOSPC; 57 } 58 59 set_bit(found, mes->doorbell_bitmap); 60 61 /* Get the absolute doorbell index on BAR */ 62 *doorbell_index = mes->db_start_dw_offset + found * 2; 63 return 0; 64 } 65 66 static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev, 67 uint32_t doorbell_index) 68 { 69 unsigned int old, rel_index; 70 struct amdgpu_mes *mes = &adev->mes; 71 72 /* Find the relative index of the doorbell in this object */ 73 rel_index = (doorbell_index - mes->db_start_dw_offset) / 2; 74 old = test_and_clear_bit(rel_index, mes->doorbell_bitmap); 75 WARN_ON(!old); 76 } 77 78 static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) 79 { 80 int i; 81 struct amdgpu_mes *mes = &adev->mes; 82 83 /* Bitmap for dynamic allocation of kernel doorbells */ 84 mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); 85 if (!mes->doorbell_bitmap) { 86 DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); 87 return -ENOMEM; 88 } 89 90 mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE; 91 for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) { 92 adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2; 93 set_bit(i, mes->doorbell_bitmap); 94 } 95 96 return 0; 97 } 98 99 static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) 100 { 101 int r; 102 103 if (!amdgpu_mes_log_enable) 104 return 0; 105 106 r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE, 107 AMDGPU_GEM_DOMAIN_GTT, 108 &adev->mes.event_log_gpu_obj, 109 &adev->mes.event_log_gpu_addr, 110 &adev->mes.event_log_cpu_addr); 111 if (r) { 112 dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r); 113 return r; 114 } 115 116 memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size); 117 118 return 0; 119 120 } 121 122 static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev) 123 { 124 bitmap_free(adev->mes.doorbell_bitmap); 125 } 126 127 int amdgpu_mes_init(struct amdgpu_device *adev) 128 { 129 int i, r; 130 131 adev->mes.adev = adev; 132 133 idr_init(&adev->mes.pasid_idr); 134 idr_init(&adev->mes.gang_id_idr); 135 idr_init(&adev->mes.queue_id_idr); 136 ida_init(&adev->mes.doorbell_ida); 137 spin_lock_init(&adev->mes.queue_id_lock); 138 mutex_init(&adev->mes.mutex_hidden); 139 140 for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) 141 spin_lock_init(&adev->mes.ring_lock[i]); 142 143 adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK; 144 adev->mes.vmid_mask_mmhub = 0xffffff00; 145 adev->mes.vmid_mask_gfxhub = 0xffffff00; 146 147 for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) { 148 /* use only 1st MEC pipes */ 149 if (i >= adev->gfx.mec.num_pipe_per_mec) 150 continue; 151 adev->mes.compute_hqd_mask[i] = 0xc; 152 } 153 154 for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) 155 adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe; 156 157 for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) { 158 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < 159 IP_VERSION(6, 0, 0)) 160 adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc; 161 /* zero sdma_hqd_mask for non-existent engine */ 162 else if (adev->sdma.num_instances == 1) 163 adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc; 164 else 165 adev->mes.sdma_hqd_mask[i] = 0xfc; 166 } 167 168 for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) { 169 r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]); 170 if (r) { 171 dev_err(adev->dev, 172 "(%d) ring trail_fence_offs wb alloc failed\n", 173 r); 174 goto error; 175 } 176 adev->mes.sch_ctx_gpu_addr[i] = 177 adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4); 178 adev->mes.sch_ctx_ptr[i] = 179 (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]]; 180 181 r = amdgpu_device_wb_get(adev, 182 &adev->mes.query_status_fence_offs[i]); 183 if (r) { 184 dev_err(adev->dev, 185 "(%d) query_status_fence_offs wb alloc failed\n", 186 r); 187 goto error; 188 } 189 adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr + 190 (adev->mes.query_status_fence_offs[i] * 4); 191 adev->mes.query_status_fence_ptr[i] = 192 (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]]; 193 } 194 195 r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs); 196 if (r) { 197 dev_err(adev->dev, 198 "(%d) read_val_offs alloc failed\n", r); 199 goto error; 200 } 201 adev->mes.read_val_gpu_addr = 202 adev->wb.gpu_addr + (adev->mes.read_val_offs * 4); 203 adev->mes.read_val_ptr = 204 (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs]; 205 206 r = amdgpu_mes_doorbell_init(adev); 207 if (r) 208 goto error; 209 210 r = amdgpu_mes_event_log_init(adev); 211 if (r) 212 goto error_doorbell; 213 214 return 0; 215 216 error_doorbell: 217 amdgpu_mes_doorbell_free(adev); 218 error: 219 for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) { 220 if (adev->mes.sch_ctx_ptr[i]) 221 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]); 222 if (adev->mes.query_status_fence_ptr[i]) 223 amdgpu_device_wb_free(adev, 224 adev->mes.query_status_fence_offs[i]); 225 } 226 if (adev->mes.read_val_ptr) 227 amdgpu_device_wb_free(adev, adev->mes.read_val_offs); 228 229 idr_destroy(&adev->mes.pasid_idr); 230 idr_destroy(&adev->mes.gang_id_idr); 231 idr_destroy(&adev->mes.queue_id_idr); 232 ida_destroy(&adev->mes.doorbell_ida); 233 mutex_destroy(&adev->mes.mutex_hidden); 234 return r; 235 } 236 237 void amdgpu_mes_fini(struct amdgpu_device *adev) 238 { 239 int i; 240 241 amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj, 242 &adev->mes.event_log_gpu_addr, 243 &adev->mes.event_log_cpu_addr); 244 245 for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) { 246 if (adev->mes.sch_ctx_ptr[i]) 247 amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]); 248 if (adev->mes.query_status_fence_ptr[i]) 249 amdgpu_device_wb_free(adev, 250 adev->mes.query_status_fence_offs[i]); 251 } 252 if (adev->mes.read_val_ptr) 253 amdgpu_device_wb_free(adev, adev->mes.read_val_offs); 254 255 amdgpu_mes_doorbell_free(adev); 256 257 idr_destroy(&adev->mes.pasid_idr); 258 idr_destroy(&adev->mes.gang_id_idr); 259 idr_destroy(&adev->mes.queue_id_idr); 260 ida_destroy(&adev->mes.doorbell_ida); 261 mutex_destroy(&adev->mes.mutex_hidden); 262 } 263 264 static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q) 265 { 266 amdgpu_bo_free_kernel(&q->mqd_obj, 267 &q->mqd_gpu_addr, 268 &q->mqd_cpu_ptr); 269 } 270 271 int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid, 272 struct amdgpu_vm *vm) 273 { 274 struct amdgpu_mes_process *process; 275 int r; 276 277 /* allocate the mes process buffer */ 278 process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL); 279 if (!process) { 280 DRM_ERROR("no more memory to create mes process\n"); 281 return -ENOMEM; 282 } 283 284 /* allocate the process context bo and map it */ 285 r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE, 286 AMDGPU_GEM_DOMAIN_GTT, 287 &process->proc_ctx_bo, 288 &process->proc_ctx_gpu_addr, 289 &process->proc_ctx_cpu_ptr); 290 if (r) { 291 DRM_ERROR("failed to allocate process context bo\n"); 292 goto clean_up_memory; 293 } 294 memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); 295 296 /* 297 * Avoid taking any other locks under MES lock to avoid circular 298 * lock dependencies. 299 */ 300 amdgpu_mes_lock(&adev->mes); 301 302 /* add the mes process to idr list */ 303 r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1, 304 GFP_KERNEL); 305 if (r < 0) { 306 DRM_ERROR("failed to lock pasid=%d\n", pasid); 307 goto clean_up_ctx; 308 } 309 310 INIT_LIST_HEAD(&process->gang_list); 311 process->vm = vm; 312 process->pasid = pasid; 313 process->process_quantum = adev->mes.default_process_quantum; 314 process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo); 315 316 amdgpu_mes_unlock(&adev->mes); 317 return 0; 318 319 clean_up_ctx: 320 amdgpu_mes_unlock(&adev->mes); 321 amdgpu_bo_free_kernel(&process->proc_ctx_bo, 322 &process->proc_ctx_gpu_addr, 323 &process->proc_ctx_cpu_ptr); 324 clean_up_memory: 325 kfree(process); 326 return r; 327 } 328 329 void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid) 330 { 331 struct amdgpu_mes_process *process; 332 struct amdgpu_mes_gang *gang, *tmp1; 333 struct amdgpu_mes_queue *queue, *tmp2; 334 struct mes_remove_queue_input queue_input; 335 unsigned long flags; 336 int r; 337 338 /* 339 * Avoid taking any other locks under MES lock to avoid circular 340 * lock dependencies. 341 */ 342 amdgpu_mes_lock(&adev->mes); 343 344 process = idr_find(&adev->mes.pasid_idr, pasid); 345 if (!process) { 346 DRM_WARN("pasid %d doesn't exist\n", pasid); 347 amdgpu_mes_unlock(&adev->mes); 348 return; 349 } 350 351 /* Remove all queues from hardware */ 352 list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) { 353 list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) { 354 spin_lock_irqsave(&adev->mes.queue_id_lock, flags); 355 idr_remove(&adev->mes.queue_id_idr, queue->queue_id); 356 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 357 358 queue_input.doorbell_offset = queue->doorbell_off; 359 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; 360 361 r = adev->mes.funcs->remove_hw_queue(&adev->mes, 362 &queue_input); 363 if (r) 364 DRM_WARN("failed to remove hardware queue\n"); 365 } 366 367 idr_remove(&adev->mes.gang_id_idr, gang->gang_id); 368 } 369 370 idr_remove(&adev->mes.pasid_idr, pasid); 371 amdgpu_mes_unlock(&adev->mes); 372 373 /* free all memory allocated by the process */ 374 list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) { 375 /* free all queues in the gang */ 376 list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) { 377 amdgpu_mes_queue_free_mqd(queue); 378 list_del(&queue->list); 379 kfree(queue); 380 } 381 amdgpu_bo_free_kernel(&gang->gang_ctx_bo, 382 &gang->gang_ctx_gpu_addr, 383 &gang->gang_ctx_cpu_ptr); 384 list_del(&gang->list); 385 kfree(gang); 386 387 } 388 amdgpu_bo_free_kernel(&process->proc_ctx_bo, 389 &process->proc_ctx_gpu_addr, 390 &process->proc_ctx_cpu_ptr); 391 kfree(process); 392 } 393 394 int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid, 395 struct amdgpu_mes_gang_properties *gprops, 396 int *gang_id) 397 { 398 struct amdgpu_mes_process *process; 399 struct amdgpu_mes_gang *gang; 400 int r; 401 402 /* allocate the mes gang buffer */ 403 gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL); 404 if (!gang) { 405 return -ENOMEM; 406 } 407 408 /* allocate the gang context bo and map it to cpu space */ 409 r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE, 410 AMDGPU_GEM_DOMAIN_GTT, 411 &gang->gang_ctx_bo, 412 &gang->gang_ctx_gpu_addr, 413 &gang->gang_ctx_cpu_ptr); 414 if (r) { 415 DRM_ERROR("failed to allocate process context bo\n"); 416 goto clean_up_mem; 417 } 418 memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE); 419 420 /* 421 * Avoid taking any other locks under MES lock to avoid circular 422 * lock dependencies. 423 */ 424 amdgpu_mes_lock(&adev->mes); 425 426 process = idr_find(&adev->mes.pasid_idr, pasid); 427 if (!process) { 428 DRM_ERROR("pasid %d doesn't exist\n", pasid); 429 r = -EINVAL; 430 goto clean_up_ctx; 431 } 432 433 /* add the mes gang to idr list */ 434 r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0, 435 GFP_KERNEL); 436 if (r < 0) { 437 DRM_ERROR("failed to allocate idr for gang\n"); 438 goto clean_up_ctx; 439 } 440 441 gang->gang_id = r; 442 *gang_id = r; 443 444 INIT_LIST_HEAD(&gang->queue_list); 445 gang->process = process; 446 gang->priority = gprops->priority; 447 gang->gang_quantum = gprops->gang_quantum ? 448 gprops->gang_quantum : adev->mes.default_gang_quantum; 449 gang->global_priority_level = gprops->global_priority_level; 450 gang->inprocess_gang_priority = gprops->inprocess_gang_priority; 451 list_add_tail(&gang->list, &process->gang_list); 452 453 amdgpu_mes_unlock(&adev->mes); 454 return 0; 455 456 clean_up_ctx: 457 amdgpu_mes_unlock(&adev->mes); 458 amdgpu_bo_free_kernel(&gang->gang_ctx_bo, 459 &gang->gang_ctx_gpu_addr, 460 &gang->gang_ctx_cpu_ptr); 461 clean_up_mem: 462 kfree(gang); 463 return r; 464 } 465 466 int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id) 467 { 468 struct amdgpu_mes_gang *gang; 469 470 /* 471 * Avoid taking any other locks under MES lock to avoid circular 472 * lock dependencies. 473 */ 474 amdgpu_mes_lock(&adev->mes); 475 476 gang = idr_find(&adev->mes.gang_id_idr, gang_id); 477 if (!gang) { 478 DRM_ERROR("gang id %d doesn't exist\n", gang_id); 479 amdgpu_mes_unlock(&adev->mes); 480 return -EINVAL; 481 } 482 483 if (!list_empty(&gang->queue_list)) { 484 DRM_ERROR("queue list is not empty\n"); 485 amdgpu_mes_unlock(&adev->mes); 486 return -EBUSY; 487 } 488 489 idr_remove(&adev->mes.gang_id_idr, gang->gang_id); 490 list_del(&gang->list); 491 amdgpu_mes_unlock(&adev->mes); 492 493 amdgpu_bo_free_kernel(&gang->gang_ctx_bo, 494 &gang->gang_ctx_gpu_addr, 495 &gang->gang_ctx_cpu_ptr); 496 497 kfree(gang); 498 499 return 0; 500 } 501 502 int amdgpu_mes_suspend(struct amdgpu_device *adev) 503 { 504 struct mes_suspend_gang_input input; 505 int r; 506 507 if (!amdgpu_mes_suspend_resume_all_supported(adev)) 508 return 0; 509 510 memset(&input, 0x0, sizeof(struct mes_suspend_gang_input)); 511 input.suspend_all_gangs = 1; 512 513 /* 514 * Avoid taking any other locks under MES lock to avoid circular 515 * lock dependencies. 516 */ 517 amdgpu_mes_lock(&adev->mes); 518 r = adev->mes.funcs->suspend_gang(&adev->mes, &input); 519 amdgpu_mes_unlock(&adev->mes); 520 if (r) 521 DRM_ERROR("failed to suspend all gangs"); 522 523 return r; 524 } 525 526 int amdgpu_mes_resume(struct amdgpu_device *adev) 527 { 528 struct mes_resume_gang_input input; 529 int r; 530 531 if (!amdgpu_mes_suspend_resume_all_supported(adev)) 532 return 0; 533 534 memset(&input, 0x0, sizeof(struct mes_resume_gang_input)); 535 input.resume_all_gangs = 1; 536 537 /* 538 * Avoid taking any other locks under MES lock to avoid circular 539 * lock dependencies. 540 */ 541 amdgpu_mes_lock(&adev->mes); 542 r = adev->mes.funcs->resume_gang(&adev->mes, &input); 543 amdgpu_mes_unlock(&adev->mes); 544 if (r) 545 DRM_ERROR("failed to resume all gangs"); 546 547 return r; 548 } 549 550 static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev, 551 struct amdgpu_mes_queue *q, 552 struct amdgpu_mes_queue_properties *p) 553 { 554 struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type]; 555 u32 mqd_size = mqd_mgr->mqd_size; 556 int r; 557 558 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, 559 AMDGPU_GEM_DOMAIN_GTT, 560 &q->mqd_obj, 561 &q->mqd_gpu_addr, &q->mqd_cpu_ptr); 562 if (r) { 563 dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r); 564 return r; 565 } 566 memset(q->mqd_cpu_ptr, 0, mqd_size); 567 568 r = amdgpu_bo_reserve(q->mqd_obj, false); 569 if (unlikely(r != 0)) 570 goto clean_up; 571 572 return 0; 573 574 clean_up: 575 amdgpu_bo_free_kernel(&q->mqd_obj, 576 &q->mqd_gpu_addr, 577 &q->mqd_cpu_ptr); 578 return r; 579 } 580 581 static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev, 582 struct amdgpu_mes_queue *q, 583 struct amdgpu_mes_queue_properties *p) 584 { 585 struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type]; 586 struct amdgpu_mqd_prop mqd_prop = {0}; 587 588 mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr; 589 mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr; 590 mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr; 591 mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr; 592 mqd_prop.queue_size = p->queue_size; 593 mqd_prop.use_doorbell = true; 594 mqd_prop.doorbell_index = p->doorbell_off; 595 mqd_prop.eop_gpu_addr = p->eop_gpu_addr; 596 mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority; 597 mqd_prop.hqd_queue_priority = p->hqd_queue_priority; 598 mqd_prop.hqd_active = false; 599 600 if (p->queue_type == AMDGPU_RING_TYPE_GFX || 601 p->queue_type == AMDGPU_RING_TYPE_COMPUTE) { 602 mutex_lock(&adev->srbm_mutex); 603 amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0); 604 } 605 606 mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop); 607 608 if (p->queue_type == AMDGPU_RING_TYPE_GFX || 609 p->queue_type == AMDGPU_RING_TYPE_COMPUTE) { 610 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); 611 mutex_unlock(&adev->srbm_mutex); 612 } 613 614 amdgpu_bo_unreserve(q->mqd_obj); 615 } 616 617 int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, 618 struct amdgpu_mes_queue_properties *qprops, 619 int *queue_id) 620 { 621 struct amdgpu_mes_queue *queue; 622 struct amdgpu_mes_gang *gang; 623 struct mes_add_queue_input queue_input; 624 unsigned long flags; 625 int r; 626 627 memset(&queue_input, 0, sizeof(struct mes_add_queue_input)); 628 629 /* allocate the mes queue buffer */ 630 queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL); 631 if (!queue) { 632 DRM_ERROR("Failed to allocate memory for queue\n"); 633 return -ENOMEM; 634 } 635 636 /* Allocate the queue mqd */ 637 r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops); 638 if (r) 639 goto clean_up_memory; 640 641 /* 642 * Avoid taking any other locks under MES lock to avoid circular 643 * lock dependencies. 644 */ 645 amdgpu_mes_lock(&adev->mes); 646 647 gang = idr_find(&adev->mes.gang_id_idr, gang_id); 648 if (!gang) { 649 DRM_ERROR("gang id %d doesn't exist\n", gang_id); 650 r = -EINVAL; 651 goto clean_up_mqd; 652 } 653 654 /* add the mes gang to idr list */ 655 spin_lock_irqsave(&adev->mes.queue_id_lock, flags); 656 r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0, 657 GFP_ATOMIC); 658 if (r < 0) { 659 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 660 goto clean_up_mqd; 661 } 662 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 663 *queue_id = queue->queue_id = r; 664 665 /* allocate a doorbell index for the queue */ 666 r = amdgpu_mes_kernel_doorbell_get(adev, 667 qprops->queue_type, 668 &qprops->doorbell_off); 669 if (r) 670 goto clean_up_queue_id; 671 672 /* initialize the queue mqd */ 673 amdgpu_mes_queue_init_mqd(adev, queue, qprops); 674 675 /* add hw queue to mes */ 676 queue_input.process_id = gang->process->pasid; 677 678 queue_input.page_table_base_addr = 679 adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr - 680 adev->gmc.vram_start; 681 682 queue_input.process_va_start = 0; 683 queue_input.process_va_end = 684 (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT; 685 queue_input.process_quantum = gang->process->process_quantum; 686 queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr; 687 queue_input.gang_quantum = gang->gang_quantum; 688 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; 689 queue_input.inprocess_gang_priority = gang->inprocess_gang_priority; 690 queue_input.gang_global_priority_level = gang->global_priority_level; 691 queue_input.doorbell_offset = qprops->doorbell_off; 692 queue_input.mqd_addr = queue->mqd_gpu_addr; 693 queue_input.wptr_addr = qprops->wptr_gpu_addr; 694 queue_input.wptr_mc_addr = qprops->wptr_mc_addr; 695 queue_input.queue_type = qprops->queue_type; 696 queue_input.paging = qprops->paging; 697 queue_input.is_kfd_process = 0; 698 699 r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input); 700 if (r) { 701 DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n", 702 qprops->doorbell_off); 703 goto clean_up_doorbell; 704 } 705 706 DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, " 707 "queue type=%d, doorbell=0x%llx\n", 708 gang->process->pasid, gang_id, qprops->queue_type, 709 qprops->doorbell_off); 710 711 queue->ring = qprops->ring; 712 queue->doorbell_off = qprops->doorbell_off; 713 queue->wptr_gpu_addr = qprops->wptr_gpu_addr; 714 queue->queue_type = qprops->queue_type; 715 queue->paging = qprops->paging; 716 queue->gang = gang; 717 queue->ring->mqd_ptr = queue->mqd_cpu_ptr; 718 list_add_tail(&queue->list, &gang->queue_list); 719 720 amdgpu_mes_unlock(&adev->mes); 721 return 0; 722 723 clean_up_doorbell: 724 amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off); 725 clean_up_queue_id: 726 spin_lock_irqsave(&adev->mes.queue_id_lock, flags); 727 idr_remove(&adev->mes.queue_id_idr, queue->queue_id); 728 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 729 clean_up_mqd: 730 amdgpu_mes_unlock(&adev->mes); 731 amdgpu_mes_queue_free_mqd(queue); 732 clean_up_memory: 733 kfree(queue); 734 return r; 735 } 736 737 int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) 738 { 739 unsigned long flags; 740 struct amdgpu_mes_queue *queue; 741 struct amdgpu_mes_gang *gang; 742 struct mes_remove_queue_input queue_input; 743 int r; 744 745 /* 746 * Avoid taking any other locks under MES lock to avoid circular 747 * lock dependencies. 748 */ 749 amdgpu_mes_lock(&adev->mes); 750 751 /* remove the mes gang from idr list */ 752 spin_lock_irqsave(&adev->mes.queue_id_lock, flags); 753 754 queue = idr_find(&adev->mes.queue_id_idr, queue_id); 755 if (!queue) { 756 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 757 amdgpu_mes_unlock(&adev->mes); 758 DRM_ERROR("queue id %d doesn't exist\n", queue_id); 759 return -EINVAL; 760 } 761 762 idr_remove(&adev->mes.queue_id_idr, queue_id); 763 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 764 765 DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n", 766 queue->doorbell_off); 767 768 gang = queue->gang; 769 queue_input.doorbell_offset = queue->doorbell_off; 770 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; 771 772 r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input); 773 if (r) 774 DRM_ERROR("failed to remove hardware queue, queue id = %d\n", 775 queue_id); 776 777 list_del(&queue->list); 778 amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off); 779 amdgpu_mes_unlock(&adev->mes); 780 781 amdgpu_mes_queue_free_mqd(queue); 782 kfree(queue); 783 return 0; 784 } 785 786 int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id) 787 { 788 unsigned long flags; 789 struct amdgpu_mes_queue *queue; 790 struct amdgpu_mes_gang *gang; 791 struct mes_reset_queue_input queue_input; 792 int r; 793 794 /* 795 * Avoid taking any other locks under MES lock to avoid circular 796 * lock dependencies. 797 */ 798 amdgpu_mes_lock(&adev->mes); 799 800 /* remove the mes gang from idr list */ 801 spin_lock_irqsave(&adev->mes.queue_id_lock, flags); 802 803 queue = idr_find(&adev->mes.queue_id_idr, queue_id); 804 if (!queue) { 805 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 806 amdgpu_mes_unlock(&adev->mes); 807 DRM_ERROR("queue id %d doesn't exist\n", queue_id); 808 return -EINVAL; 809 } 810 spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); 811 812 DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n", 813 queue->doorbell_off); 814 815 gang = queue->gang; 816 queue_input.doorbell_offset = queue->doorbell_off; 817 queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; 818 819 r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); 820 if (r) 821 DRM_ERROR("failed to reset hardware queue, queue id = %d\n", 822 queue_id); 823 824 amdgpu_mes_unlock(&adev->mes); 825 826 return 0; 827 } 828 829 int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type, 830 int me_id, int pipe_id, int queue_id, int vmid) 831 { 832 struct mes_reset_queue_input queue_input; 833 int r; 834 835 queue_input.use_mmio = true; 836 queue_input.me_id = me_id; 837 queue_input.pipe_id = pipe_id; 838 queue_input.queue_id = queue_id; 839 queue_input.vmid = vmid; 840 r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); 841 if (r) 842 DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n", 843 queue_id); 844 return r; 845 } 846 847 int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, 848 struct amdgpu_ring *ring) 849 { 850 struct mes_map_legacy_queue_input queue_input; 851 int r; 852 853 memset(&queue_input, 0, sizeof(queue_input)); 854 855 queue_input.queue_type = ring->funcs->type; 856 queue_input.doorbell_offset = ring->doorbell_index; 857 queue_input.pipe_id = ring->pipe; 858 queue_input.queue_id = ring->queue; 859 queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 860 queue_input.wptr_addr = ring->wptr_gpu_addr; 861 862 r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); 863 if (r) 864 DRM_ERROR("failed to map legacy queue\n"); 865 866 return r; 867 } 868 869 int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, 870 struct amdgpu_ring *ring, 871 enum amdgpu_unmap_queues_action action, 872 u64 gpu_addr, u64 seq) 873 { 874 struct mes_unmap_legacy_queue_input queue_input; 875 int r; 876 877 queue_input.action = action; 878 queue_input.queue_type = ring->funcs->type; 879 queue_input.doorbell_offset = ring->doorbell_index; 880 queue_input.pipe_id = ring->pipe; 881 queue_input.queue_id = ring->queue; 882 queue_input.trail_fence_addr = gpu_addr; 883 queue_input.trail_fence_data = seq; 884 885 r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); 886 if (r) 887 DRM_ERROR("failed to unmap legacy queue\n"); 888 889 return r; 890 } 891 892 int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, 893 struct amdgpu_ring *ring, 894 unsigned int vmid, 895 bool use_mmio) 896 { 897 struct mes_reset_legacy_queue_input queue_input; 898 int r; 899 900 memset(&queue_input, 0, sizeof(queue_input)); 901 902 queue_input.queue_type = ring->funcs->type; 903 queue_input.doorbell_offset = ring->doorbell_index; 904 queue_input.me_id = ring->me; 905 queue_input.pipe_id = ring->pipe; 906 queue_input.queue_id = ring->queue; 907 queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 908 queue_input.wptr_addr = ring->wptr_gpu_addr; 909 queue_input.vmid = vmid; 910 queue_input.use_mmio = use_mmio; 911 912 r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input); 913 if (r) 914 DRM_ERROR("failed to reset legacy queue\n"); 915 916 return r; 917 } 918 919 uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) 920 { 921 struct mes_misc_op_input op_input; 922 int r, val = 0; 923 924 op_input.op = MES_MISC_OP_READ_REG; 925 op_input.read_reg.reg_offset = reg; 926 op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr; 927 928 if (!adev->mes.funcs->misc_op) { 929 DRM_ERROR("mes rreg is not supported!\n"); 930 goto error; 931 } 932 933 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 934 if (r) 935 DRM_ERROR("failed to read reg (0x%x)\n", reg); 936 else 937 val = *(adev->mes.read_val_ptr); 938 939 error: 940 return val; 941 } 942 943 int amdgpu_mes_wreg(struct amdgpu_device *adev, 944 uint32_t reg, uint32_t val) 945 { 946 struct mes_misc_op_input op_input; 947 int r; 948 949 op_input.op = MES_MISC_OP_WRITE_REG; 950 op_input.write_reg.reg_offset = reg; 951 op_input.write_reg.reg_value = val; 952 953 if (!adev->mes.funcs->misc_op) { 954 DRM_ERROR("mes wreg is not supported!\n"); 955 r = -EINVAL; 956 goto error; 957 } 958 959 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 960 if (r) 961 DRM_ERROR("failed to write reg (0x%x)\n", reg); 962 963 error: 964 return r; 965 } 966 967 int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, 968 uint32_t reg0, uint32_t reg1, 969 uint32_t ref, uint32_t mask) 970 { 971 struct mes_misc_op_input op_input; 972 int r; 973 974 op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT; 975 op_input.wrm_reg.reg0 = reg0; 976 op_input.wrm_reg.reg1 = reg1; 977 op_input.wrm_reg.ref = ref; 978 op_input.wrm_reg.mask = mask; 979 980 if (!adev->mes.funcs->misc_op) { 981 DRM_ERROR("mes reg_write_reg_wait is not supported!\n"); 982 r = -EINVAL; 983 goto error; 984 } 985 986 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 987 if (r) 988 DRM_ERROR("failed to reg_write_reg_wait\n"); 989 990 error: 991 return r; 992 } 993 994 int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, 995 uint32_t val, uint32_t mask) 996 { 997 struct mes_misc_op_input op_input; 998 int r; 999 1000 op_input.op = MES_MISC_OP_WRM_REG_WAIT; 1001 op_input.wrm_reg.reg0 = reg; 1002 op_input.wrm_reg.ref = val; 1003 op_input.wrm_reg.mask = mask; 1004 1005 if (!adev->mes.funcs->misc_op) { 1006 DRM_ERROR("mes reg wait is not supported!\n"); 1007 r = -EINVAL; 1008 goto error; 1009 } 1010 1011 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 1012 if (r) 1013 DRM_ERROR("failed to reg_write_reg_wait\n"); 1014 1015 error: 1016 return r; 1017 } 1018 1019 int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, 1020 uint64_t process_context_addr, 1021 uint32_t spi_gdbg_per_vmid_cntl, 1022 const uint32_t *tcp_watch_cntl, 1023 uint32_t flags, 1024 bool trap_en) 1025 { 1026 struct mes_misc_op_input op_input = {0}; 1027 int r; 1028 1029 if (!adev->mes.funcs->misc_op) { 1030 DRM_ERROR("mes set shader debugger is not supported!\n"); 1031 return -EINVAL; 1032 } 1033 1034 op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; 1035 op_input.set_shader_debugger.process_context_addr = process_context_addr; 1036 op_input.set_shader_debugger.flags.u32all = flags; 1037 1038 /* use amdgpu mes_flush_shader_debugger instead */ 1039 if (op_input.set_shader_debugger.flags.process_ctx_flush) 1040 return -EINVAL; 1041 1042 op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl; 1043 memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl, 1044 sizeof(op_input.set_shader_debugger.tcp_watch_cntl)); 1045 1046 if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> 1047 AMDGPU_MES_API_VERSION_SHIFT) >= 14) 1048 op_input.set_shader_debugger.trap_en = trap_en; 1049 1050 amdgpu_mes_lock(&adev->mes); 1051 1052 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 1053 if (r) 1054 DRM_ERROR("failed to set_shader_debugger\n"); 1055 1056 amdgpu_mes_unlock(&adev->mes); 1057 1058 return r; 1059 } 1060 1061 int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, 1062 uint64_t process_context_addr) 1063 { 1064 struct mes_misc_op_input op_input = {0}; 1065 int r; 1066 1067 if (!adev->mes.funcs->misc_op) { 1068 DRM_ERROR("mes flush shader debugger is not supported!\n"); 1069 return -EINVAL; 1070 } 1071 1072 op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; 1073 op_input.set_shader_debugger.process_context_addr = process_context_addr; 1074 op_input.set_shader_debugger.flags.process_ctx_flush = true; 1075 1076 amdgpu_mes_lock(&adev->mes); 1077 1078 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 1079 if (r) 1080 DRM_ERROR("failed to set_shader_debugger\n"); 1081 1082 amdgpu_mes_unlock(&adev->mes); 1083 1084 return r; 1085 } 1086 1087 static void 1088 amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev, 1089 struct amdgpu_ring *ring, 1090 struct amdgpu_mes_queue_properties *props) 1091 { 1092 props->queue_type = ring->funcs->type; 1093 props->hqd_base_gpu_addr = ring->gpu_addr; 1094 props->rptr_gpu_addr = ring->rptr_gpu_addr; 1095 props->wptr_gpu_addr = ring->wptr_gpu_addr; 1096 props->wptr_mc_addr = 1097 ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs; 1098 props->queue_size = ring->ring_size; 1099 props->eop_gpu_addr = ring->eop_gpu_addr; 1100 props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; 1101 props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM; 1102 props->paging = false; 1103 props->ring = ring; 1104 } 1105 1106 #define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \ 1107 do { \ 1108 if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \ 1109 return offsetof(struct amdgpu_mes_ctx_meta_data, \ 1110 _eng[ring->idx].slots[id_offs]); \ 1111 else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \ 1112 return offsetof(struct amdgpu_mes_ctx_meta_data, \ 1113 _eng[ring->idx].ring); \ 1114 else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \ 1115 return offsetof(struct amdgpu_mes_ctx_meta_data, \ 1116 _eng[ring->idx].ib); \ 1117 else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \ 1118 return offsetof(struct amdgpu_mes_ctx_meta_data, \ 1119 _eng[ring->idx].padding); \ 1120 } while(0) 1121 1122 int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs) 1123 { 1124 switch (ring->funcs->type) { 1125 case AMDGPU_RING_TYPE_GFX: 1126 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx); 1127 break; 1128 case AMDGPU_RING_TYPE_COMPUTE: 1129 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute); 1130 break; 1131 case AMDGPU_RING_TYPE_SDMA: 1132 DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma); 1133 break; 1134 default: 1135 break; 1136 } 1137 1138 WARN_ON(1); 1139 return -EINVAL; 1140 } 1141 1142 int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, 1143 int queue_type, int idx, 1144 struct amdgpu_mes_ctx_data *ctx_data, 1145 struct amdgpu_ring **out) 1146 { 1147 struct amdgpu_ring *ring; 1148 struct amdgpu_mes_gang *gang; 1149 struct amdgpu_mes_queue_properties qprops = {0}; 1150 int r, queue_id, pasid; 1151 1152 /* 1153 * Avoid taking any other locks under MES lock to avoid circular 1154 * lock dependencies. 1155 */ 1156 amdgpu_mes_lock(&adev->mes); 1157 gang = idr_find(&adev->mes.gang_id_idr, gang_id); 1158 if (!gang) { 1159 DRM_ERROR("gang id %d doesn't exist\n", gang_id); 1160 amdgpu_mes_unlock(&adev->mes); 1161 return -EINVAL; 1162 } 1163 pasid = gang->process->pasid; 1164 1165 ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL); 1166 if (!ring) { 1167 amdgpu_mes_unlock(&adev->mes); 1168 return -ENOMEM; 1169 } 1170 1171 ring->ring_obj = NULL; 1172 ring->use_doorbell = true; 1173 ring->is_mes_queue = true; 1174 ring->mes_ctx = ctx_data; 1175 ring->idx = idx; 1176 ring->no_scheduler = true; 1177 1178 if (queue_type == AMDGPU_RING_TYPE_COMPUTE) { 1179 int offset = offsetof(struct amdgpu_mes_ctx_meta_data, 1180 compute[ring->idx].mec_hpd); 1181 ring->eop_gpu_addr = 1182 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 1183 } 1184 1185 switch (queue_type) { 1186 case AMDGPU_RING_TYPE_GFX: 1187 ring->funcs = adev->gfx.gfx_ring[0].funcs; 1188 ring->me = adev->gfx.gfx_ring[0].me; 1189 ring->pipe = adev->gfx.gfx_ring[0].pipe; 1190 break; 1191 case AMDGPU_RING_TYPE_COMPUTE: 1192 ring->funcs = adev->gfx.compute_ring[0].funcs; 1193 ring->me = adev->gfx.compute_ring[0].me; 1194 ring->pipe = adev->gfx.compute_ring[0].pipe; 1195 break; 1196 case AMDGPU_RING_TYPE_SDMA: 1197 ring->funcs = adev->sdma.instance[0].ring.funcs; 1198 break; 1199 default: 1200 BUG(); 1201 } 1202 1203 r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, 1204 AMDGPU_RING_PRIO_DEFAULT, NULL); 1205 if (r) 1206 goto clean_up_memory; 1207 1208 amdgpu_mes_ring_to_queue_props(adev, ring, &qprops); 1209 1210 dma_fence_wait(gang->process->vm->last_update, false); 1211 dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false); 1212 amdgpu_mes_unlock(&adev->mes); 1213 1214 r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id); 1215 if (r) 1216 goto clean_up_ring; 1217 1218 ring->hw_queue_id = queue_id; 1219 ring->doorbell_index = qprops.doorbell_off; 1220 1221 if (queue_type == AMDGPU_RING_TYPE_GFX) 1222 sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id); 1223 else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) 1224 sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id, 1225 queue_id); 1226 else if (queue_type == AMDGPU_RING_TYPE_SDMA) 1227 sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id, 1228 queue_id); 1229 else 1230 BUG(); 1231 1232 *out = ring; 1233 return 0; 1234 1235 clean_up_ring: 1236 amdgpu_ring_fini(ring); 1237 clean_up_memory: 1238 kfree(ring); 1239 amdgpu_mes_unlock(&adev->mes); 1240 return r; 1241 } 1242 1243 void amdgpu_mes_remove_ring(struct amdgpu_device *adev, 1244 struct amdgpu_ring *ring) 1245 { 1246 if (!ring) 1247 return; 1248 1249 amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id); 1250 del_timer_sync(&ring->fence_drv.fallback_timer); 1251 amdgpu_ring_fini(ring); 1252 kfree(ring); 1253 } 1254 1255 uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev, 1256 enum amdgpu_mes_priority_level prio) 1257 { 1258 return adev->mes.aggregated_doorbells[prio]; 1259 } 1260 1261 int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev, 1262 struct amdgpu_mes_ctx_data *ctx_data) 1263 { 1264 int r; 1265 1266 r = amdgpu_bo_create_kernel(adev, 1267 sizeof(struct amdgpu_mes_ctx_meta_data), 1268 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 1269 &ctx_data->meta_data_obj, 1270 &ctx_data->meta_data_mc_addr, 1271 &ctx_data->meta_data_ptr); 1272 if (r) { 1273 dev_warn(adev->dev, "(%d) create CTX bo failed\n", r); 1274 return r; 1275 } 1276 1277 if (!ctx_data->meta_data_obj) 1278 return -ENOMEM; 1279 1280 memset(ctx_data->meta_data_ptr, 0, 1281 sizeof(struct amdgpu_mes_ctx_meta_data)); 1282 1283 return 0; 1284 } 1285 1286 void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data) 1287 { 1288 if (ctx_data->meta_data_obj) 1289 amdgpu_bo_free_kernel(&ctx_data->meta_data_obj, 1290 &ctx_data->meta_data_mc_addr, 1291 &ctx_data->meta_data_ptr); 1292 } 1293 1294 int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, 1295 struct amdgpu_vm *vm, 1296 struct amdgpu_mes_ctx_data *ctx_data) 1297 { 1298 struct amdgpu_bo_va *bo_va; 1299 struct amdgpu_sync sync; 1300 struct drm_exec exec; 1301 int r; 1302 1303 amdgpu_sync_create(&sync); 1304 1305 drm_exec_init(&exec, 0, 0); 1306 drm_exec_until_all_locked(&exec) { 1307 r = drm_exec_lock_obj(&exec, 1308 &ctx_data->meta_data_obj->tbo.base); 1309 drm_exec_retry_on_contention(&exec); 1310 if (unlikely(r)) 1311 goto error_fini_exec; 1312 1313 r = amdgpu_vm_lock_pd(vm, &exec, 0); 1314 drm_exec_retry_on_contention(&exec); 1315 if (unlikely(r)) 1316 goto error_fini_exec; 1317 } 1318 1319 bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj); 1320 if (!bo_va) { 1321 DRM_ERROR("failed to create bo_va for meta data BO\n"); 1322 r = -ENOMEM; 1323 goto error_fini_exec; 1324 } 1325 1326 r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0, 1327 sizeof(struct amdgpu_mes_ctx_meta_data), 1328 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | 1329 AMDGPU_PTE_EXECUTABLE); 1330 1331 if (r) { 1332 DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r); 1333 goto error_del_bo_va; 1334 } 1335 1336 r = amdgpu_vm_bo_update(adev, bo_va, false); 1337 if (r) { 1338 DRM_ERROR("failed to do vm_bo_update on meta data\n"); 1339 goto error_del_bo_va; 1340 } 1341 amdgpu_sync_fence(&sync, bo_va->last_pt_update); 1342 1343 r = amdgpu_vm_update_pdes(adev, vm, false); 1344 if (r) { 1345 DRM_ERROR("failed to update pdes on meta data\n"); 1346 goto error_del_bo_va; 1347 } 1348 amdgpu_sync_fence(&sync, vm->last_update); 1349 1350 amdgpu_sync_wait(&sync, false); 1351 drm_exec_fini(&exec); 1352 1353 amdgpu_sync_free(&sync); 1354 ctx_data->meta_data_va = bo_va; 1355 return 0; 1356 1357 error_del_bo_va: 1358 amdgpu_vm_bo_del(adev, bo_va); 1359 1360 error_fini_exec: 1361 drm_exec_fini(&exec); 1362 amdgpu_sync_free(&sync); 1363 return r; 1364 } 1365 1366 int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, 1367 struct amdgpu_mes_ctx_data *ctx_data) 1368 { 1369 struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va; 1370 struct amdgpu_bo *bo = ctx_data->meta_data_obj; 1371 struct amdgpu_vm *vm = bo_va->base.vm; 1372 struct dma_fence *fence; 1373 struct drm_exec exec; 1374 long r; 1375 1376 drm_exec_init(&exec, 0, 0); 1377 drm_exec_until_all_locked(&exec) { 1378 r = drm_exec_lock_obj(&exec, 1379 &ctx_data->meta_data_obj->tbo.base); 1380 drm_exec_retry_on_contention(&exec); 1381 if (unlikely(r)) 1382 goto out_unlock; 1383 1384 r = amdgpu_vm_lock_pd(vm, &exec, 0); 1385 drm_exec_retry_on_contention(&exec); 1386 if (unlikely(r)) 1387 goto out_unlock; 1388 } 1389 1390 amdgpu_vm_bo_del(adev, bo_va); 1391 if (!amdgpu_vm_ready(vm)) 1392 goto out_unlock; 1393 1394 r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, 1395 &fence); 1396 if (r) 1397 goto out_unlock; 1398 if (fence) { 1399 amdgpu_bo_fence(bo, fence, true); 1400 fence = NULL; 1401 } 1402 1403 r = amdgpu_vm_clear_freed(adev, vm, &fence); 1404 if (r || !fence) 1405 goto out_unlock; 1406 1407 dma_fence_wait(fence, false); 1408 amdgpu_bo_fence(bo, fence, true); 1409 dma_fence_put(fence); 1410 1411 out_unlock: 1412 if (unlikely(r < 0)) 1413 dev_err(adev->dev, "failed to clear page tables (%ld)\n", r); 1414 drm_exec_fini(&exec); 1415 1416 return r; 1417 } 1418 1419 static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev, 1420 int pasid, int *gang_id, 1421 int queue_type, int num_queue, 1422 struct amdgpu_ring **added_rings, 1423 struct amdgpu_mes_ctx_data *ctx_data) 1424 { 1425 struct amdgpu_ring *ring; 1426 struct amdgpu_mes_gang_properties gprops = {0}; 1427 int r, j; 1428 1429 /* create a gang for the process */ 1430 gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; 1431 gprops.gang_quantum = adev->mes.default_gang_quantum; 1432 gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; 1433 gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; 1434 gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL; 1435 1436 r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id); 1437 if (r) { 1438 DRM_ERROR("failed to add gang\n"); 1439 return r; 1440 } 1441 1442 /* create queues for the gang */ 1443 for (j = 0; j < num_queue; j++) { 1444 r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j, 1445 ctx_data, &ring); 1446 if (r) { 1447 DRM_ERROR("failed to add ring\n"); 1448 break; 1449 } 1450 1451 DRM_INFO("ring %s was added\n", ring->name); 1452 added_rings[j] = ring; 1453 } 1454 1455 return 0; 1456 } 1457 1458 static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings) 1459 { 1460 struct amdgpu_ring *ring; 1461 int i, r; 1462 1463 for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) { 1464 ring = added_rings[i]; 1465 if (!ring) 1466 continue; 1467 1468 r = amdgpu_ring_test_helper(ring); 1469 if (r) 1470 return r; 1471 1472 r = amdgpu_ring_test_ib(ring, 1000 * 10); 1473 if (r) { 1474 DRM_DEV_ERROR(ring->adev->dev, 1475 "ring %s ib test failed (%d)\n", 1476 ring->name, r); 1477 return r; 1478 } else 1479 DRM_INFO("ring %s ib test pass\n", ring->name); 1480 } 1481 1482 return 0; 1483 } 1484 1485 int amdgpu_mes_self_test(struct amdgpu_device *adev) 1486 { 1487 struct amdgpu_vm *vm = NULL; 1488 struct amdgpu_mes_ctx_data ctx_data = {0}; 1489 struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL }; 1490 int gang_ids[3] = {0}; 1491 int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 }, 1492 { AMDGPU_RING_TYPE_COMPUTE, 1 }, 1493 { AMDGPU_RING_TYPE_SDMA, 1} }; 1494 int i, r, pasid, k = 0; 1495 1496 pasid = amdgpu_pasid_alloc(16); 1497 if (pasid < 0) { 1498 dev_warn(adev->dev, "No more PASIDs available!"); 1499 pasid = 0; 1500 } 1501 1502 vm = kzalloc(sizeof(*vm), GFP_KERNEL); 1503 if (!vm) { 1504 r = -ENOMEM; 1505 goto error_pasid; 1506 } 1507 1508 r = amdgpu_vm_init(adev, vm, -1); 1509 if (r) { 1510 DRM_ERROR("failed to initialize vm\n"); 1511 goto error_pasid; 1512 } 1513 1514 r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data); 1515 if (r) { 1516 DRM_ERROR("failed to alloc ctx meta data\n"); 1517 goto error_fini; 1518 } 1519 1520 ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; 1521 r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data); 1522 if (r) { 1523 DRM_ERROR("failed to map ctx meta data\n"); 1524 goto error_vm; 1525 } 1526 1527 r = amdgpu_mes_create_process(adev, pasid, vm); 1528 if (r) { 1529 DRM_ERROR("failed to create MES process\n"); 1530 goto error_vm; 1531 } 1532 1533 for (i = 0; i < ARRAY_SIZE(queue_types); i++) { 1534 /* On GFX v10.3, fw hasn't supported to map sdma queue. */ 1535 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= 1536 IP_VERSION(10, 3, 0) && 1537 amdgpu_ip_version(adev, GC_HWIP, 0) < 1538 IP_VERSION(11, 0, 0) && 1539 queue_types[i][0] == AMDGPU_RING_TYPE_SDMA) 1540 continue; 1541 1542 r = amdgpu_mes_test_create_gang_and_queues(adev, pasid, 1543 &gang_ids[i], 1544 queue_types[i][0], 1545 queue_types[i][1], 1546 &added_rings[k], 1547 &ctx_data); 1548 if (r) 1549 goto error_queues; 1550 1551 k += queue_types[i][1]; 1552 } 1553 1554 /* start ring test and ib test for MES queues */ 1555 amdgpu_mes_test_queues(added_rings); 1556 1557 error_queues: 1558 /* remove all queues */ 1559 for (i = 0; i < ARRAY_SIZE(added_rings); i++) { 1560 if (!added_rings[i]) 1561 continue; 1562 amdgpu_mes_remove_ring(adev, added_rings[i]); 1563 } 1564 1565 for (i = 0; i < ARRAY_SIZE(gang_ids); i++) { 1566 if (!gang_ids[i]) 1567 continue; 1568 amdgpu_mes_remove_gang(adev, gang_ids[i]); 1569 } 1570 1571 amdgpu_mes_destroy_process(adev, pasid); 1572 1573 error_vm: 1574 amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data); 1575 1576 error_fini: 1577 amdgpu_vm_fini(adev, vm); 1578 1579 error_pasid: 1580 if (pasid) 1581 amdgpu_pasid_free(pasid); 1582 1583 amdgpu_mes_ctx_free_meta_data(&ctx_data); 1584 kfree(vm); 1585 return 0; 1586 } 1587 1588 int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) 1589 { 1590 const struct mes_firmware_header_v1_0 *mes_hdr; 1591 struct amdgpu_firmware_info *info; 1592 char ucode_prefix[30]; 1593 char fw_name[50]; 1594 bool need_retry = false; 1595 int r; 1596 1597 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, 1598 sizeof(ucode_prefix)); 1599 if (adev->enable_uni_mes) { 1600 snprintf(fw_name, sizeof(fw_name), 1601 "amdgpu/%s_uni_mes.bin", ucode_prefix); 1602 } else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && 1603 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) { 1604 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", 1605 ucode_prefix, 1606 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1"); 1607 need_retry = true; 1608 } else { 1609 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", 1610 ucode_prefix, 1611 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1"); 1612 } 1613 1614 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name); 1615 if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) { 1616 dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix); 1617 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], 1618 "amdgpu/%s_mes.bin", ucode_prefix); 1619 } 1620 1621 if (r) 1622 goto out; 1623 1624 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1625 adev->mes.fw[pipe]->data; 1626 adev->mes.uc_start_addr[pipe] = 1627 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | 1628 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); 1629 adev->mes.data_start_addr[pipe] = 1630 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | 1631 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); 1632 1633 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1634 int ucode, ucode_data; 1635 1636 if (pipe == AMDGPU_MES_SCHED_PIPE) { 1637 ucode = AMDGPU_UCODE_ID_CP_MES; 1638 ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; 1639 } else { 1640 ucode = AMDGPU_UCODE_ID_CP_MES1; 1641 ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; 1642 } 1643 1644 info = &adev->firmware.ucode[ucode]; 1645 info->ucode_id = ucode; 1646 info->fw = adev->mes.fw[pipe]; 1647 adev->firmware.fw_size += 1648 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), 1649 PAGE_SIZE); 1650 1651 info = &adev->firmware.ucode[ucode_data]; 1652 info->ucode_id = ucode_data; 1653 info->fw = adev->mes.fw[pipe]; 1654 adev->firmware.fw_size += 1655 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), 1656 PAGE_SIZE); 1657 } 1658 1659 return 0; 1660 out: 1661 amdgpu_ucode_release(&adev->mes.fw[pipe]); 1662 return r; 1663 } 1664 1665 bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) 1666 { 1667 uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; 1668 bool is_supported = false; 1669 1670 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && 1671 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && 1672 mes_rev >= 0x63) 1673 is_supported = true; 1674 1675 return is_supported; 1676 } 1677 1678 #if defined(CONFIG_DEBUG_FS) 1679 1680 static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) 1681 { 1682 struct amdgpu_device *adev = m->private; 1683 uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); 1684 1685 seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, 1686 mem, adev->mes.event_log_size, false); 1687 1688 return 0; 1689 } 1690 1691 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); 1692 1693 #endif 1694 1695 void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) 1696 { 1697 1698 #if defined(CONFIG_DEBUG_FS) 1699 struct drm_minor *minor = adev_to_drm(adev)->primary; 1700 struct dentry *root = minor->debugfs_root; 1701 if (adev->enable_mes && amdgpu_mes_log_enable) 1702 debugfs_create_file("amdgpu_mes_event_log", 0444, root, 1703 adev, &amdgpu_debugfs_mes_event_log_fops); 1704 1705 #endif 1706 } 1707