/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

signed long amdgpu_mes_fence_wait_polling(u64 *fence,
					  u64 wait_seq,
					  signed long timeout)
{
	while ((s64)(wait_seq - *fence) > 0 && timeout > 0) {
		udelay(2);
		timeout -= 2;
	}
	return timeout > 0 ? timeout : 0;
}

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

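/*
 * Set up the MES kernel doorbell allocator: one page of doorbells is
 * managed through doorbell_bitmap, and the first
 * AMDGPU_MES_PRIORITY_NUM_LEVELS slots are reserved up front as the
 * aggregated doorbells, one per priority level.
 */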
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	/* clear the whole buffer that was allocated above */
	memset(adev->mes.event_log_cpu_addr, 0, AMDGPU_MES_LOG_BUFFER_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

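/*
 * amdgpu_mes_init - one-time MES bring-up.
 *
 * Initializes the pasid/gang/queue idr tables and locks, programs the
 * VMID and HQD masks handed to the MES firmware, and allocates the
 * writeback slots (scheduler context, query-status fence, register
 * read-back) plus the doorbell bitmap and optional event log buffer.
 */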
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

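/*
 * amdgpu_mes_create_process - register a process with the MES scheduler.
 *
 * Allocates the per-process context BO in GTT and inserts the process
 * into the pasid idr under the MES lock; the caller provides the VM
 * whose page directory address is programmed for the process.
 */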
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to insert pasid %d into idr\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

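/*
 * amdgpu_mes_add_gang - create a gang (scheduling group) for a process.
 *
 * Allocates and maps the gang context BO, looks up the owning process
 * by pasid, and publishes the gang in the gang idr; the allocated gang
 * id is returned through @gang_id.
 */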
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

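/*
 * Suspend and resume walk every gang of every registered process and
 * ask the MES firmware to stop or restart scheduling it.
 */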
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

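/*
 * amdgpu_mes_add_hw_queue - create a hardware queue through the MES.
 *
 * Allocates and initializes the MQD, assigns a queue id and a kernel
 * doorbell, then fills mes_add_queue_input with the process, gang and
 * queue parameters and hands it to the per-ASIC add_hw_queue callback.
 */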
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

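/*
 * amdgpu_mes_remove_hw_queue - tear down a queue created by
 * amdgpu_mes_add_hw_queue: drop it from the queue idr, ask the MES
 * firmware to unmap it, and release its doorbell and MQD.
 */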
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

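/*
 * The WRM misc ops below combine register writes with masked polls:
 * reg_write_reg_wait writes @ref to @reg0 and then polls @reg1 until
 * (@reg1 & @mask) == @ref, while reg_wait only performs the masked
 * poll on a single register.
 */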
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

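/*
 * Helper used by amdgpu_mes_ctx_get_offs(): for the given engine array
 * in amdgpu_mes_ctx_meta_data, translate a slot id (or one of the
 * special RING/IB/PADDING ids) into a byte offset within the metadata
 * structure.
 */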
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

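/*
 * amdgpu_mes_add_ring - create a software ring backed by a MES hardware
 * queue.
 *
 * Builds an amdgpu_ring on top of the given gang: the ring borrows the
 * funcs/me/pipe of the first ring of the requested type, its queue
 * properties are derived from the ring, and the resulting hardware
 * queue id and doorbell are stored back into the ring.
 */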
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/*
	 * The MES lock was already dropped before amdgpu_mes_add_hw_queue(),
	 * so this path must not unlock it again.
	 */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

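/*
 * amdgpu_mes_ctx_map_meta_data - map the context metadata BO into a VM.
 *
 * Locks the BO and the VM page directory with drm_exec, creates the
 * bo_va mapping at ctx_data->meta_data_gpu_addr, and waits for the
 * page-table updates to land before returning.
 */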
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

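/*
 * amdgpu_mes_self_test - smoke test for the MES path.
 *
 * Creates a throwaway VM and process, maps the context metadata,
 * spawns one gang per queue type (gfx/compute/sdma), runs ring and IB
 * tests on every queue that was added, and tears everything down.
 */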
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

/*
 * amdgpu_mes_init_microcode - fetch the MES firmware for one pipe.
 *
 * Resolves the firmware file name from the GC IP version and pipe
 * (scheduler vs. KIQ), requests it, records the ucode/data start
 * addresses from the header, and registers the images with the PSP
 * loader when front-door loading is in use.
 */
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}