/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}
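
/*
 * Allocate and CPU-map the buffer the MES firmware uses as its event log,
 * guarded by the amdgpu_mes_log_enable knob. The contents are exposed
 * read-only through the amdgpu_mes_event_log debugfs file at the end of
 * this file.
 */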
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}
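
/*
 * Tear down everything amdgpu_mes_init() set up: the event log buffer, the
 * three writeback slots, the kernel doorbell bitmap, and the id allocators.
 */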
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to add pasid %d to idr\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}
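
/*
 * Destroy a MES process: while holding the MES lock, every queue of every
 * gang is removed from the firmware and unpublished from the queue IDR;
 * the gang and process memory is then released once the process is no
 * longer reachable through the pasid IDR.
 */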
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}
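
/*
 * Create a gang (a group of queues scheduled as one entity) inside the
 * process identified by pasid. The gang context BO is allocated and mapped
 * before the MES lock is taken, so no BO reservation happens under the lock.
 */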
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}
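
/*
 * Ask the firmware to suspend scheduling of all gangs of all processes;
 * amdgpu_mes_resume() below is the exact mirror of this. Note that
 * suspend_gang/resume_gang are invoked once per gang with an input
 * structure this code does not fill in.
 */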
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}
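
/*
 * Add a hardware queue to the given gang: allocate and initialize the MQD,
 * publish the queue in the queue IDR, reserve a kernel doorbell, and hand
 * the whole description to the MES firmware via the add_hw_queue callback.
 */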
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}
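
/*
 * Remove a hardware queue by id: unpublish it from the queue IDR, ask the
 * firmware to remove it, then return the doorbell and free the MQD.
 */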
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}
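
/*
 * Register access routed through the MES firmware (MES_MISC_OP_*), used
 * when the driver cannot or should not touch the registers directly (for
 * example under SR-IOV). Read results land in the read_val writeback slot
 * allocated in amdgpu_mes_init().
 */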
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}
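
/*
 * Configure the shader debugger for a process through the firmware misc op.
 * A process-context flush is deliberately rejected here; callers must use
 * amdgpu_mes_flush_shader_debugger() below for that.
 */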
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}
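
/*
 * Helper for amdgpu_mes_ctx_get_offs(): expands to the byte offset of a
 * slot, ring buffer, IB, or padding area of the given engine inside
 * struct amdgpu_mes_ctx_meta_data, selected by id_offs.
 */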
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
do { \
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].slots[id_offs]); \
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].ring); \
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].ib); \
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
		return offsetof(struct amdgpu_mes_ctx_meta_data, \
				_eng[ring->idx].padding); \
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped above, so don't unlock here */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}
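
/*
 * Counterpart of amdgpu_mes_add_ring(): remove the backing hardware queue,
 * stop the fence fallback timer, then tear down and free the ring.
 */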
void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}
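
/*
 * Unmap the ctx meta data BO from the process VM. The BO is fenced against
 * the page table clearing so it cannot be released while the VM updates are
 * still in flight.
 */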
int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}
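
/*
 * Self test: create a temporary VM and process, map the ctx meta data,
 * spawn one gang per queue type with one queue each, run ring and IB tests
 * on every ring that was successfully added, then tear it all down.
 */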
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3 the firmware doesn't support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

/*
 * MES firmware naming varies by generation: unified MES (scheduler pipe)
 * loads <prefix>_uni_mes.bin; GC 11.x loads <prefix>_mes_2.bin for the
 * scheduler pipe and <prefix>_mes1.bin for the other pipe, falling back to
 * <prefix>_mes.bin; older parts load <prefix>_mes.bin / <prefix>_mes1.bin.
 */
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
		   amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}