/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}
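/*
 * Doorbell bookkeeping in a nutshell (illustrative numbers): each doorbell
 * is AMDGPU_ONE_DOORBELL_SIZE (8) bytes, i.e. two dwords, which is why the
 * absolute index above is db_start_dw_offset + slot * 2.  With 4 KiB pages
 * the bitmap below tracks PAGE_SIZE / 8 = 512 doorbell slots, the first
 * AMDGPU_MES_PRIORITY_NUM_LEVELS of which are reserved for the aggregated
 * doorbells.  Likewise, the per-process doorbell slice above works out to
 * roundup(8 * 1024, PAGE_SIZE) = 8 KiB.
 */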
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		spin_lock_init(&adev->mes.ring_lock[i]);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= adev->gfx.mec.num_pipe_per_mec)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}
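/*
 * amdgpu_mes_fini() releases everything amdgpu_mes_init() set up: the event
 * log BO, the three writeback slots, the doorbell bitmap and the ID
 * allocators.
 */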
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to insert pasid %d into the idr\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}
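/*
 * Process teardown is two-phase: queues are removed from the hardware and
 * dropped from the IDRs while holding the MES lock, and the backing memory
 * (MQDs, gang and process contexts) is only freed after the lock has been
 * released.
 */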
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}
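/*
 * amdgpu_mes_add_hw_queue() glues the pieces above together: it allocates
 * a queue id and a kernel doorbell, initializes the MQD, and then asks the
 * MES firmware to schedule the queue via the add_hw_queue() callback.
 */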
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}
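/*
 * Removal unwinds add_hw_queue() in reverse: drop the queue id, unschedule
 * the queue through the MES firmware, return the doorbell, and finally free
 * the MQD and the queue itself outside of the MES lock.
 */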
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  unsigned int vmid)
{
	struct mes_reset_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;
	queue_input.vmid = vmid;

	r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to reset legacy queue\n");

	return r;
}
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	uint32_t val = 0;
	int r;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock has already been dropped on this path */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't yet support mapping
		 * SDMA queues.
		 */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->enable_uni_mes && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name),
			 "amdgpu/%s_uni_mes.bin", ucode_prefix);
	} else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
		   amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 "amdgpu/%s_mes.bin", ucode_prefix);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}
"" : "1"); 1541 } 1542 1543 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name); 1544 if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) { 1545 dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix); 1546 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], 1547 "amdgpu/%s_mes.bin", ucode_prefix); 1548 } 1549 1550 if (r) 1551 goto out; 1552 1553 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1554 adev->mes.fw[pipe]->data; 1555 adev->mes.uc_start_addr[pipe] = 1556 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | 1557 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); 1558 adev->mes.data_start_addr[pipe] = 1559 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | 1560 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); 1561 1562 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1563 int ucode, ucode_data; 1564 1565 if (pipe == AMDGPU_MES_SCHED_PIPE) { 1566 ucode = AMDGPU_UCODE_ID_CP_MES; 1567 ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; 1568 } else { 1569 ucode = AMDGPU_UCODE_ID_CP_MES1; 1570 ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; 1571 } 1572 1573 info = &adev->firmware.ucode[ucode]; 1574 info->ucode_id = ucode; 1575 info->fw = adev->mes.fw[pipe]; 1576 adev->firmware.fw_size += 1577 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), 1578 PAGE_SIZE); 1579 1580 info = &adev->firmware.ucode[ucode_data]; 1581 info->ucode_id = ucode_data; 1582 info->fw = adev->mes.fw[pipe]; 1583 adev->firmware.fw_size += 1584 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), 1585 PAGE_SIZE); 1586 } 1587 1588 return 0; 1589 out: 1590 amdgpu_ucode_release(&adev->mes.fw[pipe]); 1591 return r; 1592 } 1593 1594 #if defined(CONFIG_DEBUG_FS) 1595 1596 static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) 1597 { 1598 struct amdgpu_device *adev = m->private; 1599 uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); 1600 1601 seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, 1602 mem, adev->mes.event_log_size, false); 1603 1604 return 0; 1605 } 1606 1607 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); 1608 1609 #endif 1610 1611 void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) 1612 { 1613 1614 #if defined(CONFIG_DEBUG_FS) 1615 struct drm_minor *minor = adev_to_drm(adev)->primary; 1616 struct dentry *root = minor->debugfs_root; 1617 if (adev->enable_mes && amdgpu_mes_log_enable) 1618 debugfs_create_file("amdgpu_mes_event_log", 0444, root, 1619 adev, &amdgpu_debugfs_mes_event_log_fops); 1620 1621 #endif 1622 } 1623