/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

/*
 * Size in bytes of the doorbell slice reserved for one process, rounded up
 * to a whole page. With 8-byte doorbells and 1024 queues per process this
 * is roundup(8 * 1024, PAGE_SIZE) = 8 KiB, i.e. two pages with the common
 * 4 KiB PAGE_SIZE.
 */
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				       unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				       unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

/*
 * Convert a (process slice, doorbell id) pair into a dword offset within
 * the doorbell BAR. Each 64-bit doorbell occupies two dwords, hence the
 * doorbell_id * 2 term.
 */
unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		      (process->doorbell_index *
		       amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
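	/*
	 * The bit must have been set by amdgpu_mes_queue_doorbell_get();
	 * if it is already clear, the doorbell was freed twice.
	 */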
	WARN_ON(!old);
}

static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;

	doorbell_start_offset = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
		rounddown(doorbell_aperture_size,
			  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}

int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}
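
	/*
	 * The *_hqd_mask values above tell the MES firmware which hardware
	 * queue slots on each pipe it is allowed to schedule onto. The
	 * cleared low bits appear to keep the slots used by the kernel's
	 * own rings out of MES's hands; that is an inference from the
	 * values, not a documented contract.
	 */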

	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.agreegated_doorbells[i] = 0xffffffff;

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}
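
/*
 * MES bookkeeping is a three-level hierarchy: a process (one per PASID and
 * VM) owns a list of gangs, and each gang owns a list of hardware queues
 * that are scheduled as a unit. The functions below manage the process
 * level.
 */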

int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to insert pasid %d into idr\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}
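
/*
 * Illustrative sketch only (nothing in the driver calls this): the expected
 * pairing of the process-level API above. The pasid and vm are assumed to
 * come from amdgpu_pasid_alloc() and an initialized amdgpu_vm, as in
 * amdgpu_mes_self_test() at the end of this file.
 */
static int __maybe_unused amdgpu_mes_process_lifetime_sketch(
		struct amdgpu_device *adev, int pasid, struct amdgpu_vm *vm)
{
	int r;

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r)
		return r;

	/* ... amdgpu_mes_add_gang() / amdgpu_mes_add_hw_queue() here ... */

	/* also tears down any gangs and queues still owned by the process */
	amdgpu_mes_destroy_process(adev, pasid);
	return 0;
}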

int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input = {};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input = {};
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	/* keep the BO reserved until amdgpu_mes_queue_init_mqd() is done */
	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}
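
/*
 * The MQD (memory queue descriptor) prepared above holds the persistent
 * state of one hardware queue. When MES maps the queue it loads the HQD
 * registers from this buffer, which is why the MQD must be zeroed and
 * fully initialized before amdgpu_mes_add_hw_queue() hands it to the
 * firmware.
 */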

int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input = {};
	unsigned long flags;
	int r;

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	/* translate the page directory base into the MC address the firmware expects */
	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	amdgpu_mes_lock(&adev->mes);

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	amdgpu_mes_unlock(&adev->mes);
	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);        \
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
				_eng[ring->idx].ring);                  \
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
				_eng[ring->idx].ib);                    \
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,        \
				_eng[ring->idx].padding);               \
} while(0)
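
/*
 * Per-engine dispatch for amdgpu_mes_ctx_get_offs(): given a ring and a
 * slot id, the macro above returns the byte offset of that slot (or of the
 * ring buffer, IB, or padding area) within struct amdgpu_mes_ctx_meta_data,
 * so a single metadata BO can back every MES-managed ring.
 */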

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before amdgpu_mes_add_hw_queue() */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}
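
/*
 * The ctx metadata BO allocated below is one GTT buffer that holds the ring
 * buffers, IBs, and per-ring slots (see the offsets macro above) for all
 * MES-managed rings; amdgpu_mes_ctx_map_meta_data() maps it into the
 * process VM at a caller-chosen GPU address.
 */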

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r)
		return r;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);

	csa_tv.bo = &ctx_data->meta_data_obj->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	ttm_eu_backoff_reservation(&ticket, &list);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error:
	amdgpu_vm_bo_del(adev, bo_va);
	ttm_eu_backoff_reservation(&ticket, &list);
	amdgpu_sync_free(&sync);
	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}
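
/*
 * Quick functional check of every ring created above: the ring test pushes
 * a trivial packet through the queue and the IB test submits a small
 * indirect buffer, exercising both the doorbell path and MES scheduling.
 */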

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s test pass\n", ring->name);

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX,
				   AMDGPU_MES_CTX_MAX_GFX_RINGS },
				 { AMDGPU_RING_TYPE_COMPUTE,
				   AMDGPU_MES_CTX_MAX_COMPUTE_RINGS },
				 { AMDGPU_RING_TYPE_SDMA,
				   AMDGPU_MES_CTX_MAX_SDMA_RINGS } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_pasid;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/*
		 * On GFX v10.3, the MES firmware cannot map SDMA queues
		 * yet, so skip them.
		 */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	BUG_ON(amdgpu_bo_reserve(ctx_data.meta_data_obj, true));
	if (ctx_data.meta_data_va)
		amdgpu_vm_bo_del(adev, ctx_data.meta_data_va);
	amdgpu_bo_unreserve(ctx_data.meta_data_obj);
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}