// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_reset.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
#include "amdgpu_hmm.h"
#include "amdgpu_userq_fence.h"

u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
{
	int i;
	u32 userq_ip_mask = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			userq_ip_mask |= (1 << i);
	}

	return userq_ip_mask;
}

static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
						 enum amdgpu_ring_type ring_type, int reset_type)
{
	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
		return false;

	switch (ring_type) {
	case AMDGPU_RING_TYPE_GFX:
		if (adev->gfx.gfx_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		if (adev->gfx.compute_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		if (adev->sdma.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_DEC:
	case AMDGPU_RING_TYPE_VCN_ENC:
		if (adev->vcn.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		if (adev->jpeg.supported_reset & reset_type)
			return true;
		break;
	default:
		break;
	}
	return false;
}

static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev)) {
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->userq_reset_work);
		/* Wait for the reset job to complete */
		flush_work(&adev->userq_reset_work);
	}
}

static int
amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const int queue_types[] = {
		AMDGPU_RING_TYPE_COMPUTE,
		AMDGPU_RING_TYPE_GFX,
		AMDGPU_RING_TYPE_SDMA
	};
	const int num_queue_types = ARRAY_SIZE(queue_types);
	bool gpu_reset = false;
	int r = 0;
	int i;

	/* Warn if the caller does not hold the userq manager mutex */
	WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));

	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
		dev_err(adev->dev, "userq reset disabled by debug mask\n");
		return 0;
	}

	/*
	 * If GPU recovery feature is disabled system-wide,
	 * skip all reset detection logic
	 */
	if (!amdgpu_gpu_recovery)
		return 0;

	/*
	 * Iterate through all queue types to detect and reset problematic
	 * queues. Process each queue type in the defined order.
	 */
	for (i = 0; i < num_queue_types; i++) {
		int ring_type = queue_types[i];
		const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];

		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
			continue;

		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
		    funcs && funcs->detect_and_reset) {
			r = funcs->detect_and_reset(adev, ring_type);
			if (r) {
				gpu_reset = true;
				break;
			}
		}
	}

	if (gpu_reset)
		amdgpu_userq_gpu_reset(adev);

	return r;
}

static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
{
	struct amdgpu_userq_va_cursor *va_cursor;

	va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
	if (!va_cursor)
		return -ENOMEM;

	INIT_LIST_HEAD(&va_cursor->list);
	va_cursor->gpu_addr = addr;
	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
	list_add(&va_cursor->list, &queue->userq_va_list);

	return 0;
}

int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
				   struct amdgpu_usermode_queue *queue,
				   u64 addr, u64 expected_size)
{
	struct amdgpu_bo_va_mapping *va_map;
	struct amdgpu_vm *vm = queue->vm;
	u64 user_addr;
	u64 size;
	int r = 0;

	/* Strip the GMC hole bits and work in GPU page units */
	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;

	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		return r;

	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
	if (!va_map) {
		r = -EINVAL;
		goto out_err;
	}
	/* Validate that the userq buffer fully resides within the VM mapping range */
	if (user_addr >= va_map->start &&
	    va_map->last - user_addr + 1 >= size) {
		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
		amdgpu_bo_unreserve(vm->root.bo);
		return 0;
	}

	r = -EINVAL;
out_err:
	amdgpu_bo_unreserve(vm->root.bo);
	return r;
}

static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
{
	struct amdgpu_bo_va_mapping *mapping;
	bool r;

	if (amdgpu_bo_reserve(vm->root.bo, false))
		return false;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
		r = true;
	else
		r = false;
	amdgpu_bo_unreserve(vm->root.bo);

	return r;
}

static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
	int r = 0;

	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
		dev_dbg(queue->userq_mgr->adev->dev,
			"validate the userq mapping:%p va:%llx r:%d\n",
			queue, va_cursor->gpu_addr, r);
	}

	if (r != 0)
		return true;

	return false;
}

static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
					    struct amdgpu_userq_va_cursor *va_cursor)
{
	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
	list_del(&va_cursor->list);
	kfree(va_cursor);
}

static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
						struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
		if (!mapping) {
			r = -EINVAL;
			goto err;
		}
		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
			queue, va_cursor->gpu_addr);
		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
	}
err:
	amdgpu_bo_unreserve(queue->vm->root.bo);
	return r;
}

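/*
 * Queue state handling, as implemented by the helpers below:
 *
 *   map:     UNMAPPED -> MAPPED              (HUNG on failure)
 *   unmap:   MAPPED/PREEMPTED -> UNMAPPED    (HUNG on failure)
 *   preempt: MAPPED -> PREEMPTED             (HUNG on failure)
 *   restore: PREEMPTED -> MAPPED             (HUNG on failure)
 *
 * When a map, unmap or preempt transition fails, the helper also runs
 * amdgpu_userq_detect_and_reset_queues() to attempt a per-queue reset.
 */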
static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	bool found_hung_queue = false;
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
		r = userq_funcs->preempt(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			found_hung_queue = true;
		} else {
			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
		}
	}

	if (found_hung_queue)
		amdgpu_userq_detect_and_reset_queues(uq_mgr);

	return r;
}

static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
		r = userq_funcs->restore(queue);
		if (r)
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		else
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
	}

	return r;
}

static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	bool found_hung_queue = false;
	int r = 0;

	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
	    (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
		r = userq_funcs->unmap(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			found_hung_queue = true;
		} else {
			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
		}
	}

	if (found_hung_queue)
		amdgpu_userq_detect_and_reset_queues(uq_mgr);

	return r;
}

static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
		r = userq_funcs->map(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			amdgpu_userq_detect_and_reset_queues(uq_mgr);
		} else {
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
		}
	}

	return r;
}

static int amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct dma_fence *f = queue->last_fence;
	int ret = 0;

	if (f && !dma_fence_is_signaled(f)) {
		ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
		if (ret <= 0) {
			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
				     f->context, f->seqno);
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			return -ETIME;
		}
	}

	return ret;
}

static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue,
				 int queue_id)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];

	/* Wait for mode-1 reset to complete */
	down_read(&adev->reset_domain->sem);

	/* Release the queue's VA mappings and HW resources */
	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
	uq_funcs->mqd_destroy(queue);
	amdgpu_userq_fence_driver_free(queue);
	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
	xa_erase_irq(&uq_mgr->userq_xa, (unsigned long)queue_id);
	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
	queue->userq_mgr = NULL;
	list_del(&queue->userq_va_list);
	kfree(queue);

	up_read(&adev->reset_domain->sem);
}

static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
	return xa_load(&uq_mgr->userq_xa, qid);
}

void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
			     struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

retry:
	/* Flush any pending resume work to create ev_fence */
	flush_delayed_work(&uq_mgr->resume_work);

	mutex_lock(&uq_mgr->userq_mutex);
	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	spin_unlock(&evf_mgr->ev_fence_lock);
	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
		mutex_unlock(&uq_mgr->userq_mutex);
		/*
		 * Looks like there was no pending resume work,
		 * add one now to create a valid eviction fence
		 */
		schedule_delayed_work(&uq_mgr->resume_work, 0);
		goto retry;
	}
}

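/*
 * Allocate a kernel-owned GTT BO used to back user queue metadata (the MQD
 * and similar objects), bind it to GART, CPU-map it and zero it.  The GPU
 * and CPU addresses are returned through @userq_obj.
 */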
int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
			       struct amdgpu_userq_obj *userq_obj,
			       int size)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	bp.type = ttm_bo_type_kernel;
	bp.size = size;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
		return r;
	}

	r = amdgpu_bo_reserve(userq_obj->obj, true);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
		goto free_obj;
	}

	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
		goto unresv;
	}

	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
		goto unresv;
	}

	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
	amdgpu_bo_unreserve(userq_obj->obj);
	memset(userq_obj->cpu_ptr, 0, size);
	return 0;

unresv:
	amdgpu_bo_unreserve(userq_obj->obj);

free_obj:
	amdgpu_bo_unref(&userq_obj->obj);
	return r;
}

void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_userq_obj *userq_obj)
{
	amdgpu_bo_kunmap(userq_obj->obj);
	amdgpu_bo_unref(&userq_obj->obj);
}

uint64_t
amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_db_info *db_info,
				struct drm_file *filp)
{
	uint64_t index;
	struct drm_gem_object *gobj;
	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
	int r, db_size;

	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
	if (gobj == NULL) {
		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
		return -EINVAL;
	}

	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	r = amdgpu_bo_reserve(db_obj->obj, true);
	if (r) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
		goto unref_bo;
	}

	/* Pin the BO before generating the index, unpin in queue destroy */
	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
	if (r) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
		goto unresv_bo;
	}

	switch (db_info->queue_type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
	case AMDGPU_HW_IP_DMA:
		db_size = sizeof(u64);
		break;
	default:
		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
			     db_info->queue_type);
		r = -EINVAL;
		goto unpin_bo;
	}

	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
					     db_info->doorbell_offset, db_size);
	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
		       "[Usermode queues] doorbell index=%lld\n", index);
	amdgpu_bo_unreserve(db_obj->obj);
	return index;

unpin_bo:
	amdgpu_bo_unpin(db_obj->obj);
unresv_bo:
	amdgpu_bo_unreserve(db_obj->obj);
unref_bo:
	amdgpu_bo_unref(&db_obj->obj);
	return r;
}

static int
amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int r = 0;

	cancel_delayed_work_sync(&uq_mgr->resume_work);
	mutex_lock(&uq_mgr->userq_mutex);

	queue = amdgpu_userq_find(uq_mgr, queue_id);
	if (!queue) {
		drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
		mutex_unlock(&uq_mgr->userq_mutex);
		return -EINVAL;
	}
	amdgpu_userq_wait_for_last_fence(queue);
	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
	if (!r) {
		amdgpu_bo_unpin(queue->db_obj.obj);
		amdgpu_bo_unreserve(queue->db_obj.obj);
	}
	amdgpu_bo_unref(&queue->db_obj.obj);

	r = amdgpu_bo_reserve(queue->wptr_obj.obj, true);
	if (!r) {
		amdgpu_bo_unpin(queue->wptr_obj.obj);
		amdgpu_bo_unreserve(queue->wptr_obj.obj);
	}
	amdgpu_bo_unref(&queue->wptr_obj.obj);

	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove_recursive(queue->debugfs_queue);
#endif
	amdgpu_userq_detect_and_reset_queues(uq_mgr);
	r = amdgpu_userq_unmap_helper(queue);
	/* TODO: a GPU reset is required if the userq HW unmap fails */
	if (unlikely(r)) {
		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq that is still HW mapped\n");
		queue->state = AMDGPU_USERQ_STATE_HUNG;
	}
	amdgpu_userq_cleanup(queue, queue_id);
	mutex_unlock(&uq_mgr->userq_mutex);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return r;
}

static int amdgpu_userq_priority_permit(struct drm_file *filp,
					int priority)
{
	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
{
	struct amdgpu_usermode_queue *queue = m->private;
	struct amdgpu_bo *bo;
	int r;

	if (!queue || !queue->mqd.obj)
		return -EINVAL;

	bo = amdgpu_bo_ref(queue->mqd.obj);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		return -EINVAL;
	}

	seq_printf(m, "queue_type: %d\n", queue->queue_type);
	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));

	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;
}

static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
}

static const struct file_operations amdgpu_mqd_info_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_mqd_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif

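/*
 * Create a user mode queue for an AMDGPU_USERQ_OP_CREATE request: validate
 * the queue/rptr/wptr VAs, resolve the doorbell index, set up the fence
 * driver and MQD, publish the queue in the per-process and per-device
 * XArrays, and map it on the HW unless scheduling is currently halted for
 * enforce isolation.
 */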
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_db_info db_info;
	char *queue_name;
	bool skip_map_queue;
	u32 qid;
	uint64_t index;
	int r = 0;
	int priority =
		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;

	r = amdgpu_userq_priority_permit(filp, priority);
	if (r)
		return r;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/*
	 * There could be a situation that we are creating a new queue while
	 * the other queues under this UQ_mgr are suspended. So if there is any
	 * resume work pending, wait for it to get done.
	 *
	 * This will also make sure we have a valid eviction fence ready to be used.
	 */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	uq_funcs = adev->userq_funcs[args->in.ip_type];
	if (!uq_funcs) {
		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
			     args->in.ip_type);
		r = -EINVAL;
		goto unlock;
	}

	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
	if (!queue) {
		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
		r = -ENOMEM;
		goto unlock;
	}

	INIT_LIST_HEAD(&queue->userq_va_list);
	queue->doorbell_handle = args->in.doorbell_handle;
	queue->queue_type = args->in.ip_type;
	queue->vm = &fpriv->vm;
	queue->priority = priority;

	db_info.queue_type = queue->queue_type;
	db_info.doorbell_handle = queue->doorbell_handle;
	db_info.db_obj = &queue->db_obj;
	db_info.doorbell_offset = args->in.doorbell_offset;

	queue->userq_mgr = uq_mgr;
	/* Validate the userq virtual addresses */
	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
		r = -EINVAL;
		kfree(queue);
		goto unlock;
	}

	/* Convert relative doorbell offset into absolute doorbell index */
	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
	if (index == (uint64_t)-EINVAL) {
		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
		kfree(queue);
		r = -EINVAL;
		goto unlock;
	}

	queue->doorbell_index = index;
	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
	r = amdgpu_userq_fence_driver_alloc(adev, queue);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
		goto unlock;
	}

	r = uq_funcs->mqd_create(queue, &args->in);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to create queue\n");
		amdgpu_userq_fence_driver_free(queue);
		kfree(queue);
		goto unlock;
	}

	/* Wait for mode-1 reset to complete */
	down_read(&adev->reset_domain->sem);
	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
	if (r) {
		kfree(queue);
		up_read(&adev->reset_domain->sem);
		goto unlock;
	}

	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
		amdgpu_userq_fence_driver_free(queue);
		uq_funcs->mqd_destroy(queue);
		kfree(queue);
		r = -ENOMEM;
		up_read(&adev->reset_domain->sem);
		goto unlock;
	}
	up_read(&adev->reset_domain->sem);

	/* don't map the queue if scheduling is halted */
	if (adev->userq_halt_for_enforce_isolation &&
	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
		skip_map_queue = true;
	else
		skip_map_queue = false;
	if (!skip_map_queue) {
		r = amdgpu_userq_map_helper(queue);
		if (r) {
			drm_file_err(uq_mgr->file, "Failed to map queue\n");
			xa_erase(&uq_mgr->userq_xa, qid);
			amdgpu_userq_fence_driver_free(queue);
			uq_funcs->mqd_destroy(queue);
			kfree(queue);
			goto unlock;
		}
	}

	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
	if (!queue_name) {
		r = -ENOMEM;
		goto unlock;
	}

#if defined(CONFIG_DEBUG_FS)
	/* Queue dentry per client to hold MQD information */
	queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
	debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
#endif
	kfree(queue_name);

	args->out.queue_id = qid;
	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);

	return r;
}

static int amdgpu_userq_input_args_validate(struct drm_device *dev,
					    union drm_amdgpu_userq *args,
					    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
			return -EINVAL;
		/* Usermode queues are currently supported for GFX, compute and SDMA only */
		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
				     args->in.ip_type);
			return -EINVAL;
		}

		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
		    !amdgpu_is_tmz(adev)) {
			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
			return -EINVAL;
		}

		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
		    args->in.queue_va == 0 ||
		    args->in.queue_size == 0) {
			drm_file_err(filp, "invalid userq queue va or size\n");
			return -EINVAL;
		}
		if (!args->in.wptr_va || !args->in.rptr_va) {
			drm_file_err(filp, "invalid userq rptr or wptr va\n");
			return -EINVAL;
		}
		break;
	case AMDGPU_USERQ_OP_FREE:
		if (args->in.ip_type ||
		    args->in.doorbell_handle ||
		    args->in.doorbell_offset ||
		    args->in.flags ||
		    args->in.queue_va ||
		    args->in.queue_size ||
		    args->in.rptr_va ||
		    args->in.wptr_va ||
		    args->in.mqd ||
		    args->in.mqd_size)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

bool amdgpu_userq_enabled(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			return true;
	}

	return false;
}

int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *filp)
{
	union drm_amdgpu_userq *args = data;
	int r;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
		return -EINVAL;

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		r = amdgpu_userq_create(filp, args);
		if (r)
			drm_file_err(filp, "Failed to create usermode queue\n");
		break;

	case AMDGPU_USERQ_OP_FREE:
		r = amdgpu_userq_destroy(filp, args->in.queue_id);
		if (r)
			drm_file_err(filp, "Failed to destroy usermode queue\n");
		break;

	default:
		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
		return -EINVAL;
	}

	return r;
}

static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;
	int ret = 0, r;

	/* Resume all the queues for this process */
	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
			drm_file_err(uq_mgr->file,
				     "trying to restore a queue without VA mappings\n");
			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
			continue;
		}

		r = amdgpu_userq_restore_helper(queue);
		if (r)
			ret = r;
	}

	if (ret)
		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
	return ret;
}

static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
			 struct amdgpu_vm *vm)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int ret;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
					 struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_va->base.bo;
		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
		if (unlikely(ret))
			return ret;

		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;

		/* This moves the bo_va to the done list */
		ret = amdgpu_vm_bo_update(adev, bo_va, false);
		if (ret)
			return ret;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/* Make sure the whole VM is ready to be used */
static int
amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	bool invalidated = false, new_addition = false;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_hmm_range *range;
	struct amdgpu_vm *vm = &fpriv->vm;
	unsigned long key, tmp_key;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	struct xarray xa;
	int ret;

	xa_init(&xa);

retry_lock:
	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto unlock_all;

		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto unlock_all;

		/* This validates PDs, PTs and per VM BOs */
		ret = amdgpu_vm_validate(adev, vm, NULL,
					 amdgpu_userq_validate_vm,
					 NULL);
		if (unlikely(ret))
			goto unlock_all;

		/* This locks and validates the remaining evicted BOs */
		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto unlock_all;
	}

	if (invalidated) {
		xa_for_each(&xa, tmp_key, range) {
			bo = range->bo;
			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret)
				goto unlock_all;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);

			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret)
				goto unlock_all;
		}
		invalidated = false;
	}

	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
	if (ret)
		goto unlock_all;

	key = 0;
	/* Validate User Ptr BOs */
	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
		bo = bo_va->base.bo;
		if (!bo)
			continue;

		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
			continue;

		range = xa_load(&xa, key);
		if (range && range->bo != bo) {
			xa_erase(&xa, key);
			amdgpu_hmm_range_free(range);
			range = NULL;
		}

		if (!range) {
			range = amdgpu_hmm_range_alloc(bo);
			if (!range) {
				ret = -ENOMEM;
				goto unlock_all;
			}

			xa_store(&xa, key, range, GFP_KERNEL);
			new_addition = true;
		}
		key++;
	}

	if (new_addition) {
		drm_exec_fini(&exec);
		xa_for_each(&xa, tmp_key, range) {
			if (!range)
				continue;
			bo = range->bo;
			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
			if (ret)
				goto unlock_all;
		}

		invalidated = true;
		new_addition = false;
		goto retry_lock;
	}

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		goto unlock_all;

	/*
	 * We need to wait for all VM updates to finish before restarting the
	 * queues. Using the done list like that is now ok since everything is
	 * locked in place.
	 */
	list_for_each_entry(bo_va, &vm->done, base.vm_status)
		dma_fence_wait(bo_va->last_pt_update, false);
	dma_fence_wait(vm->last_update, false);

	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
	if (ret)
		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");

unlock_all:
	drm_exec_fini(&exec);
	xa_for_each(&xa, tmp_key, range) {
		if (!range)
			continue;
		bo = range->bo;
		amdgpu_hmm_range_free(range);
	}
	xa_destroy(&xa);
	return ret;
}

static void amdgpu_userq_restore_worker(struct work_struct *work)
{
	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	int ret;

	flush_delayed_work(&fpriv->evf_mgr.suspend_work);

	mutex_lock(&uq_mgr->userq_mutex);

	ret = amdgpu_userq_vm_validate(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
		goto unlock;
	}

	ret = amdgpu_userq_restore_all(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
		goto unlock;
	}

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);
}

static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;
	int ret = 0, r;

	amdgpu_userq_detect_and_reset_queues(uq_mgr);
	/* Try to unmap all the queues in this process ctx */
	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
		r = amdgpu_userq_preempt_helper(queue);
		if (r)
			ret = r;
	}

	if (ret)
		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
	return ret;
}

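/*
 * Worker scheduled through amdgpu_userq_gpu_reset() on the reset domain when
 * per-queue detection/reset reports a failure; it escalates to a full GPU
 * recovery.
 */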
void amdgpu_userq_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  userq_reset_work);
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.src = AMDGPU_RESET_SRC_USERQ;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;
	int ret;

	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
		struct dma_fence *f = queue->last_fence;

		if (!f || dma_fence_is_signaled(f))
			continue;
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0) {
			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
				     f->context, f->seqno);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
		   struct amdgpu_eviction_fence *ev_fence)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	int ret;

	/* Wait for any pending userqueue fence work to finish */
	ret = amdgpu_userq_wait_for_signal(uq_mgr);
	if (ret)
		dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");

	ret = amdgpu_userq_evict_all(uq_mgr);
	if (ret)
		dev_err(adev->dev, "Failed to evict userqueue\n");

	/* Signal current eviction fence */
	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);

	if (evf_mgr->fd_closing) {
		cancel_delayed_work_sync(&uq_mgr->resume_work);
		return;
	}

	/* Schedule a resume work */
	schedule_delayed_work(&uq_mgr->resume_work, 0);
}

int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
			  struct amdgpu_device *adev)
{
	mutex_init(&userq_mgr->userq_mutex);
	xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
	userq_mgr->adev = adev;
	userq_mgr->file = file_priv;

	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
	return 0;
}

void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;

	cancel_delayed_work_sync(&userq_mgr->resume_work);

	mutex_lock(&userq_mgr->userq_mutex);
	amdgpu_userq_detect_and_reset_queues(userq_mgr);
	xa_for_each(&userq_mgr->userq_xa, queue_id, queue) {
		amdgpu_userq_wait_for_last_fence(queue);
		amdgpu_userq_unmap_helper(queue);
		amdgpu_userq_cleanup(queue, queue_id);
	}

	xa_destroy(&userq_mgr->userq_xa);
	mutex_unlock(&userq_mgr->userq_mutex);
	mutex_destroy(&userq_mgr->userq_mutex);
}

int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int r;

	if (!ip_mask)
		return 0;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		cancel_delayed_work_sync(&uqm->resume_work);
		guard(mutex)(&uqm->userq_mutex);
		amdgpu_userq_detect_and_reset_queues(uqm);
		if (adev->in_s0ix)
			r = amdgpu_userq_preempt_helper(queue);
		else
			r = amdgpu_userq_unmap_helper(queue);
		if (r)
			return r;
	}
	return 0;
}

int amdgpu_userq_resume(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int r;

	if (!ip_mask)
		return 0;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		guard(mutex)(&uqm->userq_mutex);
		if (adev->in_s0ix)
			r = amdgpu_userq_restore_helper(queue);
		else
			r = amdgpu_userq_map_helper(queue);
		if (r)
			return r;
	}

	return 0;
}

int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
						  u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int ret = 0, r;

	/* only need to stop gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	if (adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already stopped!\n");
	adev->userq_halt_for_enforce_isolation = true;
	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		cancel_delayed_work_sync(&uqm->resume_work);
		mutex_lock(&uqm->userq_mutex);
		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
		    (queue->xcp_id == idx)) {
			amdgpu_userq_detect_and_reset_queues(uqm);
			r = amdgpu_userq_preempt_helper(queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}

	return ret;
}

int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
						   u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int ret = 0, r;

	/* only need to start gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	if (!adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already started!\n");
	adev->userq_halt_for_enforce_isolation = false;
	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		mutex_lock(&uqm->userq_mutex);
		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
		    (queue->xcp_id == idx)) {
			r = amdgpu_userq_restore_helper(queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}

	return ret;
}

int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t saddr)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_bo_va *bo_va = mapping->bo_va;
	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
	int ret = 0;

	if (!ip_mask)
		return 0;

	dev_warn_once(adev->dev, "unmapping a user queue VA that is still in use: %llx\n", saddr);
	/*
	 * The userq VA mapping's reservation should include the eviction
	 * fence; if the eviction fence cannot signal successfully during the
	 * unmap, the driver warns to flag this improper unmap of the userq VA.
	 * Note: the eviction fence may be attached to different BOs, and this
	 * unmap only covers one kind of userq VA, so at this point assume the
	 * eviction fence is always unsignaled.
	 */
	if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
		ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret <= 0)
			return -EBUSY;
	}

	return 0;
}

void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
{
	const struct amdgpu_userq_funcs *userq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		cancel_delayed_work_sync(&uqm->resume_work);
		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
			amdgpu_userq_wait_for_last_fence(queue);
			userq_funcs = adev->userq_funcs[queue->queue_type];
			userq_funcs->unmap(queue);
			/*
			 * Just mark all queues as hung at this point. If the
			 * unmap succeeds, we could map them again in
			 * amdgpu_userq_post_reset() if VRAM is not lost.
			 */
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			amdgpu_userq_fence_driver_force_completion(queue);
		}
	}
}

int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
{
	/*
	 * Queues marked AMDGPU_USERQ_STATE_HUNG in amdgpu_userq_pre_reset()
	 * should be mappable again at this point, so re-map them and continue
	 * if VRAM is not lost.
	 */
	struct amdgpu_usermode_queue *queue;
	const struct amdgpu_userq_funcs *userq_funcs;
	unsigned long queue_id;
	int r = 0;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
			userq_funcs = adev->userq_funcs[queue->queue_type];
			/* Re-map queue */
			r = userq_funcs->map(queue);
			if (r) {
				dev_err(adev->dev, "Failed to remap queue %lu\n", queue_id);
				continue;
			}
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
		}
	}

	return r;
}
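
/*
 * Illustrative userspace flow (a sketch only; the authoritative UAPI is
 * union drm_amdgpu_userq in amdgpu_drm.h and the MQD properties are
 * IP-specific, so the field usage below is indicative, not exhaustive):
 *
 *	union drm_amdgpu_userq args = {};
 *
 *	args.in.op = AMDGPU_USERQ_OP_CREATE;
 *	args.in.ip_type = AMDGPU_HW_IP_GFX;
 *	args.in.doorbell_handle = doorbell_gem_handle;
 *	args.in.doorbell_offset = doorbell_offset;
 *	args.in.queue_va = ring_va;            // GPU VA of the ring buffer
 *	args.in.queue_size = ring_size;
 *	args.in.rptr_va = rptr_va;
 *	args.in.wptr_va = wptr_va;
 *	args.in.mqd = (uintptr_t)&mqd_props;   // IP-specific MQD properties
 *	args.in.mqd_size = sizeof(mqd_props);
 *	drmCommandWriteRead(fd, DRM_AMDGPU_USERQ, &args, sizeof(args));
 *	queue_id = args.out.queue_id;
 *
 *	// ... submit work, ring the doorbell ...
 *
 *	memset(&args, 0, sizeof(args));        // OP_FREE requires all other fields to be zero
 *	args.in.op = AMDGPU_USERQ_OP_FREE;
 *	args.in.queue_id = queue_id;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_USERQ, &args, sizeof(args));
 */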