// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_reset.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
#include "amdgpu_hmm.h"
#include "amdgpu_userq_fence.h"

u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
{
	int i;
	u32 userq_ip_mask = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			userq_ip_mask |= (1 << i);
	}

	return userq_ip_mask;
}

static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
						 enum amdgpu_ring_type ring_type, int reset_type)
{

	if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
		return false;

	switch (ring_type) {
	case AMDGPU_RING_TYPE_GFX:
		if (adev->gfx.gfx_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		if (adev->gfx.compute_supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		if (adev->sdma.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_DEC:
	case AMDGPU_RING_TYPE_VCN_ENC:
		if (adev->vcn.supported_reset & reset_type)
			return true;
		break;
	case AMDGPU_RING_TYPE_VCN_JPEG:
		if (adev->jpeg.supported_reset & reset_type)
			return true;
		break;
	default:
		break;
	}
	return false;
}

static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev)) {
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->userq_reset_work);
		/* Wait for the reset job to complete */
		flush_work(&adev->userq_reset_work);
	}
}

static int
amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const int queue_types[] = {
		AMDGPU_RING_TYPE_COMPUTE,
		AMDGPU_RING_TYPE_GFX,
		AMDGPU_RING_TYPE_SDMA
	};
	const int num_queue_types = ARRAY_SIZE(queue_types);
	bool gpu_reset = false;
	int r = 0;
	int i;

	/* Warn if the caller does not hold the userq mutex */
	WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));

	if (unlikely(adev->debug_disable_gpu_ring_reset)) {
		dev_err(adev->dev, "userq reset disabled by debug mask\n");
		return 0;
	}

	/*
	 * If GPU recovery feature is disabled system-wide,
	 * skip all reset detection logic
	 */
	if (!amdgpu_gpu_recovery)
		return 0;

	/*
	 * Iterate through all queue types to detect and reset problematic queues
	 * Process each queue type in the defined order
	 */
	for (i = 0; i < num_queue_types; i++) {
		int ring_type = queue_types[i];
		const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];

		if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
			continue;

		if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
		    funcs && funcs->detect_and_reset) {
			r = funcs->detect_and_reset(adev, ring_type);
			if (r) {
				gpu_reset = true;
				break;
			}
		}
	}

	if (gpu_reset)
		amdgpu_userq_gpu_reset(adev);

	return r;
}

static void amdgpu_userq_hang_detect_work(struct work_struct *work)
{
	struct amdgpu_usermode_queue *queue = container_of(work,
							   struct amdgpu_usermode_queue,
							   hang_detect_work.work);
	struct dma_fence *fence;
	struct amdgpu_userq_mgr *uq_mgr;

	if (!queue || !queue->userq_mgr)
		return;

	uq_mgr = queue->userq_mgr;
	fence = READ_ONCE(queue->hang_detect_fence);
	/* Fence already signaled - no action needed */
	if (!fence || dma_fence_is_signaled(fence))
		return;

	mutex_lock(&uq_mgr->userq_mutex);
	amdgpu_userq_detect_and_reset_queues(uq_mgr);
	mutex_unlock(&uq_mgr->userq_mutex);
}

/*
 * Start hang detection for a user queue fence. A delayed work will be scheduled
 * to check if the fence is still pending after the timeout period.
 */
void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev;
	unsigned long timeout_ms;

	if (!queue || !queue->userq_mgr || !queue->userq_mgr->adev)
		return;

	adev = queue->userq_mgr->adev;
	/* Determine timeout based on queue type */
	switch (queue->queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		timeout_ms = adev->gfx_timeout;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		timeout_ms = adev->compute_timeout;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		timeout_ms = adev->sdma_timeout;
		break;
	default:
		timeout_ms = adev->gfx_timeout;
		break;
	}

	/* Store the fence to monitor and schedule hang detection */
	WRITE_ONCE(queue->hang_detect_fence, queue->last_fence);
	schedule_delayed_work(&queue->hang_detect_work,
			      msecs_to_jiffies(timeout_ms));
}

static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue)
{
	INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work);
	queue->hang_detect_fence = NULL;
}

static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
{
	struct amdgpu_userq_va_cursor *va_cursor;

	va_cursor = kzalloc_obj(*va_cursor);
	if (!va_cursor)
		return -ENOMEM;

	INIT_LIST_HEAD(&va_cursor->list);
	va_cursor->gpu_addr = addr;
	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
	list_add(&va_cursor->list, &queue->userq_va_list);

	return 0;
}

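/*
 * amdgpu_userq_input_va_validate - sanity check a userq virtual address
 *
 * Look up the VM mapping that covers @addr and make sure a range of
 * @expected_size bytes starting there fits entirely inside it. On success
 * the VA is added to the queue's userq_va_list so the mapping can be
 * tracked until the queue is destroyed.
 */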
int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
				   struct amdgpu_usermode_queue *queue,
				   u64 addr, u64 expected_size)
{
	struct amdgpu_bo_va_mapping *va_map;
	struct amdgpu_vm *vm = queue->vm;
	u64 user_addr;
	u64 size;
	int r = 0;

	user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
	size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;

	r = amdgpu_bo_reserve(vm->root.bo, false);
	if (r)
		return r;

	va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
	if (!va_map) {
		r = -EINVAL;
		goto out_err;
	}
	/* Only valid if the userq buffer is fully resident inside the VM mapping range */
	if (user_addr >= va_map->start &&
	    va_map->last - user_addr + 1 >= size) {
		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
		amdgpu_bo_unreserve(vm->root.bo);
		return 0;
	}

	r = -EINVAL;
out_err:
	amdgpu_bo_unreserve(vm->root.bo);
	return r;
}

static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
{
	struct amdgpu_bo_va_mapping *mapping;
	bool r;

	if (amdgpu_bo_reserve(vm->root.bo, false))
		return false;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
		r = true;
	else
		r = false;
	amdgpu_bo_unreserve(vm->root.bo);

	return r;
}

static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
	int r = 0;

	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
		r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
		dev_dbg(queue->userq_mgr->adev->dev,
			"validate the userq mapping:%p va:%llx r:%d\n",
			queue, va_cursor->gpu_addr, r);
	}

	if (r != 0)
		return true;

	return false;
}

static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
					    struct amdgpu_userq_va_cursor *va_cursor)
{
	atomic_set(&mapping->bo_va->userq_va_mapped, 0);
	list_del(&va_cursor->list);
	kfree(va_cursor);
}

static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
						struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_va_cursor *va_cursor, *tmp;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
		mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
		if (!mapping) {
			r = -EINVAL;
			goto err;
		}
		dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
			queue, va_cursor->gpu_addr);
		amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
	}
err:
	amdgpu_bo_unreserve(queue->vm->root.bo);
	return r;
}

static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	bool found_hung_queue = false;
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
		r = userq_funcs->preempt(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			found_hung_queue = true;
		} else {
			queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
		}
	}

	if (found_hung_queue)
		amdgpu_userq_detect_and_reset_queues(uq_mgr);

	return r;
}

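/* Map a previously preempted queue back onto the hardware. */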
static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
		r = userq_funcs->restore(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		} else {
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
		}
	}

	return r;
}

static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	bool found_hung_queue = false;
	int r = 0;

	if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
	    (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
		r = userq_funcs->unmap(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			found_hung_queue = true;
		} else {
			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
		}
	}

	if (found_hung_queue)
		amdgpu_userq_detect_and_reset_queues(uq_mgr);

	return r;
}

static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
		r = userq_funcs->map(queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			amdgpu_userq_detect_and_reset_queues(uq_mgr);
		} else {
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
		}
	}

	return r;
}

static int amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct dma_fence *f = queue->last_fence;
	int ret = 0;

	if (f && !dma_fence_is_signaled(f)) {
		ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
		if (ret <= 0) {
			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
				     f->context, f->seqno);
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			return -ETIME;
		}
	}

	return ret;
}

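/*
 * Tear down everything attached to a queue: the tracked VA list, the MQD,
 * the fence driver and the doorbell xarray entry. Runs once the last
 * reference on the queue has been dropped.
 */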
static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];

	/* Wait for mode-1 reset to complete */
	down_read(&adev->reset_domain->sem);

	/* Drop the userq reference. */
	amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
	uq_funcs->mqd_destroy(queue);
	amdgpu_userq_fence_driver_free(queue);
	/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
	xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
	queue->userq_mgr = NULL;
	list_del(&queue->userq_va_list);
	kfree(queue);

	up_read(&adev->reset_domain->sem);
}

void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
			     struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

retry:
	/* Flush any pending resume work to create ev_fence */
	flush_delayed_work(&uq_mgr->resume_work);

	mutex_lock(&uq_mgr->userq_mutex);
	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	spin_unlock(&evf_mgr->ev_fence_lock);
	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
		mutex_unlock(&uq_mgr->userq_mutex);
		/*
		 * Looks like there was no pending resume work,
		 * add one now to create a valid eviction fence
		 */
		schedule_delayed_work(&uq_mgr->resume_work, 0);
		goto retry;
	}
}

int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
			       struct amdgpu_userq_obj *userq_obj,
			       int size)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	bp.type = ttm_bo_type_kernel;
	bp.size = size;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
		return r;
	}

	r = amdgpu_bo_reserve(userq_obj->obj, true);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
		goto free_obj;
	}

	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
		goto unresv;
	}

	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
		goto unresv;
	}

	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
	amdgpu_bo_unreserve(userq_obj->obj);
	memset(userq_obj->cpu_ptr, 0, size);
	return 0;

unresv:
	amdgpu_bo_unreserve(userq_obj->obj);

free_obj:
	amdgpu_bo_unref(&userq_obj->obj);
	return r;
}

void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_userq_obj *userq_obj)
{
	amdgpu_bo_kunmap(userq_obj->obj);
	amdgpu_bo_unref(&userq_obj->obj);
}

uint64_t
amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_db_info *db_info,
				struct drm_file *filp)
{
	uint64_t index;
	struct drm_gem_object *gobj;
	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
	int r, db_size;

	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
	if (gobj == NULL) {
		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
		return -EINVAL;
	}

	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	r = amdgpu_bo_reserve(db_obj->obj, true);
	if (r) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
		goto unref_bo;
	}

	/* Pin the BO before generating the index, unpin in queue destroy */
	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
	if (r) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
		goto unresv_bo;
	}

	switch (db_info->queue_type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
	case AMDGPU_HW_IP_DMA:
		db_size = sizeof(u64);
		break;
	default:
		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
			     db_info->queue_type);
		r = -EINVAL;
		goto unpin_bo;
	}

	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
					     db_info->doorbell_offset, db_size);
	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
		       "[Usermode queues] doorbell index=%lld\n", index);
	amdgpu_bo_unreserve(db_obj->obj);
	return index;

unpin_bo:
	amdgpu_bo_unpin(db_obj->obj);
unresv_bo:
	amdgpu_bo_unreserve(db_obj->obj);
unref_bo:
	amdgpu_bo_unref(&db_obj->obj);
	return r;
}

static int
amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	int r = 0;

	cancel_delayed_work_sync(&uq_mgr->resume_work);
	mutex_lock(&uq_mgr->userq_mutex);
	amdgpu_userq_wait_for_last_fence(queue);
	/* Cancel any pending hang detection work and cleanup */
	if (queue->hang_detect_fence) {
		cancel_delayed_work_sync(&queue->hang_detect_work);
		queue->hang_detect_fence = NULL;
	}
	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
	if (!r) {
		amdgpu_bo_unpin(queue->db_obj.obj);
		amdgpu_bo_unreserve(queue->db_obj.obj);
	}
	amdgpu_bo_unref(&queue->db_obj.obj);

	r = amdgpu_bo_reserve(queue->wptr_obj.obj, true);
	if (!r) {
		amdgpu_bo_unpin(queue->wptr_obj.obj);
		amdgpu_bo_unreserve(queue->wptr_obj.obj);
	}
	amdgpu_bo_unref(&queue->wptr_obj.obj);

	atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove_recursive(queue->debugfs_queue);
#endif
	amdgpu_userq_detect_and_reset_queues(uq_mgr);
	r = amdgpu_userq_unmap_helper(queue);
	/* TODO: a userq HW unmap error requires a reset */
	if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapped userq\n");
		queue->state = AMDGPU_USERQ_STATE_HUNG;
	}
	amdgpu_userq_cleanup(queue);
	mutex_unlock(&uq_mgr->userq_mutex);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return r;
}

static void amdgpu_userq_kref_destroy(struct kref *kref)
{
	int r;
	struct amdgpu_usermode_queue *queue =
		container_of(kref, struct amdgpu_usermode_queue, refcount);
	struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr;

	r = amdgpu_userq_destroy(uq_mgr, queue);
	if (r)
		drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r);
}

struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
{
	struct amdgpu_usermode_queue *queue;

	xa_lock(&uq_mgr->userq_xa);
	queue = xa_load(&uq_mgr->userq_xa, qid);
	if (queue)
		kref_get(&queue->refcount);
	xa_unlock(&uq_mgr->userq_xa);

	return queue;
}

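/* Drop a queue reference; the queue is destroyed once the last reference goes away. */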
void amdgpu_userq_put(struct amdgpu_usermode_queue *queue)
{
	if (queue)
		kref_put(&queue->refcount, amdgpu_userq_kref_destroy);
}

static int amdgpu_userq_priority_permit(struct drm_file *filp,
					int priority)
{
	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
{
	struct amdgpu_usermode_queue *queue = m->private;
	struct amdgpu_bo *bo;
	int r;

	if (!queue || !queue->mqd.obj)
		return -EINVAL;

	bo = amdgpu_bo_ref(queue->mqd.obj);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		return -EINVAL;
	}

	seq_printf(m, "queue_type: %d\n", queue->queue_type);
	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));

	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;
}

static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
}

static const struct file_operations amdgpu_mqd_info_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_mqd_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif

static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_db_info db_info;
	char *queue_name;
	bool skip_map_queue;
	u32 qid;
	uint64_t index;
	int r = 0;
	int priority =
		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;

	r = amdgpu_userq_priority_permit(filp, priority);
	if (r)
		return r;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/*
	 * There could be a situation that we are creating a new queue while
	 * the other queues under this UQ_mgr are suspended. So if there is any
	 * resume work pending, wait for it to get done.
	 *
	 * This will also make sure we have a valid eviction fence ready to be used.
	 */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	uq_funcs = adev->userq_funcs[args->in.ip_type];
	if (!uq_funcs) {
		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
			     args->in.ip_type);
		r = -EINVAL;
		goto unlock;
	}

	queue = kzalloc_obj(struct amdgpu_usermode_queue);
	if (!queue) {
		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
		r = -ENOMEM;
		goto unlock;
	}

	INIT_LIST_HEAD(&queue->userq_va_list);
	queue->doorbell_handle = args->in.doorbell_handle;
	queue->queue_type = args->in.ip_type;
	queue->vm = &fpriv->vm;
	queue->priority = priority;

	db_info.queue_type = queue->queue_type;
	db_info.doorbell_handle = queue->doorbell_handle;
	db_info.db_obj = &queue->db_obj;
	db_info.doorbell_offset = args->in.doorbell_offset;

	queue->userq_mgr = uq_mgr;
	/* Validate the userq virtual address. */
	if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
	    amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
	    amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
		r = -EINVAL;
		kfree(queue);
		goto unlock;
	}

	/* Convert relative doorbell offset into absolute doorbell index */
	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
	if (index == (uint64_t)-EINVAL) {
		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
		kfree(queue);
		r = -EINVAL;
		goto unlock;
	}

	queue->doorbell_index = index;
	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
	r = amdgpu_userq_fence_driver_alloc(adev, queue);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
		goto unlock;
	}

	r = uq_funcs->mqd_create(queue, &args->in);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
		amdgpu_userq_fence_driver_free(queue);
		kfree(queue);
		goto unlock;
	}

	/* drop this refcount during queue destroy */
	kref_init(&queue->refcount);

	/* Wait for mode-1 reset to complete */
	down_read(&adev->reset_domain->sem);
	r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
	if (r) {
		kfree(queue);
		up_read(&adev->reset_domain->sem);
		goto unlock;
	}

	r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
		     XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
		amdgpu_userq_fence_driver_free(queue);
		uq_funcs->mqd_destroy(queue);
		kfree(queue);
		r = -ENOMEM;
		up_read(&adev->reset_domain->sem);
		goto unlock;
	}
	up_read(&adev->reset_domain->sem);

	/* don't map the queue if scheduling is halted */
	if (adev->userq_halt_for_enforce_isolation &&
	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
		skip_map_queue = true;
	else
		skip_map_queue = false;
	if (!skip_map_queue) {
		r = amdgpu_userq_map_helper(queue);
		if (r) {
			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
			xa_erase(&uq_mgr->userq_xa, qid);
			amdgpu_userq_fence_driver_free(queue);
			uq_funcs->mqd_destroy(queue);
			kfree(queue);
			goto unlock;
		}
	}

	queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
	if (!queue_name) {
		r = -ENOMEM;
		goto unlock;
	}

#if defined(CONFIG_DEBUG_FS)
	/* Queue dentry per client to hold MQD information */
	queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
	debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
#endif
	amdgpu_userq_init_hang_detect_work(queue);
	kfree(queue_name);

	args->out.queue_id = qid;
	atomic_inc(&uq_mgr->userq_count[queue->queue_type]);

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);

	return r;
}

static int amdgpu_userq_input_args_validate(struct drm_device *dev,
					    union drm_amdgpu_userq *args,
					    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
			return -EINVAL;
		/* Only GFX, Compute and SDMA usermode queues are supported for now */
		if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
		    args->in.ip_type != AMDGPU_HW_IP_DMA &&
		    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
			drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
				     args->in.ip_type);
			return -EINVAL;
		}

		if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
		    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
		    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
		    !amdgpu_is_tmz(adev)) {
			drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
			return -EINVAL;
		}

		if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
		    args->in.queue_va == 0 ||
		    args->in.queue_size == 0) {
			drm_file_err(filp, "invalid userq queue va or size\n");
			return -EINVAL;
		}

		if (!is_power_of_2(args->in.queue_size)) {
			drm_file_err(filp, "Queue size must be a power of 2\n");
			return -EINVAL;
		}

		if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) {
			drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n");
			return -EINVAL;
		}

		if (!args->in.wptr_va || !args->in.rptr_va) {
			drm_file_err(filp, "invalid userq queue rptr or wptr va\n");
			return -EINVAL;
		}
		break;
	case AMDGPU_USERQ_OP_FREE:
		if (args->in.ip_type ||
		    args->in.doorbell_handle ||
		    args->in.doorbell_offset ||
		    args->in.flags ||
		    args->in.queue_va ||
		    args->in.queue_size ||
		    args->in.rptr_va ||
		    args->in.wptr_va ||
		    args->in.mqd ||
		    args->in.mqd_size)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

bool amdgpu_userq_enabled(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			return true;
	}

	return false;
}

int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *filp)
{
	union drm_amdgpu_userq *args = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_usermode_queue *queue;
	int r = 0;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
		return -EINVAL;

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		r = amdgpu_userq_create(filp, args);
		if (r)
			drm_file_err(filp, "Failed to create usermode queue\n");
		break;

	case AMDGPU_USERQ_OP_FREE: {
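		/*
		 * Drop the reference taken at creation time; the queue is
		 * actually destroyed once the last reference is released.
		 */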
		xa_lock(&fpriv->userq_mgr.userq_xa);
		queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
		xa_unlock(&fpriv->userq_mgr.userq_xa);
		if (!queue)
			return -ENOENT;

		amdgpu_userq_put(queue);
		break;
	}

	default:
		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
		return -EINVAL;
	}

	return r;
}

static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;
	int ret = 0, r;

	/* Resume all the queues for this process */
	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
		queue = amdgpu_userq_get(uq_mgr, queue_id);
		if (!queue)
			continue;

		if (!amdgpu_userq_buffer_vas_mapped(queue)) {
			drm_file_err(uq_mgr->file,
				     "trying to restore a queue without va mapping\n");
			queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
			amdgpu_userq_put(queue);
			continue;
		}

		r = amdgpu_userq_restore_helper(queue);
		if (r)
			ret = r;

		amdgpu_userq_put(queue);
	}

	if (ret)
		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
	return ret;
}

static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
			 struct amdgpu_vm *vm)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int ret;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
					 struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_va->base.bo;
		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
		if (unlikely(ret))
			return ret;

		amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;

		/* This moves the bo_va to the done list */
		ret = amdgpu_vm_bo_update(adev, bo_va, false);
		if (ret)
			return ret;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/* Make sure the whole VM is ready to be used */
static int
amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	bool invalidated = false, new_addition = false;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_hmm_range *range;
	struct amdgpu_vm *vm = &fpriv->vm;
	unsigned long key, tmp_key;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	struct drm_exec exec;
	struct xarray xa;
	int ret;

	xa_init(&xa);

retry_lock:
	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto unlock_all;

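		/*
		 * Also lock the BOs on the done list; their reservations are
		 * needed further down for the userptr handling and when the
		 * eviction fence is replaced.
		 */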
		ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto unlock_all;

		/* This validates PDs, PTs and per VM BOs */
		ret = amdgpu_vm_validate(adev, vm, NULL,
					 amdgpu_userq_validate_vm,
					 NULL);
		if (unlikely(ret))
			goto unlock_all;

		/* This locks and validates the remaining evicted BOs */
		ret = amdgpu_userq_bo_validate(adev, &exec, vm);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret))
			goto unlock_all;
	}

	if (invalidated) {
		xa_for_each(&xa, tmp_key, range) {
			bo = range->bo;
			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret)
				goto unlock_all;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);

			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret)
				goto unlock_all;
		}
		invalidated = false;
	}

	ret = amdgpu_vm_handle_moved(adev, vm, NULL);
	if (ret)
		goto unlock_all;

	key = 0;
	/* Validate User Ptr BOs */
	list_for_each_entry(bo_va, &vm->done, base.vm_status) {
		bo = bo_va->base.bo;
		if (!bo)
			continue;

		if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
			continue;

		range = xa_load(&xa, key);
		if (range && range->bo != bo) {
			xa_erase(&xa, key);
			amdgpu_hmm_range_free(range);
			range = NULL;
		}

		if (!range) {
			range = amdgpu_hmm_range_alloc(bo);
			if (!range) {
				ret = -ENOMEM;
				goto unlock_all;
			}

			xa_store(&xa, key, range, GFP_KERNEL);
			new_addition = true;
		}
		key++;
	}

	if (new_addition) {
		drm_exec_fini(&exec);
		xa_for_each(&xa, tmp_key, range) {
			if (!range)
				continue;
			bo = range->bo;
			ret = amdgpu_ttm_tt_get_user_pages(bo, range);
			if (ret)
				goto unlock_all;
		}

		invalidated = true;
		new_addition = false;
		goto retry_lock;
	}

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		goto unlock_all;

	/*
	 * We need to wait for all VM updates to finish before restarting the
	 * queues. Using the done list like that is now ok since everything is
	 * locked in place.
	 */
	list_for_each_entry(bo_va, &vm->done, base.vm_status)
		dma_fence_wait(bo_va->last_pt_update, false);
	dma_fence_wait(vm->last_update, false);

	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
	if (ret)
		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");

unlock_all:
	drm_exec_fini(&exec);
	xa_for_each(&xa, tmp_key, range) {
		if (!range)
			continue;
		bo = range->bo;
		amdgpu_hmm_range_free(range);
	}
	xa_destroy(&xa);
	return ret;
}

static void amdgpu_userq_restore_worker(struct work_struct *work)
{
	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	int ret;

	flush_delayed_work(&fpriv->evf_mgr.suspend_work);

	mutex_lock(&uq_mgr->userq_mutex);

	ret = amdgpu_userq_vm_validate(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
		goto unlock;
	}

	ret = amdgpu_userq_restore_all(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
		goto unlock;
	}

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);
}

static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;
	int ret = 0, r;

	amdgpu_userq_detect_and_reset_queues(uq_mgr);
	/* Try to unmap all the queues in this process ctx */
	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
		queue = amdgpu_userq_get(uq_mgr, queue_id);
		if (!queue)
			continue;
		r = amdgpu_userq_preempt_helper(queue);
		if (r)
			ret = r;
		amdgpu_userq_put(queue);
	}

	if (ret)
		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
	return ret;
}

void amdgpu_userq_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  userq_reset_work);
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	reset_context.src = AMDGPU_RESET_SRC_USERQ;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	/*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id;
	int ret;

	xa_for_each(&uq_mgr->userq_xa, queue_id, queue) {
		queue = amdgpu_userq_get(uq_mgr, queue_id);
		if (!queue)
			continue;

		struct dma_fence *f = queue->last_fence;

		if (!f || dma_fence_is_signaled(f)) {
			amdgpu_userq_put(queue);
			continue;
		}
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0) {
			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
				     f->context, f->seqno);
			amdgpu_userq_put(queue);
			return -ETIMEDOUT;
		}
		amdgpu_userq_put(queue);
	}

	return 0;
}

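/*
 * Called on eviction: wait for outstanding user fences, preempt every queue
 * owned by this process and signal the current eviction fence. A resume work
 * is scheduled afterwards unless the file descriptor is being closed.
 */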
void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
		   struct amdgpu_eviction_fence *ev_fence)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	int ret;

	/* Wait for any pending userqueue fence work to finish */
	ret = amdgpu_userq_wait_for_signal(uq_mgr);
	if (ret)
		dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");

	ret = amdgpu_userq_evict_all(uq_mgr);
	if (ret)
		dev_err(adev->dev, "Failed to evict userqueue\n");

	/* Signal current eviction fence */
	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);

	if (evf_mgr->fd_closing) {
		cancel_delayed_work_sync(&uq_mgr->resume_work);
		return;
	}

	/* Schedule a resume work */
	schedule_delayed_work(&uq_mgr->resume_work, 0);
}

int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
			  struct amdgpu_device *adev)
{
	mutex_init(&userq_mgr->userq_mutex);
	xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC);
	userq_mgr->adev = adev;
	userq_mgr->file = file_priv;

	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
	return 0;
}

void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long queue_id = 0;

	for (;;) {
		xa_lock(&userq_mgr->userq_xa);
		queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX,
				XA_PRESENT);
		if (queue)
			__xa_erase(&userq_mgr->userq_xa, queue_id);
		xa_unlock(&userq_mgr->userq_xa);

		if (!queue)
			break;

		amdgpu_userq_put(queue);
	}

	xa_destroy(&userq_mgr->userq_xa);
	mutex_destroy(&userq_mgr->userq_mutex);
}

int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int r;

	if (!ip_mask)
		return 0;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		cancel_delayed_work_sync(&uqm->resume_work);
		guard(mutex)(&uqm->userq_mutex);
		amdgpu_userq_detect_and_reset_queues(uqm);
		if (adev->in_s0ix)
			r = amdgpu_userq_preempt_helper(queue);
		else
			r = amdgpu_userq_unmap_helper(queue);
		if (r)
			return r;
	}
	return 0;
}

int amdgpu_userq_resume(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int r;

	if (!ip_mask)
		return 0;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		guard(mutex)(&uqm->userq_mutex);
		if (adev->in_s0ix)
			r = amdgpu_userq_restore_helper(queue);
		else
			r = amdgpu_userq_map_helper(queue);
		if (r)
			return r;
	}

	return 0;
}

int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
						  u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int ret = 0, r;

	/* only need to stop gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	if (adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already stopped!\n");
	adev->userq_halt_for_enforce_isolation = true;
	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		cancel_delayed_work_sync(&uqm->resume_work);
		mutex_lock(&uqm->userq_mutex);
		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
		    (queue->xcp_id == idx)) {
			amdgpu_userq_detect_and_reset_queues(uqm);
			r = amdgpu_userq_preempt_helper(queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}

	return ret;
}

int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
						   u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;
	int ret = 0, r;

	/* only need to start gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	if (!adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already started!\n");
	adev->userq_halt_for_enforce_isolation = false;
	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		mutex_lock(&uqm->userq_mutex);
		if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
		     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
		    (queue->xcp_id == idx)) {
			r = amdgpu_userq_restore_helper(queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}

	return ret;
}

int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t saddr)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_bo_va *bo_va = mapping->bo_va;
	struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
	int ret = 0;

	if (!ip_mask)
		return 0;

	dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
	/*
	 * The userq VA mapping reservation should include the eviction fence;
	 * if the eviction fence can't signal successfully during unmapping,
	 * the driver warns to flag this improper unmap of the userq VA.
	 * Note: the eviction fence may be attached to different BOs, and this
	 * unmap only covers one kind of userq VA, so at this point assume
	 * the eviction fence is always unsignaled.
	 */
	if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
		ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret <= 0)
			return -EBUSY;
	}

	return 0;
}

void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
{
	const struct amdgpu_userq_funcs *userq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm;
	unsigned long queue_id;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		uqm = queue->userq_mgr;
		cancel_delayed_work_sync(&uqm->resume_work);
		if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
			amdgpu_userq_wait_for_last_fence(queue);
			userq_funcs = adev->userq_funcs[queue->queue_type];
			userq_funcs->unmap(queue);
			/* just mark all queues as hung at this point.
			 * if unmap succeeds, we could map again
			 * in amdgpu_userq_post_reset() if vram is not lost
			 */
			queue->state = AMDGPU_USERQ_STATE_HUNG;
			amdgpu_userq_fence_driver_force_completion(queue);
		}
	}
}

int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
{
	/* if a queue was marked AMDGPU_USERQ_STATE_HUNG in amdgpu_userq_pre_reset(),
	 * we should be able to map it again and continue if vram is not lost.
	 */
	struct amdgpu_usermode_queue *queue;
	const struct amdgpu_userq_funcs *userq_funcs;
	unsigned long queue_id;
	int r = 0;

	xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
		if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
			userq_funcs = adev->userq_funcs[queue->queue_type];
			/* Re-map queue */
			r = userq_funcs->map(queue);
			if (r) {
				dev_err(adev->dev, "Failed to remap queue %lu\n", queue_id);
				continue;
			}
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
		}
	}

	return r;
}