1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2023 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 */ 24 25 #include <drm/drm_auth.h> 26 #include <drm/drm_exec.h> 27 #include <linux/pm_runtime.h> 28 #include <drm/drm_drv.h> 29 30 #include "amdgpu.h" 31 #include "amdgpu_reset.h" 32 #include "amdgpu_vm.h" 33 #include "amdgpu_userq.h" 34 #include "amdgpu_hmm.h" 35 #include "amdgpu_userq_fence.h" 36 37 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) 38 { 39 int i; 40 u32 userq_ip_mask = 0; 41 42 for (i = 0; i < AMDGPU_HW_IP_NUM; i++) { 43 if (adev->userq_funcs[i]) 44 userq_ip_mask |= (1 << i); 45 } 46 47 return userq_ip_mask; 48 } 49 50 static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev, 51 enum amdgpu_ring_type ring_type, int reset_type) 52 { 53 54 if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX) 55 return false; 56 57 switch (ring_type) { 58 case AMDGPU_RING_TYPE_GFX: 59 if (adev->gfx.gfx_supported_reset & reset_type) 60 return true; 61 break; 62 case AMDGPU_RING_TYPE_COMPUTE: 63 if (adev->gfx.compute_supported_reset & reset_type) 64 return true; 65 break; 66 case AMDGPU_RING_TYPE_SDMA: 67 if (adev->sdma.supported_reset & reset_type) 68 return true; 69 break; 70 case AMDGPU_RING_TYPE_VCN_DEC: 71 case AMDGPU_RING_TYPE_VCN_ENC: 72 if (adev->vcn.supported_reset & reset_type) 73 return true; 74 break; 75 case AMDGPU_RING_TYPE_VCN_JPEG: 76 if (adev->jpeg.supported_reset & reset_type) 77 return true; 78 break; 79 default: 80 break; 81 } 82 return false; 83 } 84 85 static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev) 86 { 87 if (amdgpu_device_should_recover_gpu(adev)) { 88 amdgpu_reset_domain_schedule(adev->reset_domain, 89 &adev->userq_reset_work); 90 /* Wait for the reset job to complete */ 91 flush_work(&adev->userq_reset_work); 92 } 93 } 94 95 static int 96 amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr) 97 { 98 struct amdgpu_device *adev = uq_mgr->adev; 99 const int queue_types[] = { 100 AMDGPU_RING_TYPE_COMPUTE, 101 AMDGPU_RING_TYPE_GFX, 102 AMDGPU_RING_TYPE_SDMA 103 }; 104 const int num_queue_types = ARRAY_SIZE(queue_types); 105 bool gpu_reset = false; 106 int r = 0; 107 int i; 108 109 /* Warning if current process mutex is not held */ 110 WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex)); 111 112 if (unlikely(adev->debug_disable_gpu_ring_reset)) { 113 dev_err(adev->dev, "userq reset disabled by debug mask\n"); 114 return 0; 115 } 116 117 /* 118 * If GPU recovery feature is disabled system-wide, 119 * skip all reset detection logic 120 */ 121 if (!amdgpu_gpu_recovery) 122 return 0; 123 124 /* 125 * Iterate through all queue types to detect and reset problematic queues 126 * Process each queue type in the defined order 127 */ 128 for (i = 0; i < num_queue_types; i++) { 129 int ring_type = queue_types[i]; 130 const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type]; 131 132 if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE)) 133 continue; 134 135 if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 && 136 funcs && funcs->detect_and_reset) { 137 r = funcs->detect_and_reset(adev, ring_type); 138 if (r) { 139 gpu_reset = true; 140 break; 141 } 142 } 143 } 144 145 if (gpu_reset) 146 amdgpu_userq_gpu_reset(adev); 147 148 return r; 149 } 150 151 static void amdgpu_userq_hang_detect_work(struct work_struct *work) 152 { 153 struct amdgpu_usermode_queue *queue = container_of(work, 154 struct amdgpu_usermode_queue, 155 hang_detect_work.work); 156 struct dma_fence *fence; 157 struct amdgpu_userq_mgr *uq_mgr; 158 159 if (!queue || !queue->userq_mgr) 160 return; 161 162 uq_mgr = queue->userq_mgr; 163 fence = READ_ONCE(queue->hang_detect_fence); 164 /* Fence already signaled – no action needed */ 165 if (!fence || dma_fence_is_signaled(fence)) 166 return; 167 168 mutex_lock(&uq_mgr->userq_mutex); 169 amdgpu_userq_detect_and_reset_queues(uq_mgr); 170 mutex_unlock(&uq_mgr->userq_mutex); 171 } 172 173 /* 174 * Start hang detection for a user queue fence. A delayed work will be scheduled 175 * to check if the fence is still pending after the timeout period. 176 */ 177 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue) 178 { 179 struct amdgpu_device *adev; 180 unsigned long timeout_ms; 181 182 if (!queue || !queue->userq_mgr || !queue->userq_mgr->adev) 183 return; 184 185 adev = queue->userq_mgr->adev; 186 /* Determine timeout based on queue type */ 187 switch (queue->queue_type) { 188 case AMDGPU_RING_TYPE_GFX: 189 timeout_ms = adev->gfx_timeout; 190 break; 191 case AMDGPU_RING_TYPE_COMPUTE: 192 timeout_ms = adev->compute_timeout; 193 break; 194 case AMDGPU_RING_TYPE_SDMA: 195 timeout_ms = adev->sdma_timeout; 196 break; 197 default: 198 timeout_ms = adev->gfx_timeout; 199 break; 200 } 201 202 /* Store the fence to monitor and schedule hang detection */ 203 WRITE_ONCE(queue->hang_detect_fence, queue->last_fence); 204 schedule_delayed_work(&queue->hang_detect_work, 205 msecs_to_jiffies(timeout_ms)); 206 } 207 208 static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue) 209 { 210 INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work); 211 queue->hang_detect_fence = NULL; 212 } 213 214 static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue, 215 struct amdgpu_bo_va_mapping *va_map, u64 addr) 216 { 217 struct amdgpu_userq_va_cursor *va_cursor; 218 struct userq_va_list; 219 220 va_cursor = kzalloc_obj(*va_cursor); 221 if (!va_cursor) 222 return -ENOMEM; 223 224 INIT_LIST_HEAD(&va_cursor->list); 225 va_cursor->gpu_addr = addr; 226 atomic_set(&va_map->bo_va->userq_va_mapped, 1); 227 list_add(&va_cursor->list, &queue->userq_va_list); 228 229 return 0; 230 } 231 232 int amdgpu_userq_input_va_validate(struct amdgpu_device *adev, 233 struct amdgpu_usermode_queue *queue, 234 u64 addr, u64 expected_size) 235 { 236 struct amdgpu_bo_va_mapping *va_map; 237 struct amdgpu_vm *vm = queue->vm; 238 u64 user_addr; 239 u64 size; 240 int r = 0; 241 242 user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT; 243 size = expected_size >> AMDGPU_GPU_PAGE_SHIFT; 244 245 r = amdgpu_bo_reserve(vm->root.bo, false); 246 if (r) 247 return r; 248 249 va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr); 250 if (!va_map) { 251 r = -EINVAL; 252 goto out_err; 253 } 254 /* Only validate the userq whether resident in the VM mapping range */ 255 if (user_addr >= va_map->start && 256 va_map->last - user_addr + 1 >= size) { 257 amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr); 258 amdgpu_bo_unreserve(vm->root.bo); 259 return 0; 260 } 261 262 r = -EINVAL; 263 out_err: 264 amdgpu_bo_unreserve(vm->root.bo); 265 return r; 266 } 267 268 static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr) 269 { 270 struct amdgpu_bo_va_mapping *mapping; 271 bool r; 272 273 if (amdgpu_bo_reserve(vm->root.bo, false)) 274 return false; 275 276 mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); 277 if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped)) 278 r = true; 279 else 280 r = false; 281 amdgpu_bo_unreserve(vm->root.bo); 282 283 return r; 284 } 285 286 static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue) 287 { 288 struct amdgpu_userq_va_cursor *va_cursor, *tmp; 289 int r = 0; 290 291 list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { 292 r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr); 293 dev_dbg(queue->userq_mgr->adev->dev, 294 "validate the userq mapping:%p va:%llx r:%d\n", 295 queue, va_cursor->gpu_addr, r); 296 } 297 298 if (r != 0) 299 return true; 300 301 return false; 302 } 303 304 static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping, 305 struct amdgpu_userq_va_cursor *va_cursor) 306 { 307 atomic_set(&mapping->bo_va->userq_va_mapped, 0); 308 list_del(&va_cursor->list); 309 kfree(va_cursor); 310 } 311 312 static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev, 313 struct amdgpu_usermode_queue *queue) 314 { 315 struct amdgpu_userq_va_cursor *va_cursor, *tmp; 316 struct amdgpu_bo_va_mapping *mapping; 317 int r; 318 319 r = amdgpu_bo_reserve(queue->vm->root.bo, false); 320 if (r) 321 return r; 322 323 list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { 324 mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr); 325 if (!mapping) { 326 r = -EINVAL; 327 goto err; 328 } 329 dev_dbg(adev->dev, "delete the userq:%p va:%llx\n", 330 queue, va_cursor->gpu_addr); 331 amdgpu_userq_buffer_va_list_del(mapping, va_cursor); 332 } 333 err: 334 amdgpu_bo_unreserve(queue->vm->root.bo); 335 return r; 336 } 337 338 static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue) 339 { 340 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 341 struct amdgpu_device *adev = uq_mgr->adev; 342 const struct amdgpu_userq_funcs *userq_funcs = 343 adev->userq_funcs[queue->queue_type]; 344 bool found_hung_queue = false; 345 int r = 0; 346 347 if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { 348 r = userq_funcs->preempt(queue); 349 if (r) { 350 queue->state = AMDGPU_USERQ_STATE_HUNG; 351 found_hung_queue = true; 352 } else { 353 queue->state = AMDGPU_USERQ_STATE_PREEMPTED; 354 } 355 } 356 357 if (found_hung_queue) 358 amdgpu_userq_detect_and_reset_queues(uq_mgr); 359 360 return r; 361 } 362 363 static int amdgpu_userq_restore_helper(struct amdgpu_usermode_queue *queue) 364 { 365 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 366 struct amdgpu_device *adev = uq_mgr->adev; 367 const struct amdgpu_userq_funcs *userq_funcs = 368 adev->userq_funcs[queue->queue_type]; 369 int r = 0; 370 371 if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) { 372 r = userq_funcs->restore(queue); 373 if (r) { 374 queue->state = AMDGPU_USERQ_STATE_HUNG; 375 } else { 376 queue->state = AMDGPU_USERQ_STATE_MAPPED; 377 } 378 } 379 380 return r; 381 } 382 383 static int amdgpu_userq_unmap_helper(struct amdgpu_usermode_queue *queue) 384 { 385 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 386 struct amdgpu_device *adev = uq_mgr->adev; 387 const struct amdgpu_userq_funcs *userq_funcs = 388 adev->userq_funcs[queue->queue_type]; 389 bool found_hung_queue = false; 390 int r = 0; 391 392 if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || 393 (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { 394 r = userq_funcs->unmap(queue); 395 if (r) { 396 queue->state = AMDGPU_USERQ_STATE_HUNG; 397 found_hung_queue = true; 398 } else { 399 queue->state = AMDGPU_USERQ_STATE_UNMAPPED; 400 } 401 } 402 403 if (found_hung_queue) 404 amdgpu_userq_detect_and_reset_queues(uq_mgr); 405 406 return r; 407 } 408 409 static int amdgpu_userq_map_helper(struct amdgpu_usermode_queue *queue) 410 { 411 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 412 struct amdgpu_device *adev = uq_mgr->adev; 413 const struct amdgpu_userq_funcs *userq_funcs = 414 adev->userq_funcs[queue->queue_type]; 415 int r = 0; 416 417 if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) { 418 r = userq_funcs->map(queue); 419 if (r) { 420 queue->state = AMDGPU_USERQ_STATE_HUNG; 421 amdgpu_userq_detect_and_reset_queues(uq_mgr); 422 } else { 423 queue->state = AMDGPU_USERQ_STATE_MAPPED; 424 } 425 } 426 427 return r; 428 } 429 430 static int amdgpu_userq_wait_for_last_fence(struct amdgpu_usermode_queue *queue) 431 { 432 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 433 struct dma_fence *f = queue->last_fence; 434 int ret = 0; 435 436 if (f && !dma_fence_is_signaled(f)) { 437 ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT); 438 if (ret <= 0) { 439 drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", 440 f->context, f->seqno); 441 queue->state = AMDGPU_USERQ_STATE_HUNG; 442 return -ETIME; 443 } 444 } 445 446 return ret; 447 } 448 449 static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue) 450 { 451 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 452 struct amdgpu_device *adev = uq_mgr->adev; 453 const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; 454 455 /* Wait for mode-1 reset to complete */ 456 down_read(&adev->reset_domain->sem); 457 458 /* Drop the userq reference. */ 459 amdgpu_userq_buffer_vas_list_cleanup(adev, queue); 460 uq_funcs->mqd_destroy(queue); 461 amdgpu_userq_fence_driver_free(queue); 462 /* Use interrupt-safe locking since IRQ handlers may access these XArrays */ 463 xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index); 464 queue->userq_mgr = NULL; 465 list_del(&queue->userq_va_list); 466 kfree(queue); 467 468 up_read(&adev->reset_domain->sem); 469 } 470 471 void 472 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr, 473 struct amdgpu_eviction_fence_mgr *evf_mgr) 474 { 475 struct amdgpu_eviction_fence *ev_fence; 476 477 retry: 478 /* Flush any pending resume work to create ev_fence */ 479 flush_delayed_work(&uq_mgr->resume_work); 480 481 mutex_lock(&uq_mgr->userq_mutex); 482 spin_lock(&evf_mgr->ev_fence_lock); 483 ev_fence = evf_mgr->ev_fence; 484 spin_unlock(&evf_mgr->ev_fence_lock); 485 if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) { 486 mutex_unlock(&uq_mgr->userq_mutex); 487 /* 488 * Looks like there was no pending resume work, 489 * add one now to create a valid eviction fence 490 */ 491 schedule_delayed_work(&uq_mgr->resume_work, 0); 492 goto retry; 493 } 494 } 495 496 int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr, 497 struct amdgpu_userq_obj *userq_obj, 498 int size) 499 { 500 struct amdgpu_device *adev = uq_mgr->adev; 501 struct amdgpu_bo_param bp; 502 int r; 503 504 memset(&bp, 0, sizeof(bp)); 505 bp.byte_align = PAGE_SIZE; 506 bp.domain = AMDGPU_GEM_DOMAIN_GTT; 507 bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 508 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 509 bp.type = ttm_bo_type_kernel; 510 bp.size = size; 511 bp.resv = NULL; 512 bp.bo_ptr_size = sizeof(struct amdgpu_bo); 513 514 r = amdgpu_bo_create(adev, &bp, &userq_obj->obj); 515 if (r) { 516 drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r); 517 return r; 518 } 519 520 r = amdgpu_bo_reserve(userq_obj->obj, true); 521 if (r) { 522 drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r); 523 goto free_obj; 524 } 525 526 r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo); 527 if (r) { 528 drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r); 529 goto unresv; 530 } 531 532 r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr); 533 if (r) { 534 drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r); 535 goto unresv; 536 } 537 538 userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj); 539 amdgpu_bo_unreserve(userq_obj->obj); 540 memset(userq_obj->cpu_ptr, 0, size); 541 return 0; 542 543 unresv: 544 amdgpu_bo_unreserve(userq_obj->obj); 545 546 free_obj: 547 amdgpu_bo_unref(&userq_obj->obj); 548 return r; 549 } 550 551 void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr, 552 struct amdgpu_userq_obj *userq_obj) 553 { 554 amdgpu_bo_kunmap(userq_obj->obj); 555 amdgpu_bo_unref(&userq_obj->obj); 556 } 557 558 uint64_t 559 amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, 560 struct amdgpu_db_info *db_info, 561 struct drm_file *filp) 562 { 563 uint64_t index; 564 struct drm_gem_object *gobj; 565 struct amdgpu_userq_obj *db_obj = db_info->db_obj; 566 int r, db_size; 567 568 gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle); 569 if (gobj == NULL) { 570 drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n"); 571 return -EINVAL; 572 } 573 574 db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 575 drm_gem_object_put(gobj); 576 577 r = amdgpu_bo_reserve(db_obj->obj, true); 578 if (r) { 579 drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); 580 goto unref_bo; 581 } 582 583 /* Pin the BO before generating the index, unpin in queue destroy */ 584 r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL); 585 if (r) { 586 drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n"); 587 goto unresv_bo; 588 } 589 590 switch (db_info->queue_type) { 591 case AMDGPU_HW_IP_GFX: 592 case AMDGPU_HW_IP_COMPUTE: 593 case AMDGPU_HW_IP_DMA: 594 db_size = sizeof(u64); 595 break; 596 default: 597 drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n", 598 db_info->queue_type); 599 r = -EINVAL; 600 goto unpin_bo; 601 } 602 603 index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj, 604 db_info->doorbell_offset, db_size); 605 drm_dbg_driver(adev_to_drm(uq_mgr->adev), 606 "[Usermode queues] doorbell index=%lld\n", index); 607 amdgpu_bo_unreserve(db_obj->obj); 608 return index; 609 610 unpin_bo: 611 amdgpu_bo_unpin(db_obj->obj); 612 unresv_bo: 613 amdgpu_bo_unreserve(db_obj->obj); 614 unref_bo: 615 amdgpu_bo_unref(&db_obj->obj); 616 return r; 617 } 618 619 static int 620 amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) 621 { 622 struct amdgpu_device *adev = uq_mgr->adev; 623 int r = 0; 624 625 cancel_delayed_work_sync(&uq_mgr->resume_work); 626 mutex_lock(&uq_mgr->userq_mutex); 627 amdgpu_userq_wait_for_last_fence(queue); 628 /* Cancel any pending hang detection work and cleanup */ 629 if (queue->hang_detect_fence) { 630 cancel_delayed_work_sync(&queue->hang_detect_work); 631 queue->hang_detect_fence = NULL; 632 } 633 r = amdgpu_bo_reserve(queue->db_obj.obj, true); 634 if (!r) { 635 amdgpu_bo_unpin(queue->db_obj.obj); 636 amdgpu_bo_unreserve(queue->db_obj.obj); 637 } 638 amdgpu_bo_unref(&queue->db_obj.obj); 639 640 r = amdgpu_bo_reserve(queue->wptr_obj.obj, true); 641 if (!r) { 642 amdgpu_bo_unpin(queue->wptr_obj.obj); 643 amdgpu_bo_unreserve(queue->wptr_obj.obj); 644 } 645 amdgpu_bo_unref(&queue->wptr_obj.obj); 646 647 atomic_dec(&uq_mgr->userq_count[queue->queue_type]); 648 #if defined(CONFIG_DEBUG_FS) 649 debugfs_remove_recursive(queue->debugfs_queue); 650 #endif 651 amdgpu_userq_detect_and_reset_queues(uq_mgr); 652 r = amdgpu_userq_unmap_helper(queue); 653 /*TODO: It requires a reset for userq hw unmap error*/ 654 if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) { 655 drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n"); 656 queue->state = AMDGPU_USERQ_STATE_HUNG; 657 } 658 amdgpu_userq_cleanup(queue); 659 mutex_unlock(&uq_mgr->userq_mutex); 660 661 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 662 663 return r; 664 } 665 666 static void amdgpu_userq_kref_destroy(struct kref *kref) 667 { 668 int r; 669 struct amdgpu_usermode_queue *queue = 670 container_of(kref, struct amdgpu_usermode_queue, refcount); 671 struct amdgpu_userq_mgr *uq_mgr = queue->userq_mgr; 672 673 r = amdgpu_userq_destroy(uq_mgr, queue); 674 if (r) 675 drm_file_err(uq_mgr->file, "Failed to destroy usermode queue %d\n", r); 676 } 677 678 struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid) 679 { 680 struct amdgpu_usermode_queue *queue; 681 682 xa_lock(&uq_mgr->userq_xa); 683 queue = xa_load(&uq_mgr->userq_xa, qid); 684 if (queue) 685 kref_get(&queue->refcount); 686 xa_unlock(&uq_mgr->userq_xa); 687 688 return queue; 689 } 690 691 void amdgpu_userq_put(struct amdgpu_usermode_queue *queue) 692 { 693 if (queue) 694 kref_put(&queue->refcount, amdgpu_userq_kref_destroy); 695 } 696 697 static int amdgpu_userq_priority_permit(struct drm_file *filp, 698 int priority) 699 { 700 if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH) 701 return 0; 702 703 if (capable(CAP_SYS_NICE)) 704 return 0; 705 706 if (drm_is_current_master(filp)) 707 return 0; 708 709 return -EACCES; 710 } 711 712 static int 713 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) 714 { 715 struct amdgpu_fpriv *fpriv = filp->driver_priv; 716 struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr; 717 struct amdgpu_device *adev = uq_mgr->adev; 718 const struct amdgpu_userq_funcs *uq_funcs; 719 struct amdgpu_usermode_queue *queue; 720 struct amdgpu_db_info db_info; 721 bool skip_map_queue; 722 u32 qid; 723 uint64_t index; 724 int r = 0; 725 int priority = 726 (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> 727 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; 728 729 r = amdgpu_userq_priority_permit(filp, priority); 730 if (r) 731 return r; 732 733 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 734 if (r < 0) { 735 drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n"); 736 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 737 return r; 738 } 739 740 /* 741 * There could be a situation that we are creating a new queue while 742 * the other queues under this UQ_mgr are suspended. So if there is any 743 * resume work pending, wait for it to get done. 744 * 745 * This will also make sure we have a valid eviction fence ready to be used. 746 */ 747 amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); 748 749 uq_funcs = adev->userq_funcs[args->in.ip_type]; 750 if (!uq_funcs) { 751 drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n", 752 args->in.ip_type); 753 r = -EINVAL; 754 goto unlock; 755 } 756 757 queue = kzalloc_obj(struct amdgpu_usermode_queue); 758 if (!queue) { 759 drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n"); 760 r = -ENOMEM; 761 goto unlock; 762 } 763 764 INIT_LIST_HEAD(&queue->userq_va_list); 765 queue->doorbell_handle = args->in.doorbell_handle; 766 queue->queue_type = args->in.ip_type; 767 queue->vm = &fpriv->vm; 768 queue->priority = priority; 769 770 db_info.queue_type = queue->queue_type; 771 db_info.doorbell_handle = queue->doorbell_handle; 772 db_info.db_obj = &queue->db_obj; 773 db_info.doorbell_offset = args->in.doorbell_offset; 774 775 queue->userq_mgr = uq_mgr; 776 /* Validate the userq virtual address.*/ 777 if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) || 778 amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || 779 amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { 780 r = -EINVAL; 781 goto free_queue; 782 } 783 784 /* Convert relative doorbell offset into absolute doorbell index */ 785 index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp); 786 if (index == (uint64_t)-EINVAL) { 787 drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n"); 788 r = -EINVAL; 789 goto free_queue; 790 } 791 792 queue->doorbell_index = index; 793 xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC); 794 r = amdgpu_userq_fence_driver_alloc(adev, queue); 795 if (r) { 796 drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n"); 797 goto free_queue; 798 } 799 800 r = uq_funcs->mqd_create(queue, &args->in); 801 if (r) { 802 drm_file_err(uq_mgr->file, "Failed to create Queue\n"); 803 goto clean_fence_driver; 804 } 805 806 /* don't map the queue if scheduling is halted */ 807 if (adev->userq_halt_for_enforce_isolation && 808 ((queue->queue_type == AMDGPU_HW_IP_GFX) || 809 (queue->queue_type == AMDGPU_HW_IP_COMPUTE))) 810 skip_map_queue = true; 811 else 812 skip_map_queue = false; 813 if (!skip_map_queue) { 814 r = amdgpu_userq_map_helper(queue); 815 if (r) { 816 drm_file_err(uq_mgr->file, "Failed to map Queue\n"); 817 down_read(&adev->reset_domain->sem); 818 goto clean_mqd; 819 } 820 } 821 822 /* drop this refcount during queue destroy */ 823 kref_init(&queue->refcount); 824 825 /* Wait for mode-1 reset to complete */ 826 down_read(&adev->reset_domain->sem); 827 828 r = xa_alloc(&uq_mgr->userq_xa, &qid, queue, 829 XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL); 830 if (r) { 831 if (!skip_map_queue) 832 amdgpu_userq_unmap_helper(queue); 833 834 r = -ENOMEM; 835 goto clean_mqd; 836 } 837 838 r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL)); 839 if (r) { 840 xa_erase(&uq_mgr->userq_xa, qid); 841 if (!skip_map_queue) 842 amdgpu_userq_unmap_helper(queue); 843 844 goto clean_mqd; 845 } 846 up_read(&adev->reset_domain->sem); 847 848 amdgpu_debugfs_userq_init(filp, queue, qid); 849 amdgpu_userq_init_hang_detect_work(queue); 850 851 args->out.queue_id = qid; 852 atomic_inc(&uq_mgr->userq_count[queue->queue_type]); 853 mutex_unlock(&uq_mgr->userq_mutex); 854 return 0; 855 856 clean_mqd: 857 uq_funcs->mqd_destroy(queue); 858 up_read(&adev->reset_domain->sem); 859 clean_fence_driver: 860 amdgpu_userq_fence_driver_free(queue); 861 free_queue: 862 kfree(queue); 863 unlock: 864 mutex_unlock(&uq_mgr->userq_mutex); 865 866 return r; 867 } 868 869 static int amdgpu_userq_input_args_validate(struct drm_device *dev, 870 union drm_amdgpu_userq *args, 871 struct drm_file *filp) 872 { 873 struct amdgpu_device *adev = drm_to_adev(dev); 874 875 switch (args->in.op) { 876 case AMDGPU_USERQ_OP_CREATE: 877 if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | 878 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE)) 879 return -EINVAL; 880 /* Usermode queues are only supported for GFX IP as of now */ 881 if (args->in.ip_type != AMDGPU_HW_IP_GFX && 882 args->in.ip_type != AMDGPU_HW_IP_DMA && 883 args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { 884 drm_file_err(filp, "Usermode queue doesn't support IP type %u\n", 885 args->in.ip_type); 886 return -EINVAL; 887 } 888 889 if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && 890 (args->in.ip_type != AMDGPU_HW_IP_GFX) && 891 (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && 892 !amdgpu_is_tmz(adev)) { 893 drm_file_err(filp, "Secure only supported on GFX/Compute queues\n"); 894 return -EINVAL; 895 } 896 897 if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET || 898 args->in.queue_va == 0 || 899 args->in.queue_size == 0) { 900 drm_file_err(filp, "invalidate userq queue va or size\n"); 901 return -EINVAL; 902 } 903 904 if (!is_power_of_2(args->in.queue_size)) { 905 drm_file_err(filp, "Queue size must be a power of 2\n"); 906 return -EINVAL; 907 } 908 909 if (args->in.queue_size < AMDGPU_GPU_PAGE_SIZE) { 910 drm_file_err(filp, "Queue size smaller than AMDGPU_GPU_PAGE_SIZE\n"); 911 return -EINVAL; 912 } 913 914 if (!args->in.wptr_va || !args->in.rptr_va) { 915 drm_file_err(filp, "invalidate userq queue rptr or wptr\n"); 916 return -EINVAL; 917 } 918 break; 919 case AMDGPU_USERQ_OP_FREE: 920 if (args->in.ip_type || 921 args->in.doorbell_handle || 922 args->in.doorbell_offset || 923 args->in.flags || 924 args->in.queue_va || 925 args->in.queue_size || 926 args->in.rptr_va || 927 args->in.wptr_va || 928 args->in.mqd || 929 args->in.mqd_size) 930 return -EINVAL; 931 break; 932 default: 933 return -EINVAL; 934 } 935 936 return 0; 937 } 938 939 bool amdgpu_userq_enabled(struct drm_device *dev) 940 { 941 struct amdgpu_device *adev = drm_to_adev(dev); 942 int i; 943 944 for (i = 0; i < AMDGPU_HW_IP_NUM; i++) { 945 if (adev->userq_funcs[i]) 946 return true; 947 } 948 949 return false; 950 } 951 952 int amdgpu_userq_ioctl(struct drm_device *dev, void *data, 953 struct drm_file *filp) 954 { 955 union drm_amdgpu_userq *args = data; 956 struct amdgpu_fpriv *fpriv = filp->driver_priv; 957 struct amdgpu_usermode_queue *queue; 958 int r = 0; 959 960 if (!amdgpu_userq_enabled(dev)) 961 return -ENOTSUPP; 962 963 if (amdgpu_userq_input_args_validate(dev, args, filp) < 0) 964 return -EINVAL; 965 966 switch (args->in.op) { 967 case AMDGPU_USERQ_OP_CREATE: 968 r = amdgpu_userq_create(filp, args); 969 if (r) 970 drm_file_err(filp, "Failed to create usermode queue\n"); 971 break; 972 973 case AMDGPU_USERQ_OP_FREE: { 974 xa_lock(&fpriv->userq_mgr.userq_xa); 975 queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id); 976 xa_unlock(&fpriv->userq_mgr.userq_xa); 977 if (!queue) 978 return -ENOENT; 979 980 amdgpu_userq_put(queue); 981 break; 982 } 983 984 default: 985 drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op); 986 return -EINVAL; 987 } 988 989 return r; 990 } 991 992 static int 993 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) 994 { 995 struct amdgpu_usermode_queue *queue; 996 unsigned long queue_id; 997 int ret = 0, r; 998 999 /* Resume all the queues for this process */ 1000 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) { 1001 queue = amdgpu_userq_get(uq_mgr, queue_id); 1002 if (!queue) 1003 continue; 1004 1005 if (!amdgpu_userq_buffer_vas_mapped(queue)) { 1006 drm_file_err(uq_mgr->file, 1007 "trying restore queue without va mapping\n"); 1008 queue->state = AMDGPU_USERQ_STATE_INVALID_VA; 1009 amdgpu_userq_put(queue); 1010 continue; 1011 } 1012 1013 r = amdgpu_userq_restore_helper(queue); 1014 if (r) 1015 ret = r; 1016 1017 amdgpu_userq_put(queue); 1018 } 1019 1020 if (ret) 1021 drm_file_err(uq_mgr->file, "Failed to map all the queues\n"); 1022 return ret; 1023 } 1024 1025 static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo) 1026 { 1027 struct ttm_operation_ctx ctx = { false, false }; 1028 1029 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); 1030 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1031 } 1032 1033 /* Handle all BOs on the invalidated list, validate them and update the PTs */ 1034 static int 1035 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec, 1036 struct amdgpu_vm *vm) 1037 { 1038 struct ttm_operation_ctx ctx = { false, false }; 1039 struct amdgpu_bo_va *bo_va; 1040 struct amdgpu_bo *bo; 1041 int ret; 1042 1043 spin_lock(&vm->status_lock); 1044 while (!list_empty(&vm->invalidated)) { 1045 bo_va = list_first_entry(&vm->invalidated, 1046 struct amdgpu_bo_va, 1047 base.vm_status); 1048 spin_unlock(&vm->status_lock); 1049 1050 bo = bo_va->base.bo; 1051 ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); 1052 if (unlikely(ret)) 1053 return ret; 1054 1055 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); 1056 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1057 if (ret) 1058 return ret; 1059 1060 /* This moves the bo_va to the done list */ 1061 ret = amdgpu_vm_bo_update(adev, bo_va, false); 1062 if (ret) 1063 return ret; 1064 1065 spin_lock(&vm->status_lock); 1066 } 1067 spin_unlock(&vm->status_lock); 1068 1069 return 0; 1070 } 1071 1072 /* Make sure the whole VM is ready to be used */ 1073 static int 1074 amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) 1075 { 1076 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); 1077 bool invalidated = false, new_addition = false; 1078 struct ttm_operation_ctx ctx = { true, false }; 1079 struct amdgpu_device *adev = uq_mgr->adev; 1080 struct amdgpu_hmm_range *range; 1081 struct amdgpu_vm *vm = &fpriv->vm; 1082 unsigned long key, tmp_key; 1083 struct amdgpu_bo_va *bo_va; 1084 struct amdgpu_bo *bo; 1085 struct drm_exec exec; 1086 struct xarray xa; 1087 int ret; 1088 1089 xa_init(&xa); 1090 1091 retry_lock: 1092 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 1093 drm_exec_until_all_locked(&exec) { 1094 ret = amdgpu_vm_lock_pd(vm, &exec, 1); 1095 drm_exec_retry_on_contention(&exec); 1096 if (unlikely(ret)) 1097 goto unlock_all; 1098 1099 ret = amdgpu_vm_lock_done_list(vm, &exec, 1); 1100 drm_exec_retry_on_contention(&exec); 1101 if (unlikely(ret)) 1102 goto unlock_all; 1103 1104 /* This validates PDs, PTs and per VM BOs */ 1105 ret = amdgpu_vm_validate(adev, vm, NULL, 1106 amdgpu_userq_validate_vm, 1107 NULL); 1108 if (unlikely(ret)) 1109 goto unlock_all; 1110 1111 /* This locks and validates the remaining evicted BOs */ 1112 ret = amdgpu_userq_bo_validate(adev, &exec, vm); 1113 drm_exec_retry_on_contention(&exec); 1114 if (unlikely(ret)) 1115 goto unlock_all; 1116 } 1117 1118 if (invalidated) { 1119 xa_for_each(&xa, tmp_key, range) { 1120 bo = range->bo; 1121 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); 1122 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1123 if (ret) 1124 goto unlock_all; 1125 1126 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); 1127 1128 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 1129 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1130 if (ret) 1131 goto unlock_all; 1132 } 1133 invalidated = false; 1134 } 1135 1136 ret = amdgpu_vm_handle_moved(adev, vm, NULL); 1137 if (ret) 1138 goto unlock_all; 1139 1140 key = 0; 1141 /* Validate User Ptr BOs */ 1142 list_for_each_entry(bo_va, &vm->done, base.vm_status) { 1143 bo = bo_va->base.bo; 1144 if (!bo) 1145 continue; 1146 1147 if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm)) 1148 continue; 1149 1150 range = xa_load(&xa, key); 1151 if (range && range->bo != bo) { 1152 xa_erase(&xa, key); 1153 amdgpu_hmm_range_free(range); 1154 range = NULL; 1155 } 1156 1157 if (!range) { 1158 range = amdgpu_hmm_range_alloc(bo); 1159 if (!range) { 1160 ret = -ENOMEM; 1161 goto unlock_all; 1162 } 1163 1164 xa_store(&xa, key, range, GFP_KERNEL); 1165 new_addition = true; 1166 } 1167 key++; 1168 } 1169 1170 if (new_addition) { 1171 drm_exec_fini(&exec); 1172 xa_for_each(&xa, tmp_key, range) { 1173 if (!range) 1174 continue; 1175 bo = range->bo; 1176 ret = amdgpu_ttm_tt_get_user_pages(bo, range); 1177 if (ret) 1178 goto unlock_all; 1179 } 1180 1181 invalidated = true; 1182 new_addition = false; 1183 goto retry_lock; 1184 } 1185 1186 ret = amdgpu_vm_update_pdes(adev, vm, false); 1187 if (ret) 1188 goto unlock_all; 1189 1190 /* 1191 * We need to wait for all VM updates to finish before restarting the 1192 * queues. Using the done list like that is now ok since everything is 1193 * locked in place. 1194 */ 1195 list_for_each_entry(bo_va, &vm->done, base.vm_status) 1196 dma_fence_wait(bo_va->last_pt_update, false); 1197 dma_fence_wait(vm->last_update, false); 1198 1199 ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec); 1200 if (ret) 1201 drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n"); 1202 1203 unlock_all: 1204 drm_exec_fini(&exec); 1205 xa_for_each(&xa, tmp_key, range) { 1206 if (!range) 1207 continue; 1208 bo = range->bo; 1209 amdgpu_hmm_range_free(range); 1210 } 1211 xa_destroy(&xa); 1212 return ret; 1213 } 1214 1215 static void amdgpu_userq_restore_worker(struct work_struct *work) 1216 { 1217 struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work); 1218 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); 1219 int ret; 1220 1221 flush_delayed_work(&fpriv->evf_mgr.suspend_work); 1222 1223 mutex_lock(&uq_mgr->userq_mutex); 1224 1225 ret = amdgpu_userq_vm_validate(uq_mgr); 1226 if (ret) { 1227 drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); 1228 goto unlock; 1229 } 1230 1231 ret = amdgpu_userq_restore_all(uq_mgr); 1232 if (ret) { 1233 drm_file_err(uq_mgr->file, "Failed to restore all queues\n"); 1234 goto unlock; 1235 } 1236 1237 unlock: 1238 mutex_unlock(&uq_mgr->userq_mutex); 1239 } 1240 1241 static int 1242 amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) 1243 { 1244 struct amdgpu_usermode_queue *queue; 1245 unsigned long queue_id; 1246 int ret = 0, r; 1247 1248 amdgpu_userq_detect_and_reset_queues(uq_mgr); 1249 /* Try to unmap all the queues in this process ctx */ 1250 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) { 1251 queue = amdgpu_userq_get(uq_mgr, queue_id); 1252 if (!queue) 1253 continue; 1254 r = amdgpu_userq_preempt_helper(queue); 1255 if (r) 1256 ret = r; 1257 amdgpu_userq_put(queue); 1258 } 1259 1260 if (ret) 1261 drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n"); 1262 return ret; 1263 } 1264 1265 void amdgpu_userq_reset_work(struct work_struct *work) 1266 { 1267 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, 1268 userq_reset_work); 1269 struct amdgpu_reset_context reset_context; 1270 1271 memset(&reset_context, 0, sizeof(reset_context)); 1272 1273 reset_context.method = AMD_RESET_METHOD_NONE; 1274 reset_context.reset_req_dev = adev; 1275 reset_context.src = AMDGPU_RESET_SRC_USERQ; 1276 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 1277 /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/ 1278 1279 amdgpu_device_gpu_recover(adev, NULL, &reset_context); 1280 } 1281 1282 static int 1283 amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr) 1284 { 1285 struct amdgpu_usermode_queue *queue; 1286 unsigned long queue_id; 1287 int ret; 1288 1289 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) { 1290 queue = amdgpu_userq_get(uq_mgr, queue_id); 1291 if (!queue) 1292 continue; 1293 1294 struct dma_fence *f = queue->last_fence; 1295 1296 if (!f || dma_fence_is_signaled(f)) { 1297 amdgpu_userq_put(queue); 1298 continue; 1299 } 1300 ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); 1301 if (ret <= 0) { 1302 drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", 1303 f->context, f->seqno); 1304 amdgpu_userq_put(queue); 1305 return -ETIMEDOUT; 1306 } 1307 amdgpu_userq_put(queue); 1308 } 1309 1310 return 0; 1311 } 1312 1313 void 1314 amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, 1315 struct amdgpu_eviction_fence *ev_fence) 1316 { 1317 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); 1318 struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr; 1319 struct amdgpu_device *adev = uq_mgr->adev; 1320 int ret; 1321 1322 /* Wait for any pending userqueue fence work to finish */ 1323 ret = amdgpu_userq_wait_for_signal(uq_mgr); 1324 if (ret) 1325 dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n"); 1326 1327 ret = amdgpu_userq_evict_all(uq_mgr); 1328 if (ret) 1329 dev_err(adev->dev, "Failed to evict userqueue\n"); 1330 1331 /* Signal current eviction fence */ 1332 amdgpu_eviction_fence_signal(evf_mgr, ev_fence); 1333 1334 if (evf_mgr->fd_closing) { 1335 cancel_delayed_work_sync(&uq_mgr->resume_work); 1336 return; 1337 } 1338 1339 /* Schedule a resume work */ 1340 schedule_delayed_work(&uq_mgr->resume_work, 0); 1341 } 1342 1343 int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv, 1344 struct amdgpu_device *adev) 1345 { 1346 mutex_init(&userq_mgr->userq_mutex); 1347 xa_init_flags(&userq_mgr->userq_xa, XA_FLAGS_ALLOC); 1348 userq_mgr->adev = adev; 1349 userq_mgr->file = file_priv; 1350 1351 INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker); 1352 return 0; 1353 } 1354 1355 void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) 1356 { 1357 struct amdgpu_usermode_queue *queue; 1358 unsigned long queue_id = 0; 1359 1360 for (;;) { 1361 xa_lock(&userq_mgr->userq_xa); 1362 queue = xa_find(&userq_mgr->userq_xa, &queue_id, ULONG_MAX, 1363 XA_PRESENT); 1364 if (queue) 1365 __xa_erase(&userq_mgr->userq_xa, queue_id); 1366 xa_unlock(&userq_mgr->userq_xa); 1367 1368 if (!queue) 1369 break; 1370 1371 amdgpu_userq_put(queue); 1372 } 1373 1374 xa_destroy(&userq_mgr->userq_xa); 1375 mutex_destroy(&userq_mgr->userq_mutex); 1376 } 1377 1378 int amdgpu_userq_suspend(struct amdgpu_device *adev) 1379 { 1380 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); 1381 struct amdgpu_usermode_queue *queue; 1382 struct amdgpu_userq_mgr *uqm; 1383 unsigned long queue_id; 1384 int r; 1385 1386 if (!ip_mask) 1387 return 0; 1388 1389 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1390 uqm = queue->userq_mgr; 1391 cancel_delayed_work_sync(&uqm->resume_work); 1392 guard(mutex)(&uqm->userq_mutex); 1393 amdgpu_userq_detect_and_reset_queues(uqm); 1394 if (adev->in_s0ix) 1395 r = amdgpu_userq_preempt_helper(queue); 1396 else 1397 r = amdgpu_userq_unmap_helper(queue); 1398 if (r) 1399 return r; 1400 } 1401 return 0; 1402 } 1403 1404 int amdgpu_userq_resume(struct amdgpu_device *adev) 1405 { 1406 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); 1407 struct amdgpu_usermode_queue *queue; 1408 struct amdgpu_userq_mgr *uqm; 1409 unsigned long queue_id; 1410 int r; 1411 1412 if (!ip_mask) 1413 return 0; 1414 1415 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1416 uqm = queue->userq_mgr; 1417 guard(mutex)(&uqm->userq_mutex); 1418 if (adev->in_s0ix) 1419 r = amdgpu_userq_restore_helper(queue); 1420 else 1421 r = amdgpu_userq_map_helper(queue); 1422 if (r) 1423 return r; 1424 } 1425 1426 return 0; 1427 } 1428 1429 int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, 1430 u32 idx) 1431 { 1432 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); 1433 struct amdgpu_usermode_queue *queue; 1434 struct amdgpu_userq_mgr *uqm; 1435 unsigned long queue_id; 1436 int ret = 0, r; 1437 1438 /* only need to stop gfx/compute */ 1439 if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) 1440 return 0; 1441 1442 if (adev->userq_halt_for_enforce_isolation) 1443 dev_warn(adev->dev, "userq scheduling already stopped!\n"); 1444 adev->userq_halt_for_enforce_isolation = true; 1445 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1446 uqm = queue->userq_mgr; 1447 cancel_delayed_work_sync(&uqm->resume_work); 1448 mutex_lock(&uqm->userq_mutex); 1449 if (((queue->queue_type == AMDGPU_HW_IP_GFX) || 1450 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && 1451 (queue->xcp_id == idx)) { 1452 amdgpu_userq_detect_and_reset_queues(uqm); 1453 r = amdgpu_userq_preempt_helper(queue); 1454 if (r) 1455 ret = r; 1456 } 1457 mutex_unlock(&uqm->userq_mutex); 1458 } 1459 1460 return ret; 1461 } 1462 1463 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, 1464 u32 idx) 1465 { 1466 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); 1467 struct amdgpu_usermode_queue *queue; 1468 struct amdgpu_userq_mgr *uqm; 1469 unsigned long queue_id; 1470 int ret = 0, r; 1471 1472 /* only need to stop gfx/compute */ 1473 if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) 1474 return 0; 1475 1476 if (!adev->userq_halt_for_enforce_isolation) 1477 dev_warn(adev->dev, "userq scheduling already started!\n"); 1478 adev->userq_halt_for_enforce_isolation = false; 1479 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1480 uqm = queue->userq_mgr; 1481 mutex_lock(&uqm->userq_mutex); 1482 if (((queue->queue_type == AMDGPU_HW_IP_GFX) || 1483 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && 1484 (queue->xcp_id == idx)) { 1485 r = amdgpu_userq_restore_helper(queue); 1486 if (r) 1487 ret = r; 1488 } 1489 mutex_unlock(&uqm->userq_mutex); 1490 } 1491 1492 return ret; 1493 } 1494 1495 int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, 1496 struct amdgpu_bo_va_mapping *mapping, 1497 uint64_t saddr) 1498 { 1499 u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); 1500 struct amdgpu_bo_va *bo_va = mapping->bo_va; 1501 struct dma_resv *resv = bo_va->base.bo->tbo.base.resv; 1502 int ret = 0; 1503 1504 if (!ip_mask) 1505 return 0; 1506 1507 dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr); 1508 /** 1509 * The userq VA mapping reservation should include the eviction fence, 1510 * if the eviction fence can't signal successfully during unmapping, 1511 * then driver will warn to flag this improper unmap of the userq VA. 1512 * Note: The eviction fence may be attached to different BOs, and this 1513 * unmap is only for one kind of userq VAs, so at this point suppose 1514 * the eviction fence is always unsignaled. 1515 */ 1516 if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) { 1517 ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true, 1518 MAX_SCHEDULE_TIMEOUT); 1519 if (ret <= 0) 1520 return -EBUSY; 1521 } 1522 1523 return 0; 1524 } 1525 1526 void amdgpu_userq_pre_reset(struct amdgpu_device *adev) 1527 { 1528 const struct amdgpu_userq_funcs *userq_funcs; 1529 struct amdgpu_usermode_queue *queue; 1530 struct amdgpu_userq_mgr *uqm; 1531 unsigned long queue_id; 1532 1533 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1534 uqm = queue->userq_mgr; 1535 cancel_delayed_work_sync(&uqm->resume_work); 1536 if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { 1537 amdgpu_userq_wait_for_last_fence(queue); 1538 userq_funcs = adev->userq_funcs[queue->queue_type]; 1539 userq_funcs->unmap(queue); 1540 /* just mark all queues as hung at this point. 1541 * if unmap succeeds, we could map again 1542 * in amdgpu_userq_post_reset() if vram is not lost 1543 */ 1544 queue->state = AMDGPU_USERQ_STATE_HUNG; 1545 amdgpu_userq_fence_driver_force_completion(queue); 1546 } 1547 } 1548 } 1549 1550 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost) 1551 { 1552 /* if any queue state is AMDGPU_USERQ_STATE_UNMAPPED 1553 * at this point, we should be able to map it again 1554 * and continue if vram is not lost. 1555 */ 1556 struct amdgpu_usermode_queue *queue; 1557 const struct amdgpu_userq_funcs *userq_funcs; 1558 unsigned long queue_id; 1559 int r = 0; 1560 1561 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1562 if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) { 1563 userq_funcs = adev->userq_funcs[queue->queue_type]; 1564 /* Re-map queue */ 1565 r = userq_funcs->map(queue); 1566 if (r) { 1567 dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id); 1568 continue; 1569 } 1570 queue->state = AMDGPU_USERQ_STATE_MAPPED; 1571 } 1572 } 1573 1574 return r; 1575 } 1576