// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
#include "amdgpu_userq_fence.h"

u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
{
	int i;
	u32 userq_ip_mask = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		if (adev->userq_funcs[i])
			userq_ip_mask |= (1 << i);
	}

	return userq_ip_mask;
}

static int
amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
			  struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
		r = userq_funcs->unmap(uq_mgr, queue);
		if (r)
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		else
			queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
	}
	return r;
}

static int
amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
			struct amdgpu_usermode_queue *queue)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *userq_funcs =
		adev->userq_funcs[queue->queue_type];
	int r = 0;

	if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
		r = userq_funcs->map(uq_mgr, queue);
		if (r) {
			queue->state = AMDGPU_USERQ_STATE_HUNG;
		} else {
			queue->state = AMDGPU_USERQ_STATE_MAPPED;
		}
	}
	return r;
}

static void
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_usermode_queue *queue)
{
	struct dma_fence *f = queue->last_fence;
	int ret;

	if (f && !dma_fence_is_signaled(f)) {
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0)
			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
				     f->context, f->seqno);
	}
}

static void
amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
		     struct amdgpu_usermode_queue *queue,
		     int queue_id)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];

	uq_funcs->mqd_destroy(uq_mgr, queue);
	amdgpu_userq_fence_driver_free(queue);
	idr_remove(&uq_mgr->userq_idr, queue_id);
	kfree(queue);
}

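/* Return how many user queues of this process are currently mapped on the hardware */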
int
amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0;

	mutex_lock(&uq_mgr->userq_mutex);
	/* Count the queues of this process that are currently mapped */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
		ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;

	mutex_unlock(&uq_mgr->userq_mutex);
	return ret;
}

static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
	return idr_find(&uq_mgr->userq_idr, qid);
}

void
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
			     struct amdgpu_eviction_fence_mgr *evf_mgr)
{
	struct amdgpu_eviction_fence *ev_fence;

retry:
	/* Flush any pending resume work to create ev_fence */
	flush_delayed_work(&uq_mgr->resume_work);

	mutex_lock(&uq_mgr->userq_mutex);
	spin_lock(&evf_mgr->ev_fence_lock);
	ev_fence = evf_mgr->ev_fence;
	spin_unlock(&evf_mgr->ev_fence_lock);
	if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
		mutex_unlock(&uq_mgr->userq_mutex);
		/*
		 * Looks like there was no pending resume work,
		 * add one now to create a valid eviction fence
		 */
		schedule_delayed_work(&uq_mgr->resume_work, 0);
		goto retry;
	}
}

int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
			       struct amdgpu_userq_obj *userq_obj,
			       int size)
{
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		   AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	bp.type = ttm_bo_type_kernel;
	bp.size = size;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
		return r;
	}

	r = amdgpu_bo_reserve(userq_obj->obj, true);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
		goto free_obj;
	}

	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
		goto unresv;
	}

	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
		goto unresv;
	}

	userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
	amdgpu_bo_unreserve(userq_obj->obj);
	memset(userq_obj->cpu_ptr, 0, size);
	return 0;

unresv:
	amdgpu_bo_unreserve(userq_obj->obj);

free_obj:
	amdgpu_bo_unref(&userq_obj->obj);
	return r;
}

void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_userq_obj *userq_obj)
{
	amdgpu_bo_kunmap(userq_obj->obj);
	amdgpu_bo_unref(&userq_obj->obj);
}

uint64_t
amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
				struct amdgpu_db_info *db_info,
				struct drm_file *filp)
{
	uint64_t index;
	struct drm_gem_object *gobj;
	struct amdgpu_userq_obj *db_obj = db_info->db_obj;
	int r, db_size;

	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
	if (gobj == NULL) {
		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
		return -EINVAL;
	}

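	/* Take our own reference on the doorbell BO; the GEM lookup reference is dropped right after */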
	db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	drm_gem_object_put(gobj);

	r = amdgpu_bo_reserve(db_obj->obj, true);
	if (r) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
		goto unref_bo;
	}

	/* Pin the BO before generating the index, unpin in queue destroy */
	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
	if (r) {
		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
		goto unresv_bo;
	}

	switch (db_info->queue_type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
	case AMDGPU_HW_IP_DMA:
		db_size = sizeof(u64);
		break;

	case AMDGPU_HW_IP_VCN_ENC:
		db_size = sizeof(u32);
		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
		break;

	case AMDGPU_HW_IP_VPE:
		db_size = sizeof(u32);
		db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
		break;

	default:
		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
			     db_info->queue_type);
		r = -EINVAL;
		goto unpin_bo;
	}

	index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
					     db_info->doorbell_offset, db_size);
	drm_dbg_driver(adev_to_drm(uq_mgr->adev),
		       "[Usermode queues] doorbell index=%lld\n", index);
	amdgpu_bo_unreserve(db_obj->obj);
	return index;

unpin_bo:
	amdgpu_bo_unpin(db_obj->obj);
unresv_bo:
	amdgpu_bo_unreserve(db_obj->obj);
unref_bo:
	amdgpu_bo_unref(&db_obj->obj);
	return r;
}

static int
amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	int r = 0;

	cancel_delayed_work_sync(&uq_mgr->resume_work);
	mutex_lock(&uq_mgr->userq_mutex);

	queue = amdgpu_userq_find(uq_mgr, queue_id);
	if (!queue) {
		drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
		mutex_unlock(&uq_mgr->userq_mutex);
		return -EINVAL;
	}
	amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
	r = amdgpu_bo_reserve(queue->db_obj.obj, true);
	if (!r) {
		amdgpu_bo_unpin(queue->db_obj.obj);
		amdgpu_bo_unreserve(queue->db_obj.obj);
	}
	amdgpu_bo_unref(&queue->db_obj.obj);
	r = amdgpu_userq_unmap_helper(uq_mgr, queue);
	amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
	mutex_unlock(&uq_mgr->userq_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return r;
}

static int amdgpu_userq_priority_permit(struct drm_file *filp,
					int priority)
{
	if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
	struct amdgpu_device *adev = uq_mgr->adev;
	const struct amdgpu_userq_funcs *uq_funcs;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_db_info db_info;
	bool skip_map_queue;
	uint64_t index;
	int qid, r = 0;
	int priority =
		(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
		AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;

	/* Usermode queues are only supported for GFX, SDMA and compute IPs as of now */
	if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
	    args->in.ip_type != AMDGPU_HW_IP_DMA &&
	    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
		drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
			     args->in.ip_type);
		return -EINVAL;
	}

	r = amdgpu_userq_priority_permit(filp, priority);
	if (r)
		return r;

	if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
	    (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
	    (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
	    !amdgpu_is_tmz(adev)) {
		drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
		return -EINVAL;
	}

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/*
	 * There could be a situation that we are creating a new queue while
	 * the other queues under this UQ_mgr are suspended. So if there is any
	 * resume work pending, wait for it to get done.
	 *
	 * This will also make sure we have a valid eviction fence ready to be used.
	 */
	mutex_lock(&adev->userq_mutex);
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	uq_funcs = adev->userq_funcs[args->in.ip_type];
	if (!uq_funcs) {
		drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
			     args->in.ip_type);
		r = -EINVAL;
		goto unlock;
	}

	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
	if (!queue) {
		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
		r = -ENOMEM;
		goto unlock;
	}
	queue->doorbell_handle = args->in.doorbell_handle;
	queue->queue_type = args->in.ip_type;
	queue->vm = &fpriv->vm;
	queue->priority = priority;

	db_info.queue_type = queue->queue_type;
	db_info.doorbell_handle = queue->doorbell_handle;
	db_info.db_obj = &queue->db_obj;
	db_info.doorbell_offset = args->in.doorbell_offset;

	/* Convert relative doorbell offset into absolute doorbell index */
	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
	if (index == (uint64_t)-EINVAL) {
		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
		kfree(queue);
		r = -EINVAL;
		goto unlock;
	}

	queue->doorbell_index = index;
	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
	r = amdgpu_userq_fence_driver_alloc(adev, queue);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
		kfree(queue);
		goto unlock;
	}

	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
	if (r) {
		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
		amdgpu_userq_fence_driver_free(queue);
		kfree(queue);
		goto unlock;
	}

	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
	if (qid < 0) {
		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
		amdgpu_userq_fence_driver_free(queue);
		uq_funcs->mqd_destroy(uq_mgr, queue);
		kfree(queue);
		r = -ENOMEM;
		goto unlock;
	}

	/* don't map the queue if scheduling is halted */
	if (adev->userq_halt_for_enforce_isolation &&
	    ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
	     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
		skip_map_queue = true;
	else
		skip_map_queue = false;
	if (!skip_map_queue) {
		r = amdgpu_userq_map_helper(uq_mgr, queue);
		if (r) {
			drm_file_err(uq_mgr->file, "Failed to map Queue\n");
			idr_remove(&uq_mgr->userq_idr, qid);
			amdgpu_userq_fence_driver_free(queue);
			uq_funcs->mqd_destroy(uq_mgr, queue);
			kfree(queue);
			goto unlock;
		}
	}

	args->out.queue_id = qid;

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);
	mutex_unlock(&adev->userq_mutex);

	return r;
}

int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *filp)
{
	union drm_amdgpu_userq *args = data;
	int r;

	switch (args->in.op) {
	case AMDGPU_USERQ_OP_CREATE:
		if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
				       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
			return -EINVAL;
		r = amdgpu_userq_create(filp, args);
		if (r)
			drm_file_err(filp, "Failed to create usermode queue\n");
		break;

	case AMDGPU_USERQ_OP_FREE:
		if (args->in.ip_type ||
		    args->in.doorbell_handle ||
		    args->in.doorbell_offset ||
		    args->in.flags ||
		    args->in.queue_va ||
		    args->in.queue_size ||
		    args->in.rptr_va ||
		    args->in.wptr_va ||
		    args->in.mqd ||
		    args->in.mqd_size)
			return -EINVAL;
		r = amdgpu_userq_destroy(filp, args->in.queue_id);
		if (r)
			drm_file_err(filp, "Failed to destroy usermode queue\n");
		break;

	default:
		drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
		return -EINVAL;
	}

	return r;
}

static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0, r;

	/* Resume all the queues for this process */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		r = amdgpu_userq_map_helper(uq_mgr, queue);
		if (r)
			ret = r;
	}

	if (ret)
		drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
	return ret;
}

static int
amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		DRM_ERROR("Failed to validate\n");

	return ret;
}

static int
amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_device *adev = uq_mgr->adev;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx *ticket;
	struct drm_exec exec;
	struct amdgpu_bo *bo;
	struct dma_resv *resv;
	bool clear, unlock;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		ret = amdgpu_vm_lock_pd(vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(ret)) {
			drm_file_err(uq_mgr->file, "Failed to lock PD\n");
			goto unlock_all;
		}

		/* Lock the done list */
		list_for_each_entry(bo_va, &vm->done, base.vm_status) {
			bo = bo_va->base.bo;
			if (!bo)
				continue;

			ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(ret))
				goto unlock_all;
		}
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		ret = amdgpu_vm_bo_update(adev, bo_va, false);
		if (ret)
			goto unlock_all;
		spin_lock(&vm->status_lock);
	}

	ticket = &exec.ticket;
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.base.resv;
		spin_unlock(&vm->status_lock);

		bo = bo_va->base.bo;
		ret = amdgpu_userq_validate_vm_bo(NULL, bo);
		if (ret) {
			drm_file_err(uq_mgr->file, "Failed to validate BO\n");
			goto unlock_all;
		}

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!adev->debug_vm && dma_resv_trylock(resv)) {
			clear = false;
			unlock = true;
		/* The caller is already holding the reservation lock */
		} else if (dma_resv_locking_ctx(resv) == ticket) {
			clear = false;
			unlock = false;
		/* Somebody else is using the BO right now */
		} else {
			clear = true;
			unlock = false;
		}

		ret = amdgpu_vm_bo_update(adev, bo_va, clear);

		if (unlock)
			dma_resv_unlock(resv);
		if (ret)
			goto unlock_all;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
	if (ret)
		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");

unlock_all:
	drm_exec_fini(&exec);
	return ret;
}

static void amdgpu_userq_restore_worker(struct work_struct *work)
{
	struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	int ret;

	flush_work(&fpriv->evf_mgr.suspend_work.work);

	mutex_lock(&uq_mgr->userq_mutex);

	ret = amdgpu_userq_validate_bos(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
		goto unlock;
	}

	ret = amdgpu_userq_restore_all(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
		goto unlock;
	}

unlock:
	mutex_unlock(&uq_mgr->userq_mutex);
}

static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id;
	int ret = 0, r;

	/* Try to unmap all the queues in this process ctx */
	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		r = amdgpu_userq_unmap_helper(uq_mgr, queue);
		if (r)
			ret = r;
	}

	if (ret)
		drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
	return ret;
}

static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
	struct amdgpu_usermode_queue *queue;
	int queue_id, ret;

	idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
		struct dma_fence *f = queue->last_fence;

		if (!f || dma_fence_is_signaled(f))
			continue;
		ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
		if (ret <= 0) {
			drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
				     f->context, f->seqno);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
		   struct amdgpu_eviction_fence *ev_fence)
{
	int ret;
	struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
	struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;

	/* Wait for any pending userqueue fence work to finish */
	ret = amdgpu_userq_wait_for_signal(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
		return;
	}

	ret = amdgpu_userq_evict_all(uq_mgr);
	if (ret) {
		drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
		return;
	}

	/* Signal current eviction fence */
	amdgpu_eviction_fence_signal(evf_mgr, ev_fence);

	if (evf_mgr->fd_closing) {
		cancel_delayed_work_sync(&uq_mgr->resume_work);
		return;
	}

	/* Schedule a resume work */
	schedule_delayed_work(&uq_mgr->resume_work, 0);
}

int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
			  struct amdgpu_device *adev)
{
	mutex_init(&userq_mgr->userq_mutex);
	idr_init_base(&userq_mgr->userq_idr, 1);
	userq_mgr->adev = adev;
	userq_mgr->file = file_priv;

	mutex_lock(&adev->userq_mutex);
	list_add(&userq_mgr->list, &adev->userq_mgr_list);
	mutex_unlock(&adev->userq_mutex);

	INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
	return 0;
}

void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
	struct amdgpu_device *adev = userq_mgr->adev;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	uint32_t queue_id;

	cancel_delayed_work_sync(&userq_mgr->resume_work);

	mutex_lock(&adev->userq_mutex);
	mutex_lock(&userq_mgr->userq_mutex);
	idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
		amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
		amdgpu_userq_unmap_helper(userq_mgr, queue);
		amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
	}

	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		if (uqm == userq_mgr) {
			list_del(&uqm->list);
			break;
		}
	}
	idr_destroy(&userq_mgr->userq_idr);
	mutex_unlock(&userq_mgr->userq_mutex);
	mutex_unlock(&adev->userq_mutex);
	mutex_destroy(&userq_mgr->userq_mutex);
}

int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	if (!ip_mask)
		return 0;

	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		cancel_delayed_work_sync(&uqm->resume_work);
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			r = amdgpu_userq_unmap_helper(uqm, queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

int amdgpu_userq_resume(struct amdgpu_device *adev)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	if (!ip_mask)
		return 0;

	mutex_lock(&adev->userq_mutex);
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			r = amdgpu_userq_map_helper(uqm, queue);
			if (r)
				ret = r;
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
						  u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	/* only need to stop gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	mutex_lock(&adev->userq_mutex);
	if (adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already stopped!\n");
	adev->userq_halt_for_enforce_isolation = true;
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		cancel_delayed_work_sync(&uqm->resume_work);
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
			    (queue->xcp_id == idx)) {
				r = amdgpu_userq_unmap_helper(uqm, queue);
				if (r)
					ret = r;
			}
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}

int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
						   u32 idx)
{
	u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_mgr *uqm, *tmp;
	int queue_id;
	int ret = 0, r;

	/* only need to start gfx/compute */
	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
		return 0;

	mutex_lock(&adev->userq_mutex);
	if (!adev->userq_halt_for_enforce_isolation)
		dev_warn(adev->dev, "userq scheduling already started!\n");
	adev->userq_halt_for_enforce_isolation = false;
	list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
		mutex_lock(&uqm->userq_mutex);
		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
			if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
			     (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
			    (queue->xcp_id == idx)) {
				r = amdgpu_userq_map_helper(uqm, queue);
				if (r)
					ret = r;
			}
		}
		mutex_unlock(&uqm->userq_mutex);
	}
	mutex_unlock(&adev->userq_mutex);
	return ret;
}