// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>

#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_userq_fence.h"

static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;

#define AMDGPU_USERQ_MAX_HANDLES (1U << 16)

int amdgpu_userq_fence_slab_init(void)
{
	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
						    sizeof(struct amdgpu_userq_fence),
						    0,
						    SLAB_HWCACHE_ALIGN,
						    NULL);
	if (!amdgpu_userq_fence_slab)
		return -ENOMEM;

	return 0;
}

void amdgpu_userq_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_userq_fence_slab);
}

static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
	if (!f || f->ops != &amdgpu_userq_fence_ops)
		return NULL;

	return container_of(f, struct amdgpu_userq_fence, base);
}

static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
	return le64_to_cpu(*fence_drv->cpu_addr);
}

static void
amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
			 u64 seq)
{
	if (fence_drv->cpu_addr)
		*fence_drv->cpu_addr = cpu_to_le64(seq);
}
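
/**
 * amdgpu_userq_fence_driver_alloc - Allocate the per queue fence driver
 *
 * @adev: amdgpu_device pointer
 * @userq: user mode queue structure pointer
 *
 * Allocate and initialize the fence driver for a user queue: reserve the
 * seq64 memory the fence RPTR value is read from, initialize the pending
 * fence list and register the driver in the device wide userq_xa xarray,
 * indexed by the queue's doorbell index.
 *
 * Returns 0 on success, negative error code on failure.
 */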
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
				    struct amdgpu_usermode_queue *userq)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long flags;
	int r;

	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
	if (!fence_drv)
		return -ENOMEM;

	/* Acquire seq64 memory */
	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
			       &fence_drv->cpu_addr);
	if (r)
		goto free_fence_drv;

	memset(fence_drv->cpu_addr, 0, sizeof(u64));

	kref_init(&fence_drv->refcount);
	INIT_LIST_HEAD(&fence_drv->fences);
	spin_lock_init(&fence_drv->fence_list_lock);

	fence_drv->adev = adev;
	fence_drv->context = dma_fence_context_alloc(1);
	get_task_comm(fence_drv->timeline_name, current);

	xa_lock_irqsave(&adev->userq_xa, flags);
	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
			      fence_drv, GFP_KERNEL));
	xa_unlock_irqrestore(&adev->userq_xa, flags);
	if (r)
		goto free_seq64;

	userq->fence_drv = fence_drv;

	return 0;

free_seq64:
	amdgpu_seq64_free(adev, fence_drv->va);
free_fence_drv:
	kfree(fence_drv);

	return r;
}

static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long index;

	if (xa_empty(xa))
		return;

	xa_lock(xa);
	xa_for_each(xa, index, fence_drv) {
		__xa_erase(xa, index);
		amdgpu_userq_fence_driver_put(fence_drv);
	}

	xa_unlock(xa);
}

void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
	dma_fence_put(userq->last_fence);

	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
	xa_destroy(&userq->fence_drv_xa);
	/* Drop the fence_drv reference held by user queue */
	amdgpu_userq_fence_driver_put(userq->fence_drv);
}
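
/**
 * amdgpu_userq_fence_driver_process - Signal completed user queue fences
 *
 * @fence_drv: fence driver of the queue to process
 *
 * Read the current RPTR value from the seq64 memory and signal every
 * pending fence on the driver's list whose sequence number has been
 * reached, dropping the references that were held for those fences.
 */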
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
	struct amdgpu_userq_fence *userq_fence, *tmp;
	struct dma_fence *fence;
	unsigned long flags;
	u64 rptr;
	int i;

	if (!fence_drv)
		return;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	rptr = amdgpu_userq_fence_read(fence_drv);

	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
		fence = &userq_fence->base;

		if (rptr < fence->seqno)
			break;

		dma_fence_signal(fence);

		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);

		list_del(&userq_fence->link);
		dma_fence_put(fence);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
					 struct amdgpu_userq_fence_driver,
					 refcount);
	struct amdgpu_userq_fence_driver *xa_fence_drv;
	struct amdgpu_device *adev = fence_drv->adev;
	struct amdgpu_userq_fence *fence, *tmp;
	struct xarray *xa = &adev->userq_xa;
	unsigned long index, flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
		f = &fence->base;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_set_error(f, -ECANCELED);
			dma_fence_signal(f);
		}

		list_del(&fence->link);
		dma_fence_put(f);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	xa_lock_irqsave(xa, flags);
	xa_for_each(xa, index, xa_fence_drv)
		if (xa_fence_drv == fence_drv)
			__xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);

	/* Free seq64 memory */
	amdgpu_seq64_free(adev, fence_drv->va);
	kfree(fence_drv);
}

void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_get(&fence_drv->refcount);
}

void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}

static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
	return *userq_fence ? 0 : -ENOMEM;
}
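
/**
 * amdgpu_userq_fence_create - Initialize a user queue fence
 *
 * @userq: user mode queue the fence belongs to
 * @userq_fence: preallocated fence object to initialize
 * @seq: sequence number (WPTR value) the fence signals at
 * @f: location to return the initialized dma_fence in
 *
 * Initialize the dma_fence, take over the fence driver references stashed
 * in the queue's fence_drv_xa and, unless the hardware has already passed
 * @seq, add the fence to the fence driver's pending list. The fence is
 * considered signaled once the RPTR in the seq64 memory reaches @seq.
 *
 * Returns 0 on success, negative error code on failure.
 */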
static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
				     struct amdgpu_userq_fence *userq_fence,
				     u64 seq, struct dma_fence **f)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	struct dma_fence *fence;
	unsigned long flags;

	fence_drv = userq->fence_drv;
	if (!fence_drv)
		return -EINVAL;

	spin_lock_init(&userq_fence->lock);
	INIT_LIST_HEAD(&userq_fence->link);
	fence = &userq_fence->base;
	userq_fence->fence_drv = fence_drv;

	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
			 fence_drv->context, seq);

	amdgpu_userq_fence_driver_get(fence_drv);
	dma_fence_get(fence);

	if (!xa_empty(&userq->fence_drv_xa)) {
		struct amdgpu_userq_fence_driver *stored_fence_drv;
		unsigned long index, count = 0;
		int i = 0;

		xa_lock(&userq->fence_drv_xa);
		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
			count++;

		userq_fence->fence_drv_array =
			kvmalloc_array(count,
				       sizeof(struct amdgpu_userq_fence_driver *),
				       GFP_ATOMIC);

		if (userq_fence->fence_drv_array) {
			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
				userq_fence->fence_drv_array[i] = stored_fence_drv;
				__xa_erase(&userq->fence_drv_xa, index);
				i++;
			}
		}

		userq_fence->fence_drv_array_count = i;
		xa_unlock(&userq->fence_drv_xa);
	} else {
		userq_fence->fence_drv_array = NULL;
		userq_fence->fence_drv_array_count = 0;
	}

	/* Check if hardware has already processed the job */
	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	if (!dma_fence_is_signaled(fence))
		list_add_tail(&userq_fence->link, &fence_drv->fences);
	else
		dma_fence_put(fence);

	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	*f = fence;

	return 0;
}

static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
	return "amdgpu_userq_fence";
}

static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);

	return fence->fence_drv->timeline_name;
}

static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	u64 rptr, wptr;

	rptr = amdgpu_userq_fence_read(fence_drv);
	wptr = fence->base.seqno;

	if (rptr >= wptr)
		return true;

	return false;
}

static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;

	/* Release the fence driver reference */
	amdgpu_userq_fence_driver_put(fence_drv);

	kvfree(userq_fence->fence_drv_array);
	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
}

static void amdgpu_userq_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_userq_fence_free);
}

static const struct dma_fence_ops amdgpu_userq_fence_ops = {
	.get_driver_name = amdgpu_userq_fence_get_driver_name,
	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
	.signaled = amdgpu_userq_fence_signaled,
	.release = amdgpu_userq_fence_release,
};

/**
 * amdgpu_userq_fence_read_wptr - Read the userq wptr value
 *
 * @adev: amdgpu_device pointer
 * @queue: user mode queue structure pointer
 * @wptr: location to return the write pointer value in
 *
 * Read the wptr value from userq's MQD. The userq signal IOCTL
 * creates a dma_fence for the shared buffers that is considered
 * signaled once the RPTR value written to the seq64 memory is
 * greater than or equal to this WPTR.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
					struct amdgpu_usermode_queue *queue,
					u64 *wptr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	u64 addr, *ptr;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	addr = queue->userq_prop->wptr_gpu_addr;
	addr &= AMDGPU_GMC_HOLE_MASK;

	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
	if (!mapping) {
		amdgpu_bo_unreserve(queue->vm->root.bo);
		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
		return -EINVAL;
	}

	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
	amdgpu_bo_unreserve(queue->vm->root.bo);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&ptr);
	if (r) {
		DRM_ERROR("Failed mapping the userqueue wptr bo\n");
		goto map_error;
	}

	*wptr = le64_to_cpu(*ptr);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;

map_error:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}

static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
	dma_fence_put(fence);
}

static void
amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
				    int error)
{
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	unsigned long flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);

	f = rcu_dereference_protected(&fence->base,
				      lockdep_is_held(&fence_drv->fence_list_lock));
	if (f && !dma_fence_is_signaled_locked(f))
		dma_fence_set_error(f, error);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

void
amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
{
	struct dma_fence *f = userq->last_fence;

	if (f) {
		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
		u64 wptr = fence->base.seqno;

		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
		amdgpu_userq_fence_write(fence_drv, wptr);
		amdgpu_userq_fence_driver_process(fence_drv);
	}
}
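
/**
 * amdgpu_userq_signal_ioctl - Attach a userq fence to syncobjs and BOs
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_signal)
 * @filp: drm file pointer
 *
 * Read the queue's current WPTR, create a fence that signals once the
 * hardware RPTR reaches it, and attach that fence to the supplied
 * syncobjs and to the reservation objects of the read/write BO handles.
 *
 * Returns 0 on success, negative error code on failure.
 */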
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_userq_signal *args = data;
	const unsigned int num_write_bo_handles = args->num_bo_write_handles;
	const unsigned int num_read_bo_handles = args->num_bo_read_handles;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_gem_object **gobj_write, **gobj_read;
	u32 *syncobj_handles, num_syncobj_handles;
	struct amdgpu_userq_fence *userq_fence;
	struct amdgpu_usermode_queue *queue = NULL;
	struct drm_syncobj **syncobj = NULL;
	struct dma_fence *fence;
	struct drm_exec exec;
	int r, i, entry;
	u64 wptr;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
		return -EINVAL;

	num_syncobj_handles = args->num_syncobj_handles;
	syncobj_handles = memdup_array_user(u64_to_user_ptr(args->syncobj_handles),
					    num_syncobj_handles, sizeof(u32));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	/* Array of pointers to the looked up syncobjs */
	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
	if (!syncobj) {
		r = -ENOMEM;
		goto free_syncobj_handles;
	}

	for (entry = 0; entry < num_syncobj_handles; entry++) {
		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
		if (!syncobj[entry]) {
			r = -ENOENT;
			goto free_syncobj;
		}
	}

	r = drm_gem_objects_lookup(filp,
				   u64_to_user_ptr(args->bo_read_handles),
				   num_read_bo_handles,
				   &gobj_read);
	if (r)
		goto free_syncobj;

	r = drm_gem_objects_lookup(filp,
				   u64_to_user_ptr(args->bo_write_handles),
				   num_write_bo_handles,
				   &gobj_write);
	if (r)
		goto put_gobj_read;

	/* Retrieve the user queue */
	queue = amdgpu_userq_get(userq_mgr, args->queue_id);
	if (!queue) {
		r = -ENOENT;
		goto put_gobj_write;
	}

	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
	if (r)
		goto put_gobj_write;

	r = amdgpu_userq_fence_alloc(&userq_fence);
	if (r)
		goto put_gobj_write;

	/* The queue is active at this point, so make sure the eviction fence is valid */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	/* Create a new fence */
	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
	if (r) {
		mutex_unlock(&userq_mgr->userq_mutex);
		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
		goto put_gobj_write;
	}

	dma_fence_put(queue->last_fence);
	queue->last_fence = dma_fence_get(fence);
	amdgpu_userq_start_hang_detect_work(queue);
	mutex_unlock(&userq_mgr->userq_mutex);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}
	}

	for (i = 0; i < num_read_bo_handles; i++) {
		if (!gobj_read || !gobj_read[i]->resv)
			continue;

		dma_resv_add_fence(gobj_read[i]->resv, fence,
				   DMA_RESV_USAGE_READ);
	}

	for (i = 0; i < num_write_bo_handles; i++) {
		if (!gobj_write || !gobj_write[i]->resv)
			continue;

		dma_resv_add_fence(gobj_write[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
	}

	/* Add the created fence to the syncobjs */
	for (i = 0; i < num_syncobj_handles; i++)
		drm_syncobj_replace_fence(syncobj[i], fence);

	/* drop the reference acquired in fence creation function */
	dma_fence_put(fence);

exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	for (i = 0; i < num_write_bo_handles; i++)
		drm_gem_object_put(gobj_write[i]);
	kvfree(gobj_write);
put_gobj_read:
	for (i = 0; i < num_read_bo_handles; i++)
		drm_gem_object_put(gobj_read[i]);
	kvfree(gobj_read);
free_syncobj:
	while (entry-- > 0)
		if (syncobj[entry])
			drm_syncobj_put(syncobj[entry]);
	kfree(syncobj);
free_syncobj_handles:
	kfree(syncobj_handles);

	if (queue)
		amdgpu_userq_put(queue);

	return r;
}

/* Count the number of expected fences so userspace can alloc a buffer */
static int
amdgpu_userq_wait_count_fences(struct drm_file *filp,
			       struct drm_amdgpu_userq_wait *wait_info,
			       u32 *syncobj_handles, u32 *timeline_points,
			       u32 *timeline_handles,
			       struct drm_gem_object **gobj_write,
			       struct drm_gem_object **gobj_read)
{
	int num_read_bo_handles, num_write_bo_handles;
	struct dma_fence_unwrap iter;
	struct dma_fence *fence, *f;
	unsigned int num_fences = 0;
	struct drm_exec exec;
	int i, r;

	/*
	 * This needs to be outside of the lock provided by drm_exec for
	 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT to work correctly.
	 */

	/* Count timeline fences */
	for (i = 0; i < wait_info->num_syncobj_timeline_handles; i++) {
		r = drm_syncobj_find_fence(filp, timeline_handles[i],
					   timeline_points[i],
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			return r;

		dma_fence_unwrap_for_each(f, &iter, fence)
			num_fences++;

		dma_fence_put(fence);
	}

	/* Count boolean fences */
	for (i = 0; i < wait_info->num_syncobj_handles; i++) {
		r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			return r;

		num_fences++;
		dma_fence_put(fence);
	}

	/* Lock all the GEM objects */
	/* TODO: It is actually not necessary to lock them */
	num_read_bo_handles = wait_info->num_bo_read_handles;
	num_write_bo_handles = wait_info->num_bo_write_handles;
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      num_read_bo_handles + num_write_bo_handles);

	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read,
					   num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;

		r = drm_exec_prepare_array(&exec, gobj_write,
					   num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;
	}

	/* Count read fences */
	for (i = 0; i < num_read_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
					DMA_RESV_USAGE_READ, fence)
			num_fences++;
	}

	/* Count write fences */
	for (i = 0; i < num_write_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
					DMA_RESV_USAGE_WRITE, fence)
			num_fences++;
	}

	wait_info->num_fences = min(num_fences, USHRT_MAX);
	r = 0;

error_unlock:
	/* Unlock all the GEM objects */
	drm_exec_fini(&exec);
	return r;
}

static int
amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info,
			    struct dma_fence **fences, unsigned int *num_fences,
			    struct dma_fence *fence)
{
	/* As a fallback, wait on the fence if userspace didn't allocate enough space */
	if (*num_fences >= wait_info->num_fences)
		return dma_fence_wait(fence, true);

	fences[(*num_fences)++] = dma_fence_get(fence);
	return 0;
}
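
/**
 * amdgpu_userq_wait_return_fence_info - Collect fence info for userspace
 *
 * @filp: drm file pointer
 * @wait_info: ioctl arguments (struct drm_amdgpu_userq_wait)
 * @syncobj_handles: array of boolean syncobj handles to wait on
 * @timeline_points: array of points for the timeline syncobj handles
 * @timeline_handles: array of timeline syncobj handles to wait on
 * @gobj_write: looked up GEM objects for the write BO handles
 * @gobj_read: looked up GEM objects for the read BO handles
 *
 * Gather all fences from the syncobjs, timeline points and BO reservation
 * objects, wait for any fence that is not a userq fence, and copy the
 * seq64 GPU VA and expected value of the remaining userq fences back to
 * userspace so it can wait on them directly.
 *
 * Returns 0 on success, negative error code on failure.
 */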
static int
amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
				    struct drm_amdgpu_userq_wait *wait_info,
				    u32 *syncobj_handles, u32 *timeline_points,
				    u32 *timeline_handles,
				    struct drm_gem_object **gobj_write,
				    struct drm_gem_object **gobj_read)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_amdgpu_userq_fence_info *fence_info;
	int num_read_bo_handles, num_write_bo_handles;
	struct amdgpu_usermode_queue *waitq;
	struct dma_fence **fences, *fence, *f;
	struct dma_fence_unwrap iter;
	int num_points, num_syncobj;
	unsigned int num_fences = 0;
	struct drm_exec exec;
	int i, cnt, r;

	fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info),
				   GFP_KERNEL);
	if (!fence_info)
		return -ENOMEM;

	fences = kmalloc_array(wait_info->num_fences, sizeof(*fences),
			       GFP_KERNEL);
	if (!fences) {
		r = -ENOMEM;
		goto free_fence_info;
	}

	/* Retrieve timeline fences */
	num_points = wait_info->num_syncobj_timeline_handles;
	for (i = 0; i < num_points; i++) {
		r = drm_syncobj_find_fence(filp, timeline_handles[i],
					   timeline_points[i],
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			goto free_fences;

		dma_fence_unwrap_for_each(f, &iter, fence) {
			r = amdgpu_userq_wait_add_fence(wait_info, fences,
							&num_fences, f);
			if (r) {
				dma_fence_put(fence);
				goto free_fences;
			}
		}

		dma_fence_put(fence);
	}

	/* Retrieve boolean fences */
	num_syncobj = wait_info->num_syncobj_handles;
	for (i = 0; i < num_syncobj; i++) {
		struct dma_fence *fence;

		r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			goto free_fences;

		r = amdgpu_userq_wait_add_fence(wait_info, fences,
						&num_fences, fence);
		dma_fence_put(fence);
		if (r)
			goto free_fences;
	}

	/* Lock all the GEM objects */
	num_read_bo_handles = wait_info->num_bo_read_handles;
	num_write_bo_handles = wait_info->num_bo_write_handles;
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      num_read_bo_handles + num_write_bo_handles);

	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read,
					   num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;

		r = drm_exec_prepare_array(&exec, gobj_write,
					   num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;
	}

	/* Retrieve GEM read objects fence */
	for (i = 0; i < num_read_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
					DMA_RESV_USAGE_READ, fence) {
			r = amdgpu_userq_wait_add_fence(wait_info, fences,
							&num_fences, fence);
			if (r)
				goto error_unlock;
		}
	}

	/* Retrieve GEM write objects fence */
	for (i = 0; i < num_write_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
					DMA_RESV_USAGE_WRITE, fence) {
			r = amdgpu_userq_wait_add_fence(wait_info, fences,
							&num_fences, fence);
			if (r)
				goto error_unlock;
		}
	}

	drm_exec_fini(&exec);

	/*
	 * Keep only the latest fences to reduce the number of values
	 * given back to userspace.
	 */
	num_fences = dma_fence_dedup_array(fences, num_fences);

	waitq = amdgpu_userq_get(userq_mgr, wait_info->waitq_id);
	if (!waitq) {
		r = -EINVAL;
		goto free_fences;
	}

	for (i = 0, cnt = 0; i < num_fences; i++) {
		struct amdgpu_userq_fence_driver *fence_drv;
		struct amdgpu_userq_fence *userq_fence;
		u32 index;

		userq_fence = to_amdgpu_userq_fence(fences[i]);
		if (!userq_fence) {
			/*
			 * Just waiting on other driver fences should
			 * be good for now
			 */
			r = dma_fence_wait(fences[i], true);
			if (r)
				goto put_waitq;

			continue;
		}

		fence_drv = userq_fence->fence_drv;
		/*
		 * We need to make sure the user queue releases its references
		 * to the fence drivers at some point before queue destruction.
		 * Otherwise, we would gather those references until we don't
		 * have any more space left and crash.
		 */
		r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
			     xa_limit_32b, GFP_KERNEL);
		if (r)
			goto put_waitq;

		amdgpu_userq_fence_driver_get(fence_drv);

		/* Store the seq64 GPU VA and the value to wait for */
		fence_info[cnt].va = fence_drv->va;
		fence_info[cnt].value = fences[i]->seqno;

		/* Increment the actual userq fence count */
		cnt++;
	}
	wait_info->num_fences = cnt;

	/* Copy userq fence info to user space */
	if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
			 fence_info, cnt * sizeof(*fence_info)))
		r = -EFAULT;
	else
		r = 0;

put_waitq:
	amdgpu_userq_put(waitq);

free_fences:
	while (num_fences--)
		dma_fence_put(fences[num_fences]);
	kfree(fences);

free_fence_info:
	kfree(fence_info);
	return r;

error_unlock:
	drm_exec_fini(&exec);
	goto free_fences;
}
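
/**
 * amdgpu_userq_wait_ioctl - Query the fences a user queue must wait on
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_wait)
 * @filp: drm file pointer
 *
 * Look up the syncobj, timeline and BO handles passed in by userspace and
 * either count the fences behind them (num_fences == 0) or return the
 * seq64 address/value pairs userspace needs to wait on (num_fences != 0).
 *
 * Returns 0 on success, negative error code on failure.
 */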
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	int num_points, num_syncobj, num_read_bo_handles, num_write_bo_handles;
	u32 *syncobj_handles, *timeline_points, *timeline_handles;
	struct drm_amdgpu_userq_wait *wait_info = data;
	struct drm_gem_object **gobj_write;
	struct drm_gem_object **gobj_read;
	void __user *ptr;
	int r;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
		return -EINVAL;

	num_syncobj = wait_info->num_syncobj_handles;
	ptr = u64_to_user_ptr(wait_info->syncobj_handles);
	syncobj_handles = memdup_array_user(ptr, num_syncobj, sizeof(u32));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	num_points = wait_info->num_syncobj_timeline_handles;
	ptr = u64_to_user_ptr(wait_info->syncobj_timeline_handles);
	timeline_handles = memdup_array_user(ptr, num_points, sizeof(u32));
	if (IS_ERR(timeline_handles)) {
		r = PTR_ERR(timeline_handles);
		goto free_syncobj_handles;
	}

	ptr = u64_to_user_ptr(wait_info->syncobj_timeline_points);
	timeline_points = memdup_array_user(ptr, num_points, sizeof(u32));
	if (IS_ERR(timeline_points)) {
		r = PTR_ERR(timeline_points);
		goto free_timeline_handles;
	}

	num_read_bo_handles = wait_info->num_bo_read_handles;
	ptr = u64_to_user_ptr(wait_info->bo_read_handles);
	r = drm_gem_objects_lookup(filp, ptr, num_read_bo_handles, &gobj_read);
	if (r)
		goto free_timeline_points;

	num_write_bo_handles = wait_info->num_bo_write_handles;
	ptr = u64_to_user_ptr(wait_info->bo_write_handles);
	r = drm_gem_objects_lookup(filp, ptr, num_write_bo_handles,
				   &gobj_write);
	if (r)
		goto put_gobj_read;

	/*
	 * Passing num_fences = 0 means that userspace doesn't want to
	 * retrieve userq_fence_info. In that case we skip filling
	 * userq_fence_info and only return the actual number of fences
	 * in wait_info->num_fences.
	 */
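	/*
	 * Illustrative two-step userspace flow (a sketch only; take the
	 * exact ioctl macro and field usage from the amdgpu_drm.h uapi):
	 *
	 *   struct drm_amdgpu_userq_wait wait = { ..., .num_fences = 0 };
	 *   drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &wait);    query the count
	 *   infos = calloc(wait.num_fences, sizeof(*infos));
	 *   wait.out_fences = (__u64)(uintptr_t)infos;
	 *   drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, &wait);    fetch fence info
	 */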
	if (!wait_info->num_fences) {
		r = amdgpu_userq_wait_count_fences(filp, wait_info,
						   syncobj_handles,
						   timeline_points,
						   timeline_handles,
						   gobj_write,
						   gobj_read);
	} else {
		r = amdgpu_userq_wait_return_fence_info(filp, wait_info,
							syncobj_handles,
							timeline_points,
							timeline_handles,
							gobj_write,
							gobj_read);
	}

	while (num_write_bo_handles--)
		drm_gem_object_put(gobj_write[num_write_bo_handles]);
	kvfree(gobj_write);

put_gobj_read:
	while (num_read_bo_handles--)
		drm_gem_object_put(gobj_read[num_read_bo_handles]);
	kvfree(gobj_read);

free_timeline_points:
	kfree(timeline_points);
free_timeline_handles:
	kfree(timeline_handles);
free_syncobj_handles:
	kfree(syncobj_handles);
	return r;
}