// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>

#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_userq_fence.h"

static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;

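/* Create the slab cache used to allocate struct amdgpu_userq_fence objects. */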
int amdgpu_userq_fence_slab_init(void)
{
	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
						    sizeof(struct amdgpu_userq_fence),
						    0,
						    SLAB_HWCACHE_ALIGN,
						    NULL);
	if (!amdgpu_userq_fence_slab)
		return -ENOMEM;

	return 0;
}

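/*
 * Destroy the userq fence slab cache. The rcu_barrier() waits for pending
 * RCU callbacks (see amdgpu_userq_fence_free()) that still return fences
 * to the cache before it is destroyed.
 */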
void amdgpu_userq_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_userq_fence_slab);
}

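/* Return the containing userq fence, or NULL if @f is not a userq fence. */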
static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
	if (!f || f->ops != &amdgpu_userq_fence_ops)
		return NULL;

	return container_of(f, struct amdgpu_userq_fence, base);
}

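/* Read the current RPTR value written to the seq64 memory slot. */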
static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
	return le64_to_cpu(*fence_drv->cpu_addr);
}

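/* Write @seq to the seq64 memory slot, if the CPU mapping is still valid. */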
static void
amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
			 u64 seq)
{
	if (fence_drv->cpu_addr)
		*fence_drv->cpu_addr = cpu_to_le64(seq);
}

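/**
 * amdgpu_userq_fence_driver_alloc - Allocate a fence driver for a user queue
 *
 * @adev: amdgpu device pointer
 * @userq: user mode queue structure pointer
 *
 * Allocate the per-queue fence driver, acquire a seq64 slot for the fence
 * sequence memory, initialize the pending fence list and reference count,
 * and register the driver in adev->userq_xa keyed by the doorbell index.
 *
 * Returns 0 on success, error on failure.
 */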
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
				    struct amdgpu_usermode_queue *userq)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long flags;
	int r;

	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
	if (!fence_drv)
		return -ENOMEM;

	/* Acquire seq64 memory */
	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
			       &fence_drv->cpu_addr);
	if (r)
		goto free_fence_drv;

	memset(fence_drv->cpu_addr, 0, sizeof(u64));

	kref_init(&fence_drv->refcount);
	INIT_LIST_HEAD(&fence_drv->fences);
	spin_lock_init(&fence_drv->fence_list_lock);

	fence_drv->adev = adev;
	fence_drv->context = dma_fence_context_alloc(1);
	get_task_comm(fence_drv->timeline_name, current);

	xa_lock_irqsave(&adev->userq_xa, flags);
	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
			      fence_drv, GFP_KERNEL));
	xa_unlock_irqrestore(&adev->userq_xa, flags);
	if (r)
		goto free_seq64;

	userq->fence_drv = fence_drv;

	return 0;

free_seq64:
	amdgpu_seq64_free(adev, fence_drv->va);
free_fence_drv:
	kfree(fence_drv);

	return r;
}

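/*
 * Erase all fence driver entries stored in @xa and drop the reference
 * each entry holds on its fence driver.
 */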
static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long index;

	if (xa_empty(xa))
		return;

	xa_lock(xa);
	xa_for_each(xa, index, fence_drv) {
		__xa_erase(xa, index);
		amdgpu_userq_fence_driver_put(fence_drv);
	}

	xa_unlock(xa);
}

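/*
 * Release the fence driver state owned by @userq: the last fence reference,
 * every fence driver reference collected in fence_drv_xa, and finally the
 * reference on the queue's own fence driver.
 */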
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
	dma_fence_put(userq->last_fence);

	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
	xa_destroy(&userq->fence_drv_xa);
	/* Drop the fence_drv reference held by user queue */
	amdgpu_userq_fence_driver_put(userq->fence_drv);
}

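/**
 * amdgpu_userq_fence_driver_process - Signal completed user queue fences
 *
 * @fence_drv: fence driver to process
 *
 * Read the RPTR value from the seq64 memory and signal every pending fence
 * whose seqno has been reached, dropping the fence driver references the
 * fence collected at creation time.
 */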
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
	struct amdgpu_userq_fence *userq_fence, *tmp;
	struct dma_fence *fence;
	unsigned long flags;
	u64 rptr;
	int i;

	if (!fence_drv)
		return;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	rptr = amdgpu_userq_fence_read(fence_drv);

	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
		fence = &userq_fence->base;

		if (rptr < fence->seqno)
			break;

		dma_fence_signal(fence);

		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);

		list_del(&userq_fence->link);
		dma_fence_put(fence);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

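/*
 * Final kref release callback for a fence driver: cancel and signal any
 * fences that are still pending, remove the driver from adev->userq_xa,
 * then free the seq64 slot and the driver itself.
 */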
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
					 struct amdgpu_userq_fence_driver,
					 refcount);
	struct amdgpu_userq_fence_driver *xa_fence_drv;
	struct amdgpu_device *adev = fence_drv->adev;
	struct amdgpu_userq_fence *fence, *tmp;
	struct xarray *xa = &adev->userq_xa;
	unsigned long index, flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
		f = &fence->base;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_set_error(f, -ECANCELED);
			dma_fence_signal(f);
		}

		list_del(&fence->link);
		dma_fence_put(f);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	xa_lock_irqsave(xa, flags);
	xa_for_each(xa, index, xa_fence_drv)
		if (xa_fence_drv == fence_drv)
			__xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);

	/* Free seq64 memory */
	amdgpu_seq64_free(adev, fence_drv->va);
	kfree(fence_drv);
}

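/* Take a reference on the fence driver. */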
void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_get(&fence_drv->refcount);
}

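/*
 * Drop a fence driver reference; the last put frees it via
 * amdgpu_userq_fence_driver_destroy().
 */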
void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}

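/* Allocate a userq fence object from the slab cache. */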
static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
	return *userq_fence ? 0 : -ENOMEM;
}

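/**
 * amdgpu_userq_fence_create - Initialize and publish a user queue fence
 *
 * @userq: user mode queue the fence belongs to
 * @userq_fence: preallocated fence object to initialize
 * @seq: sequence (WPTR) value the fence signals at
 * @f: pointer used to return the initialized dma_fence
 *
 * Initialize the dma_fence on the queue's fence driver context, take over
 * the fence driver references collected in userq->fence_drv_xa, and add the
 * fence to the driver's pending list unless the hardware has already
 * processed it.
 *
 * Returns 0 on success, error on failure.
 */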
static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
				     struct amdgpu_userq_fence *userq_fence,
				     u64 seq, struct dma_fence **f)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	struct dma_fence *fence;
	unsigned long flags;

	fence_drv = userq->fence_drv;
	if (!fence_drv)
		return -EINVAL;

	spin_lock_init(&userq_fence->lock);
	INIT_LIST_HEAD(&userq_fence->link);
	fence = &userq_fence->base;
	userq_fence->fence_drv = fence_drv;

	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
			 fence_drv->context, seq);

	amdgpu_userq_fence_driver_get(fence_drv);
	dma_fence_get(fence);

	if (!xa_empty(&userq->fence_drv_xa)) {
		struct amdgpu_userq_fence_driver *stored_fence_drv;
		unsigned long index, count = 0;
		int i = 0;

		xa_lock(&userq->fence_drv_xa);
		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
			count++;

		userq_fence->fence_drv_array =
			kvmalloc_array(count,
				       sizeof(struct amdgpu_userq_fence_driver *),
				       GFP_ATOMIC);

		if (userq_fence->fence_drv_array) {
			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
				userq_fence->fence_drv_array[i] = stored_fence_drv;
				__xa_erase(&userq->fence_drv_xa, index);
				i++;
			}
		}

		userq_fence->fence_drv_array_count = i;
		xa_unlock(&userq->fence_drv_xa);
	} else {
		userq_fence->fence_drv_array = NULL;
		userq_fence->fence_drv_array_count = 0;
	}

	/* Check if hardware has already processed the job */
	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	if (!dma_fence_is_signaled(fence))
		list_add_tail(&userq_fence->link, &fence_drv->fences);
	else
		dma_fence_put(fence);

	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	*f = fence;

	return 0;
}

static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
	return "amdgpu_userq_fence";
}

static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);

	return fence->fence_drv->timeline_name;
}

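/* A userq fence is signaled once the RPTR in seq64 memory reaches its seqno. */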
static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	u64 rptr, wptr;

	rptr = amdgpu_userq_fence_read(fence_drv);
	wptr = fence->base.seqno;

	if (rptr >= wptr)
		return true;

	return false;
}

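/*
 * RCU callback that releases the fence driver reference taken at fence
 * creation, frees the collected fence driver array and returns the fence
 * object to the slab cache.
 */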
static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;

	/* Release the fence driver reference */
	amdgpu_userq_fence_driver_put(fence_drv);

	kvfree(userq_fence->fence_drv_array);
	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
}

static void amdgpu_userq_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_userq_fence_free);
}

static const struct dma_fence_ops amdgpu_userq_fence_ops = {
	.get_driver_name = amdgpu_userq_fence_get_driver_name,
	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
	.signaled = amdgpu_userq_fence_signaled,
	.release = amdgpu_userq_fence_release,
};

/**
 * amdgpu_userq_fence_read_wptr - Read the userq wptr value
 *
 * @queue: user mode queue structure pointer
 * @wptr: pointer used to return the write pointer value
 *
 * Read the wptr value from the userq's MQD. The userq signal IOCTL
 * creates a dma_fence for the shared buffers that is considered
 * signaled once the RPTR value written to seq64 memory is >= WPTR.
 *
 * Returns 0 on success with the wptr value stored in @wptr, error on failure.
 */
static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
					u64 *wptr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	u64 addr, *ptr;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	addr = queue->userq_prop->wptr_gpu_addr;
	addr &= AMDGPU_GMC_HOLE_MASK;

	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
	if (!mapping) {
		amdgpu_bo_unreserve(queue->vm->root.bo);
		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
		return -EINVAL;
	}

	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
	amdgpu_bo_unreserve(queue->vm->root.bo);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		DRM_ERROR("Failed to reserve userqueue wptr bo");
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&ptr);
	if (r) {
		DRM_ERROR("Failed mapping the userqueue wptr bo");
		goto map_error;
	}

	*wptr = le64_to_cpu(*ptr);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;

map_error:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}

static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
	dma_fence_put(fence);
}

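/* Mark the fence with @error if it has not been signaled yet. */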
static void
amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
				    int error)
{
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	unsigned long flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);

	f = rcu_dereference_protected(&fence->base,
				      lockdep_is_held(&fence_drv->fence_list_lock));
	if (f && !dma_fence_is_signaled_locked(f))
		dma_fence_set_error(f, error);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

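/*
 * Force completion of the queue's last fence: mark it with -ECANCELED,
 * write its WPTR value to the seq64 memory and run the fence driver
 * processing to signal it.
 */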
void
amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
{
	struct dma_fence *f = userq->last_fence;

	if (f) {
		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
		u64 wptr = fence->base.seqno;

		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
		amdgpu_userq_fence_write(fence_drv, wptr);
		amdgpu_userq_fence_driver_process(fence_drv);
	}
}

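/**
 * amdgpu_userq_signal_ioctl - Attach a user queue fence to syncobjs and BOs
 *
 * @dev: drm device pointer
 * @data: ioctl argument (struct drm_amdgpu_userq_signal)
 * @filp: drm file pointer
 *
 * Look up the syncobj handles and the read/write BO handles provided by
 * userspace, read the queue's current WPTR, create a userq fence for that
 * value and install it in the BO reservation objects and the syncobjs so
 * that other users can wait for the queue to catch up.
 *
 * Returns 0 on success, error on failure.
 */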
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_amdgpu_userq_signal *args = data;
	struct drm_gem_object **gobj_write = NULL;
	struct drm_gem_object **gobj_read = NULL;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_fence *userq_fence;
	struct drm_syncobj **syncobj = NULL;
	u32 *bo_handles_write, num_write_bo_handles;
	u32 *syncobj_handles, num_syncobj_handles;
	u32 *bo_handles_read, num_read_bo_handles;
	int r, i, entry, rentry, wentry;
	struct dma_fence *fence;
	struct drm_exec exec;
	u64 wptr;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	num_syncobj_handles = args->num_syncobj_handles;
	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
				      size_mul(sizeof(u32), num_syncobj_handles));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	/* Array of pointers to the looked up syncobjs */
	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
	if (!syncobj) {
		r = -ENOMEM;
		goto free_syncobj_handles;
	}

	for (entry = 0; entry < num_syncobj_handles; entry++) {
		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
		if (!syncobj[entry]) {
			r = -ENOENT;
			goto free_syncobj;
		}
	}

	num_read_bo_handles = args->num_bo_read_handles;
	bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
				      sizeof(u32) * num_read_bo_handles);
	if (IS_ERR(bo_handles_read)) {
		r = PTR_ERR(bo_handles_read);
		goto free_syncobj;
	}

	/* Array of pointers to the GEM read objects */
	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
	if (!gobj_read) {
		r = -ENOMEM;
		goto free_bo_handles_read;
	}

	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
		if (!gobj_read[rentry]) {
			r = -ENOENT;
			goto put_gobj_read;
		}
	}

	num_write_bo_handles = args->num_bo_write_handles;
	bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
				       sizeof(u32) * num_write_bo_handles);
	if (IS_ERR(bo_handles_write)) {
		r = PTR_ERR(bo_handles_write);
		goto put_gobj_read;
	}

	/* Array of pointers to the GEM write objects */
	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
	if (!gobj_write) {
		r = -ENOMEM;
		goto free_bo_handles_write;
	}

	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
		if (!gobj_write[wentry]) {
			r = -ENOENT;
			goto put_gobj_write;
		}
	}

	/* Retrieve the user queue */
	queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
	if (!queue) {
		r = -ENOENT;
		goto put_gobj_write;
	}

	r = amdgpu_userq_fence_read_wptr(queue, &wptr);
	if (r)
		goto put_gobj_write;

	r = amdgpu_userq_fence_alloc(&userq_fence);
	if (r)
		goto put_gobj_write;

	/* The queue is active at this point, so make sure the eviction fence is valid */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
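	/*
	 * amdgpu_userq_ensure_ev_fence() is expected to return with
	 * userq_mutex held; it is dropped below once the new fence is
	 * published as queue->last_fence.
	 */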

	/* Create a new fence */
	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
	if (r) {
		mutex_unlock(&userq_mgr->userq_mutex);
		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
		goto put_gobj_write;
	}

	dma_fence_put(queue->last_fence);
	queue->last_fence = dma_fence_get(fence);
	mutex_unlock(&userq_mgr->userq_mutex);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}
	}

	for (i = 0; i < num_read_bo_handles; i++) {
		if (!gobj_read || !gobj_read[i]->resv)
			continue;

		dma_resv_add_fence(gobj_read[i]->resv, fence,
				   DMA_RESV_USAGE_READ);
	}

	for (i = 0; i < num_write_bo_handles; i++) {
		if (!gobj_write || !gobj_write[i]->resv)
			continue;

		dma_resv_add_fence(gobj_write[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
	}

	/* Add the created fence to the syncobjs */
	for (i = 0; i < num_syncobj_handles; i++)
		drm_syncobj_replace_fence(syncobj[i], fence);

	/* drop the reference acquired in fence creation function */
	dma_fence_put(fence);

exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	while (wentry-- > 0)
		drm_gem_object_put(gobj_write[wentry]);
	kfree(gobj_write);
free_bo_handles_write:
	kfree(bo_handles_write);
put_gobj_read:
	while (rentry-- > 0)
		drm_gem_object_put(gobj_read[rentry]);
	kfree(gobj_read);
free_bo_handles_read:
	kfree(bo_handles_read);
free_syncobj:
	while (entry-- > 0)
		if (syncobj[entry])
			drm_syncobj_put(syncobj[entry]);
	kfree(syncobj);
free_syncobj_handles:
	kfree(syncobj_handles);

	return r;
}

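/**
 * amdgpu_userq_wait_ioctl - Collect the fences a user queue has to wait for
 *
 * @dev: drm device pointer
 * @data: ioctl argument (struct drm_amdgpu_userq_wait)
 * @filp: drm file pointer
 *
 * Gather the fences attached to the given syncobjs, timeline syncobj points
 * and read/write BOs. When wait_info->num_fences is zero only the number of
 * fences is returned; otherwise userq fences are reported back as seq64 GPU
 * address/value pairs for userspace to wait on, while non-userq fences are
 * waited on directly in the kernel.
 *
 * Returns 0 on success, error on failure.
 */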
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
	struct drm_amdgpu_userq_wait *wait_info = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct amdgpu_usermode_queue *waitq;
	struct drm_gem_object **gobj_write;
	struct drm_gem_object **gobj_read;
	struct dma_fence **fences = NULL;
	u16 num_points, num_fences = 0;
	int r, i, rentry, wentry, cnt;
	struct drm_exec exec;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	num_read_bo_handles = wait_info->num_bo_read_handles;
	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
				      size_mul(sizeof(u32), num_read_bo_handles));
	if (IS_ERR(bo_handles_read))
		return PTR_ERR(bo_handles_read);

	num_write_bo_handles = wait_info->num_bo_write_handles;
	bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
				       size_mul(sizeof(u32), num_write_bo_handles));
	if (IS_ERR(bo_handles_write)) {
		r = PTR_ERR(bo_handles_write);
		goto free_bo_handles_read;
	}

	num_syncobj = wait_info->num_syncobj_handles;
	syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
				      size_mul(sizeof(u32), num_syncobj));
	if (IS_ERR(syncobj_handles)) {
		r = PTR_ERR(syncobj_handles);
		goto free_bo_handles_write;
	}

	num_points = wait_info->num_syncobj_timeline_handles;
	timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
				       sizeof(u32) * num_points);
	if (IS_ERR(timeline_handles)) {
		r = PTR_ERR(timeline_handles);
		goto free_syncobj_handles;
	}

	timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
				      sizeof(u32) * num_points);
	if (IS_ERR(timeline_points)) {
		r = PTR_ERR(timeline_points);
		goto free_timeline_handles;
	}

	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
	if (!gobj_read) {
		r = -ENOMEM;
		goto free_timeline_points;
	}

	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
		if (!gobj_read[rentry]) {
			r = -ENOENT;
			goto put_gobj_read;
		}
	}

	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
	if (!gobj_write) {
		r = -ENOMEM;
		goto put_gobj_read;
	}

	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
		if (!gobj_write[wentry]) {
			r = -ENOENT;
			goto put_gobj_write;
		}
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			drm_exec_fini(&exec);
			goto put_gobj_write;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			drm_exec_fini(&exec);
			goto put_gobj_write;
		}
	}

	if (!wait_info->num_fences) {
		if (num_points) {
			struct dma_fence_unwrap iter;
			struct dma_fence *fence;
			struct dma_fence *f;

			for (i = 0; i < num_points; i++) {
				r = drm_syncobj_find_fence(filp, timeline_handles[i],
							   timeline_points[i],
							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
							   &fence);
				if (r)
					goto exec_fini;

				dma_fence_unwrap_for_each(f, &iter, fence)
					num_fences++;

				dma_fence_put(fence);
			}
		}

		/* Count the syncobj fences */
		for (i = 0; i < num_syncobj; i++) {
			struct dma_fence *fence;

			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
						   0,
						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						   &fence);
			if (r)
				goto exec_fini;

			num_fences++;
			dma_fence_put(fence);
		}

		/* Count the GEM object fences */
		for (i = 0; i < num_read_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
						DMA_RESV_USAGE_READ, fence)
				num_fences++;
		}

		for (i = 0; i < num_write_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
						DMA_RESV_USAGE_WRITE, fence)
				num_fences++;
		}

		/*
		 * Passing num_fences = 0 means that userspace doesn't want to
		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
		 * userq_fence_info and return the actual number of fences in
		 * wait_info->num_fences.
		 */
		wait_info->num_fences = num_fences;
	} else {
		/* Array of fence info */
		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
		if (!fence_info) {
			r = -ENOMEM;
			goto exec_fini;
		}

		/* Array of fences */
		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
		if (!fences) {
			r = -ENOMEM;
			goto free_fence_info;
		}

		/* Retrieve the GEM read object fences */
		for (i = 0; i < num_read_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
						DMA_RESV_USAGE_READ, fence) {
				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
					r = -EINVAL;
					goto free_fences;
				}

				fences[num_fences++] = fence;
				dma_fence_get(fence);
			}
		}

		/* Retrieve the GEM write object fences */
		for (i = 0; i < num_write_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
						DMA_RESV_USAGE_WRITE, fence) {
				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
					r = -EINVAL;
					goto free_fences;
				}

				fences[num_fences++] = fence;
				dma_fence_get(fence);
			}
		}

		if (num_points) {
			struct dma_fence_unwrap iter;
			struct dma_fence *fence;
			struct dma_fence *f;

			for (i = 0; i < num_points; i++) {
				r = drm_syncobj_find_fence(filp, timeline_handles[i],
							   timeline_points[i],
							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
							   &fence);
				if (r)
					goto free_fences;

				dma_fence_unwrap_for_each(f, &iter, fence) {
					if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
						r = -EINVAL;
						goto free_fences;
					}

					dma_fence_get(f);
					fences[num_fences++] = f;
				}

				dma_fence_put(fence);
			}
		}

		/* Retrieve the syncobj fences */
		for (i = 0; i < num_syncobj; i++) {
			struct dma_fence *fence;

			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
						   0,
						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						   &fence);
			if (r)
				goto free_fences;

			if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
				r = -EINVAL;
				goto free_fences;
			}

			fences[num_fences++] = fence;
		}

		/*
		 * Keep only the latest fences to reduce the number of values
		 * given back to userspace.
		 */
		num_fences = dma_fence_dedup_array(fences, num_fences);

		waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
		if (!waitq) {
			r = -EINVAL;
			goto free_fences;
		}

		for (i = 0, cnt = 0; i < num_fences; i++) {
			struct amdgpu_userq_fence_driver *fence_drv;
			struct amdgpu_userq_fence *userq_fence;
			u32 index;

			userq_fence = to_amdgpu_userq_fence(fences[i]);
			if (!userq_fence) {
				/*
				 * Just waiting on other driver fences should
				 * be good for now
				 */
				r = dma_fence_wait(fences[i], true);
				if (r) {
					dma_fence_put(fences[i]);
					goto free_fences;
				}

				dma_fence_put(fences[i]);
				continue;
			}

			fence_drv = userq_fence->fence_drv;
			/*
			 * We need to make sure the user queue releases its
			 * references to the fence drivers at some point before
			 * queue destruction. Otherwise, we would gather those
			 * references until we don't have any more space left
			 * and crash.
			 */
			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
				     xa_limit_32b, GFP_KERNEL);
			if (r)
				goto free_fences;

			amdgpu_userq_fence_driver_get(fence_drv);

			/* Store the fence driver's seq64 GPU VA and the fence seqno value */
			fence_info[cnt].va = fence_drv->va;
			fence_info[cnt].value = fences[i]->seqno;

			dma_fence_put(fences[i]);
			/* Increment the actual userq fence count */
			cnt++;
		}

		wait_info->num_fences = cnt;
		/* Copy userq fence info to user space */
		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
			r = -EFAULT;
			goto free_fences;
		}

		kfree(fences);
		kfree(fence_info);
	}

	drm_exec_fini(&exec);
	for (i = 0; i < num_read_bo_handles; i++)
		drm_gem_object_put(gobj_read[i]);
	kfree(gobj_read);

	for (i = 0; i < num_write_bo_handles; i++)
		drm_gem_object_put(gobj_write[i]);
	kfree(gobj_write);

	kfree(timeline_points);
	kfree(timeline_handles);
	kfree(syncobj_handles);
	kfree(bo_handles_write);
	kfree(bo_handles_read);

	return 0;

free_fences:
	while (num_fences-- > 0)
		dma_fence_put(fences[num_fences]);
	kfree(fences);
free_fence_info:
	kfree(fence_info);
exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	while (wentry-- > 0)
		drm_gem_object_put(gobj_write[wentry]);
	kfree(gobj_write);
put_gobj_read:
	while (rentry-- > 0)
		drm_gem_object_put(gobj_read[rentry]);
	kfree(gobj_read);
free_timeline_points:
	kfree(timeline_points);
free_timeline_handles:
	kfree(timeline_handles);
free_syncobj_handles:
	kfree(syncobj_handles);
free_bo_handles_write:
	kfree(bo_handles_write);
free_bo_handles_read:
	kfree(bo_handles_read);

	return r;
}