// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>

#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_userq_fence.h"

static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;

int amdgpu_userq_fence_slab_init(void)
{
	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
						    sizeof(struct amdgpu_userq_fence),
						    0,
						    SLAB_HWCACHE_ALIGN,
						    NULL);
	if (!amdgpu_userq_fence_slab)
		return -ENOMEM;

	return 0;
}

void amdgpu_userq_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_userq_fence_slab);
}

static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
	if (!f || f->ops != &amdgpu_userq_fence_ops)
		return NULL;

	return container_of(f, struct amdgpu_userq_fence, base);
}

static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
	return le64_to_cpu(*fence_drv->cpu_addr);
}

static void
amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
			 u64 seq)
{
	if (fence_drv->cpu_addr)
		*fence_drv->cpu_addr = cpu_to_le64(seq);
}

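/**
 * amdgpu_userq_fence_driver_alloc - Allocate a fence driver for a user queue
 *
 * @adev: amdgpu device pointer
 * @userq: user mode queue structure pointer
 *
 * Allocate the per-queue fence driver, acquire a seq64 slot used as the
 * fence sequence memory, initialize the fence list and reference count,
 * and register the driver in the device xarray indexed by the queue's
 * doorbell index.
 *
 * Returns 0 on success, negative error code on failure.
 */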
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
				    struct amdgpu_usermode_queue *userq)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long flags;
	int r;

	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
	if (!fence_drv)
		return -ENOMEM;

	/* Acquire seq64 memory */
	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
			       &fence_drv->cpu_addr);
	if (r)
		goto free_fence_drv;

	memset(fence_drv->cpu_addr, 0, sizeof(u64));

	kref_init(&fence_drv->refcount);
	INIT_LIST_HEAD(&fence_drv->fences);
	spin_lock_init(&fence_drv->fence_list_lock);

	fence_drv->adev = adev;
	fence_drv->context = dma_fence_context_alloc(1);
	get_task_comm(fence_drv->timeline_name, current);

	xa_lock_irqsave(&adev->userq_xa, flags);
	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
			      fence_drv, GFP_KERNEL));
	xa_unlock_irqrestore(&adev->userq_xa, flags);
	if (r)
		goto free_seq64;

	userq->fence_drv = fence_drv;

	return 0;

free_seq64:
	amdgpu_seq64_free(adev, fence_drv->va);
free_fence_drv:
	kfree(fence_drv);

	return r;
}

static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long index;

	if (xa_empty(xa))
		return;

	xa_lock(xa);
	xa_for_each(xa, index, fence_drv) {
		__xa_erase(xa, index);
		amdgpu_userq_fence_driver_put(fence_drv);
	}

	xa_unlock(xa);
}

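/**
 * amdgpu_userq_fence_driver_free - Release fence driver references of a user queue
 *
 * @userq: user mode queue structure pointer
 *
 * Drop all fence driver references gathered in the queue's fence_drv_xa
 * (added by the wait IOCTL), destroy the xarray and drop the queue's own
 * reference to its fence driver.
 */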
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
	xa_destroy(&userq->fence_drv_xa);
	/* Drop the fence_drv reference held by user queue */
	amdgpu_userq_fence_driver_put(userq->fence_drv);
}

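/**
 * amdgpu_userq_fence_driver_process - Signal completed user queue fences
 *
 * @fence_drv: fence driver structure pointer
 *
 * Read the current sequence value from the seq64 memory and signal every
 * pending fence on the driver's list whose sequence number has been
 * reached, dropping the references each fence holds on the fence drivers
 * it collected at creation time.
 */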
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
	struct amdgpu_userq_fence *userq_fence, *tmp;
	struct dma_fence *fence;
	unsigned long flags;
	u64 rptr;
	int i;

	if (!fence_drv)
		return;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	rptr = amdgpu_userq_fence_read(fence_drv);

	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
		fence = &userq_fence->base;

		if (rptr < fence->seqno)
			break;

		dma_fence_signal(fence);

		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);

		list_del(&userq_fence->link);
		dma_fence_put(fence);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

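/**
 * amdgpu_userq_fence_driver_destroy - Final fence driver cleanup
 *
 * @ref: kref embedded in the fence driver
 *
 * Called when the last reference to the fence driver is dropped. Any
 * fences still pending are completed with -ECANCELED, the driver is
 * removed from the device xarray, the seq64 slot is returned and the
 * structure is freed.
 */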
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
					 struct amdgpu_userq_fence_driver,
					 refcount);
	struct amdgpu_userq_fence_driver *xa_fence_drv;
	struct amdgpu_device *adev = fence_drv->adev;
	struct amdgpu_userq_fence *fence, *tmp;
	struct xarray *xa = &adev->userq_xa;
	unsigned long index, flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
		f = &fence->base;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_set_error(f, -ECANCELED);
			dma_fence_signal(f);
		}

		list_del(&fence->link);
		dma_fence_put(f);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	xa_lock_irqsave(xa, flags);
	xa_for_each(xa, index, xa_fence_drv)
		if (xa_fence_drv == fence_drv)
			__xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);

	/* Free seq64 memory */
	amdgpu_seq64_free(adev, fence_drv->va);
	kfree(fence_drv);
}

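/* Reference counting helpers for the per-queue fence driver */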
void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_get(&fence_drv->refcount);
}

void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}

static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
	return *userq_fence ? 0 : -ENOMEM;
}

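/**
 * amdgpu_userq_fence_create - Initialize a user queue fence
 *
 * @userq: user mode queue structure pointer
 * @userq_fence: pre-allocated user queue fence
 * @seq: sequence (wptr) value the fence will signal on
 * @f: resulting dma_fence pointer
 *
 * Initialize the dma_fence on the queue's fence timeline, take a
 * reference on the fence driver, move any fence driver references
 * stashed in the queue's fence_drv_xa into the fence's array so they
 * are released when the fence signals, and queue the fence on the
 * driver's list unless the hardware has already passed the sequence.
 *
 * Returns 0 on success, -EINVAL if the queue has no fence driver.
 */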
static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
				     struct amdgpu_userq_fence *userq_fence,
				     u64 seq, struct dma_fence **f)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	struct dma_fence *fence;
	unsigned long flags;

	fence_drv = userq->fence_drv;
	if (!fence_drv)
		return -EINVAL;

	spin_lock_init(&userq_fence->lock);
	INIT_LIST_HEAD(&userq_fence->link);
	fence = &userq_fence->base;
	userq_fence->fence_drv = fence_drv;

	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
			 fence_drv->context, seq);

	amdgpu_userq_fence_driver_get(fence_drv);
	dma_fence_get(fence);

	if (!xa_empty(&userq->fence_drv_xa)) {
		struct amdgpu_userq_fence_driver *stored_fence_drv;
		unsigned long index, count = 0;
		int i = 0;

		xa_lock(&userq->fence_drv_xa);
		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
			count++;

		userq_fence->fence_drv_array =
			kvmalloc_array(count,
				       sizeof(struct amdgpu_userq_fence_driver *),
				       GFP_ATOMIC);

		if (userq_fence->fence_drv_array) {
			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
				userq_fence->fence_drv_array[i] = stored_fence_drv;
				__xa_erase(&userq->fence_drv_xa, index);
				i++;
			}
		}

		userq_fence->fence_drv_array_count = i;
		xa_unlock(&userq->fence_drv_xa);
	} else {
		userq_fence->fence_drv_array = NULL;
		userq_fence->fence_drv_array_count = 0;
	}

	/* Check if hardware has already processed the job */
	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	if (!dma_fence_is_signaled(fence))
		list_add_tail(&userq_fence->link, &fence_drv->fences);
	else
		dma_fence_put(fence);

	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	*f = fence;

	return 0;
}

static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
	return "amdgpu_userq_fence";
}

static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);

	return fence->fence_drv->timeline_name;
}

static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	u64 rptr, wptr;

	rptr = amdgpu_userq_fence_read(fence_drv);
	wptr = fence->base.seqno;

	if (rptr >= wptr)
		return true;

	return false;
}

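/*
 * Fence objects are freed through call_rcu() (see amdgpu_userq_fence_release()
 * below) so that RCU readers still dereferencing the fence can finish before
 * the fence driver reference is dropped and the memory returns to the slab;
 * this pairs with the rcu_barrier() in amdgpu_userq_fence_slab_fini().
 */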
static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;

	/* Release the fence driver reference */
	amdgpu_userq_fence_driver_put(fence_drv);

	kvfree(userq_fence->fence_drv_array);
	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
}

static void amdgpu_userq_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_userq_fence_free);
}

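/*
 * No .enable_signaling callback is provided; user queue fences are signaled
 * from amdgpu_userq_fence_driver_process() once the sequence value in the
 * seq64 memory reaches the fence's seqno, or force-completed through
 * amdgpu_userq_fence_driver_force_completion().
 */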
static const struct dma_fence_ops amdgpu_userq_fence_ops = {
	.get_driver_name = amdgpu_userq_fence_get_driver_name,
	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
	.signaled = amdgpu_userq_fence_signaled,
	.release = amdgpu_userq_fence_release,
};

/**
 * amdgpu_userq_fence_read_wptr - Read the userq wptr value
 *
 * @queue: user mode queue structure pointer
 * @wptr: write pointer value
 *
 * Read the wptr value from userq's MQD. The userq signal IOCTL
 * creates a dma_fence for the shared buffers that expects the
 * RPTR value written to seq64 memory >= WPTR.
 *
 * Returns 0 on success and stores the wptr value in @wptr,
 * negative error code on failure.
 */
static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
					u64 *wptr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	u64 addr, *ptr;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	addr = queue->userq_prop->wptr_gpu_addr;
	addr &= AMDGPU_GMC_HOLE_MASK;

	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
	if (!mapping) {
		amdgpu_bo_unreserve(queue->vm->root.bo);
		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
		return -EINVAL;
	}

	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
	amdgpu_bo_unreserve(queue->vm->root.bo);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		DRM_ERROR("Failed to reserve userqueue wptr bo");
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&ptr);
	if (r) {
		DRM_ERROR("Failed mapping the userqueue wptr bo");
		goto map_error;
	}

	*wptr = le64_to_cpu(*ptr);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;

map_error:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}

static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
	dma_fence_put(fence);
}

static void
amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
				    int error)
{
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	unsigned long flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);

	f = rcu_dereference_protected(&fence->base,
				      lockdep_is_held(&fence_drv->fence_list_lock));
	if (f && !dma_fence_is_signaled_locked(f))
		dma_fence_set_error(f, error);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

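/**
 * amdgpu_userq_fence_driver_force_completion - Force completion of the last fence
 *
 * @userq: user mode queue structure pointer
 *
 * Mark the queue's last fence with -ECANCELED, write its sequence value
 * to the seq64 memory and run the fence processing path so that every
 * pending fence up to that point gets signaled.
 */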
void
amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
{
	struct dma_fence *f = userq->last_fence;

	if (f) {
		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
		u64 wptr = fence->base.seqno;

		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
		amdgpu_userq_fence_write(fence_drv, wptr);
		amdgpu_userq_fence_driver_process(fence_drv);
	}
}

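/**
 * amdgpu_userq_signal_ioctl - Signal IOCTL for user queues
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_signal)
 * @filp: drm file pointer
 *
 * Create a user queue fence for the current wptr value of the given
 * queue and install it in the supplied syncobjs as well as in the
 * reservation objects of the read and write BO handles, so that other
 * submissions can synchronize against the user mode submission.
 *
 * Returns 0 on success, negative error code on failure.
 */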
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_amdgpu_userq_signal *args = data;
	struct drm_gem_object **gobj_write = NULL;
	struct drm_gem_object **gobj_read = NULL;
	struct amdgpu_usermode_queue *queue;
	struct amdgpu_userq_fence *userq_fence;
	struct drm_syncobj **syncobj = NULL;
	u32 *bo_handles_write, num_write_bo_handles;
	u32 *syncobj_handles, num_syncobj_handles;
	u32 *bo_handles_read, num_read_bo_handles;
	int r, i, entry, rentry, wentry;
	struct dma_fence *fence;
	struct drm_exec exec;
	u64 wptr;

	num_syncobj_handles = args->num_syncobj_handles;
	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
				      size_mul(sizeof(u32), num_syncobj_handles));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	/* Array of pointers to the looked up syncobjs */
	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
	if (!syncobj) {
		r = -ENOMEM;
		goto free_syncobj_handles;
	}

	for (entry = 0; entry < num_syncobj_handles; entry++) {
		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
		if (!syncobj[entry]) {
			r = -ENOENT;
			goto free_syncobj;
		}
	}

	num_read_bo_handles = args->num_bo_read_handles;
	bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
				      sizeof(u32) * num_read_bo_handles);
	if (IS_ERR(bo_handles_read)) {
		r = PTR_ERR(bo_handles_read);
		goto free_syncobj;
	}

	/* Array of pointers to the GEM read objects */
	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
	if (!gobj_read) {
		r = -ENOMEM;
		goto free_bo_handles_read;
	}

	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
		if (!gobj_read[rentry]) {
			r = -ENOENT;
			goto put_gobj_read;
		}
	}

	num_write_bo_handles = args->num_bo_write_handles;
	bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
				       sizeof(u32) * num_write_bo_handles);
	if (IS_ERR(bo_handles_write)) {
		r = PTR_ERR(bo_handles_write);
		goto put_gobj_read;
	}

	/* Array of pointers to the GEM write objects */
	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
	if (!gobj_write) {
		r = -ENOMEM;
		goto free_bo_handles_write;
	}

	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
		if (!gobj_write[wentry]) {
			r = -ENOENT;
			goto put_gobj_write;
		}
	}

	/* Retrieve the user queue */
	queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
	if (!queue) {
		r = -ENOENT;
		goto put_gobj_write;
	}

	r = amdgpu_userq_fence_read_wptr(queue, &wptr);
	if (r)
		goto put_gobj_write;

	r = amdgpu_userq_fence_alloc(&userq_fence);
	if (r)
		goto put_gobj_write;

	/* Getting here means the UQ is active, so make sure the eviction fence is valid */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	/* Create a new fence */
	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
	if (r) {
		mutex_unlock(&userq_mgr->userq_mutex);
		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
		goto put_gobj_write;
	}

	dma_fence_put(queue->last_fence);
	queue->last_fence = dma_fence_get(fence);
	mutex_unlock(&userq_mgr->userq_mutex);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}
	}

	for (i = 0; i < num_read_bo_handles; i++) {
		if (!gobj_read || !gobj_read[i]->resv)
			continue;

		dma_resv_add_fence(gobj_read[i]->resv, fence,
				   DMA_RESV_USAGE_READ);
	}

	for (i = 0; i < num_write_bo_handles; i++) {
		if (!gobj_write || !gobj_write[i]->resv)
			continue;

		dma_resv_add_fence(gobj_write[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
	}

	/* Add the created fence to syncobj/BO's */
	for (i = 0; i < num_syncobj_handles; i++)
		drm_syncobj_replace_fence(syncobj[i], fence);

	/* drop the reference acquired in fence creation function */
	dma_fence_put(fence);

exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	while (wentry-- > 0)
		drm_gem_object_put(gobj_write[wentry]);
	kfree(gobj_write);
free_bo_handles_write:
	kfree(bo_handles_write);
put_gobj_read:
	while (rentry-- > 0)
		drm_gem_object_put(gobj_read[rentry]);
	kfree(gobj_read);
free_bo_handles_read:
	kfree(bo_handles_read);
free_syncobj:
	while (entry-- > 0)
		if (syncobj[entry])
			drm_syncobj_put(syncobj[entry]);
	kfree(syncobj);
free_syncobj_handles:
	kfree(syncobj_handles);

	return r;
}

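/**
 * amdgpu_userq_wait_ioctl - Wait IOCTL for user queues
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_wait)
 * @filp: drm file pointer
 *
 * Collect the fences attached to the supplied syncobjs, timeline syncobj
 * points and BO reservation objects. When wait_info->num_fences is zero
 * only the number of fences found is returned; otherwise user queue
 * fences are reported back as (seq64 GPU VA, value) pairs for the user
 * mode driver to wait on, while fences from other drivers are waited on
 * in the kernel.
 *
 * Returns 0 on success, negative error code on failure.
 */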
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
	struct drm_amdgpu_userq_wait *wait_info = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct amdgpu_usermode_queue *waitq;
	struct drm_gem_object **gobj_write;
	struct drm_gem_object **gobj_read;
	struct dma_fence **fences = NULL;
	u16 num_points, num_fences = 0;
	int r, i, rentry, wentry, cnt;
	struct drm_exec exec;

	num_read_bo_handles = wait_info->num_bo_read_handles;
	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
				      size_mul(sizeof(u32), num_read_bo_handles));
	if (IS_ERR(bo_handles_read))
		return PTR_ERR(bo_handles_read);

	num_write_bo_handles = wait_info->num_bo_write_handles;
	bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
				       size_mul(sizeof(u32), num_write_bo_handles));
	if (IS_ERR(bo_handles_write)) {
		r = PTR_ERR(bo_handles_write);
		goto free_bo_handles_read;
	}

	num_syncobj = wait_info->num_syncobj_handles;
	syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
				      size_mul(sizeof(u32), num_syncobj));
	if (IS_ERR(syncobj_handles)) {
		r = PTR_ERR(syncobj_handles);
		goto free_bo_handles_write;
	}

	num_points = wait_info->num_syncobj_timeline_handles;
	timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
				       sizeof(u32) * num_points);
	if (IS_ERR(timeline_handles)) {
		r = PTR_ERR(timeline_handles);
		goto free_syncobj_handles;
	}

	timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
				      sizeof(u32) * num_points);
	if (IS_ERR(timeline_points)) {
		r = PTR_ERR(timeline_points);
		goto free_timeline_handles;
	}

	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
	if (!gobj_read) {
		r = -ENOMEM;
		goto free_timeline_points;
	}

	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
		if (!gobj_read[rentry]) {
			r = -ENOENT;
			goto put_gobj_read;
		}
	}

	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
	if (!gobj_write) {
		r = -ENOMEM;
		goto put_gobj_read;
	}

	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
		if (!gobj_write[wentry]) {
			r = -ENOENT;
			goto put_gobj_write;
		}
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			drm_exec_fini(&exec);
			goto put_gobj_write;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			drm_exec_fini(&exec);
			goto put_gobj_write;
		}
	}

	if (!wait_info->num_fences) {
		if (num_points) {
			struct dma_fence_unwrap iter;
			struct dma_fence *fence;
			struct dma_fence *f;

			for (i = 0; i < num_points; i++) {
				r = drm_syncobj_find_fence(filp, timeline_handles[i],
							   timeline_points[i],
							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
							   &fence);
				if (r)
					goto exec_fini;

				dma_fence_unwrap_for_each(f, &iter, fence)
					num_fences++;

				dma_fence_put(fence);
			}
		}

		/* Count syncobj's fence */
		for (i = 0; i < num_syncobj; i++) {
			struct dma_fence *fence;

			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
						   0,
						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						   &fence);
			if (r)
				goto exec_fini;

			num_fences++;
			dma_fence_put(fence);
		}

		/* Count GEM objects fence */
		for (i = 0; i < num_read_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
						DMA_RESV_USAGE_READ, fence)
				num_fences++;
		}

		for (i = 0; i < num_write_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
						DMA_RESV_USAGE_WRITE, fence)
				num_fences++;
		}

		/*
		 * Passing num_fences = 0 means that userspace doesn't want to
		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
		 * userq_fence_info and return the actual number of fences in
		 * wait_info->num_fences.
		 */
		wait_info->num_fences = num_fences;
	} else {
		/* Array of fence info */
		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
		if (!fence_info) {
			r = -ENOMEM;
			goto exec_fini;
		}

		/* Array of fences */
		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
		if (!fences) {
			r = -ENOMEM;
			goto free_fence_info;
		}

		/* Retrieve GEM read objects fence */
		for (i = 0; i < num_read_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
						DMA_RESV_USAGE_READ, fence) {
				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
					r = -EINVAL;
					goto free_fences;
				}

				fences[num_fences++] = fence;
				dma_fence_get(fence);
			}
		}

		/* Retrieve GEM write objects fence */
		for (i = 0; i < num_write_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
						DMA_RESV_USAGE_WRITE, fence) {
				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
					r = -EINVAL;
					goto free_fences;
				}

				fences[num_fences++] = fence;
				dma_fence_get(fence);
			}
		}

		if (num_points) {
			struct dma_fence_unwrap iter;
			struct dma_fence *fence;
			struct dma_fence *f;

			for (i = 0; i < num_points; i++) {
				r = drm_syncobj_find_fence(filp, timeline_handles[i],
							   timeline_points[i],
							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
							   &fence);
				if (r)
					goto free_fences;

				dma_fence_unwrap_for_each(f, &iter, fence) {
					if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
						r = -EINVAL;
						goto free_fences;
					}

					dma_fence_get(f);
					fences[num_fences++] = f;
				}

				dma_fence_put(fence);
			}
		}

		/* Retrieve syncobj's fence */
		for (i = 0; i < num_syncobj; i++) {
			struct dma_fence *fence;

			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
						   0,
						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						   &fence);
			if (r)
				goto free_fences;

			if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
				r = -EINVAL;
				goto free_fences;
			}

			fences[num_fences++] = fence;
		}

		/*
		 * Keep only the latest fences to reduce the number of values
		 * given back to userspace.
		 */
		num_fences = dma_fence_dedup_array(fences, num_fences);

		waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
		if (!waitq) {
			r = -EINVAL;
			goto free_fences;
		}

		for (i = 0, cnt = 0; i < num_fences; i++) {
			struct amdgpu_userq_fence_driver *fence_drv;
			struct amdgpu_userq_fence *userq_fence;
			u32 index;

			userq_fence = to_amdgpu_userq_fence(fences[i]);
			if (!userq_fence) {
				/*
				 * Just waiting on other driver fences should
				 * be good for now
				 */
				r = dma_fence_wait(fences[i], true);
				if (r) {
					dma_fence_put(fences[i]);
					goto free_fences;
				}

				dma_fence_put(fences[i]);
				continue;
			}

			fence_drv = userq_fence->fence_drv;
			/*
			 * We need to make sure the user queue releases its
			 * references to the fence drivers at some point before
			 * queue destruction. Otherwise, we would gather those
			 * references until we don't have any more space left
			 * and crash.
			 */
			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
				     xa_limit_32b, GFP_KERNEL);
			if (r)
				goto free_fences;

			amdgpu_userq_fence_driver_get(fence_drv);

			/* Store drm syncobj's gpu va address and value */
			fence_info[cnt].va = fence_drv->va;
			fence_info[cnt].value = fences[i]->seqno;

			dma_fence_put(fences[i]);
			/* Increment the actual userq fence count */
			cnt++;
		}

		wait_info->num_fences = cnt;
		/* Copy userq fence info to user space */
		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
			r = -EFAULT;
			goto free_fences;
		}

		kfree(fences);
		kfree(fence_info);
	}

	drm_exec_fini(&exec);
	for (i = 0; i < num_read_bo_handles; i++)
		drm_gem_object_put(gobj_read[i]);
	kfree(gobj_read);

	for (i = 0; i < num_write_bo_handles; i++)
		drm_gem_object_put(gobj_write[i]);
	kfree(gobj_write);

	kfree(timeline_points);
	kfree(timeline_handles);
	kfree(syncobj_handles);
	kfree(bo_handles_write);
	kfree(bo_handles_read);

	return 0;

free_fences:
	while (num_fences-- > 0)
		dma_fence_put(fences[num_fences]);
	kfree(fences);
free_fence_info:
	kfree(fence_info);
exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	while (wentry-- > 0)
		drm_gem_object_put(gobj_write[wentry]);
	kfree(gobj_write);
put_gobj_read:
	while (rentry-- > 0)
		drm_gem_object_put(gobj_read[rentry]);
	kfree(gobj_read);
free_timeline_points:
	kfree(timeline_points);
free_timeline_handles:
	kfree(timeline_handles);
free_syncobj_handles:
	kfree(syncobj_handles);
free_bo_handles_write:
	kfree(bo_handles_write);
free_bo_handles_read:
	kfree(bo_handles_read);

	return r;
}