// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>

#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_userq_fence.h"

static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;

#define AMDGPU_USERQ_MAX_HANDLES (1U << 16)

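/**
 * amdgpu_userq_fence_slab_init - Create the userq fence slab cache
 *
 * Creates the kmem cache used to allocate struct amdgpu_userq_fence
 * objects.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */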
int amdgpu_userq_fence_slab_init(void)
{
	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
						    sizeof(struct amdgpu_userq_fence),
						    0,
						    SLAB_HWCACHE_ALIGN,
						    NULL);
	if (!amdgpu_userq_fence_slab)
		return -ENOMEM;

	return 0;
}

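/**
 * amdgpu_userq_fence_slab_fini - Destroy the userq fence slab cache
 *
 * Waits for pending RCU callbacks (fences are freed via call_rcu())
 * before destroying the slab cache.
 */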
void amdgpu_userq_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_userq_fence_slab);
}

static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
	if (!f || f->ops != &amdgpu_userq_fence_ops)
		return NULL;

	return container_of(f, struct amdgpu_userq_fence, base);
}

static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
	return le64_to_cpu(*fence_drv->cpu_addr);
}

static void
amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
			 u64 seq)
{
	if (fence_drv->cpu_addr)
		*fence_drv->cpu_addr = cpu_to_le64(seq);
}

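/**
 * amdgpu_userq_fence_driver_alloc - Allocate a fence driver for a user queue
 *
 * @adev: amdgpu_device pointer
 * @userq: user mode queue structure pointer
 *
 * Allocates the per-queue fence driver, acquires a seq64 slot for the
 * fence sequence memory and registers the driver in the device xarray,
 * keyed by the queue's doorbell index.
 *
 * Returns 0 on success, error on failure.
 */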
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
				    struct amdgpu_usermode_queue *userq)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long flags;
	int r;

	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
	if (!fence_drv)
		return -ENOMEM;

	/* Acquire seq64 memory */
	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
			       &fence_drv->cpu_addr);
	if (r)
		goto free_fence_drv;

	memset(fence_drv->cpu_addr, 0, sizeof(u64));

	kref_init(&fence_drv->refcount);
	INIT_LIST_HEAD(&fence_drv->fences);
	spin_lock_init(&fence_drv->fence_list_lock);

	fence_drv->adev = adev;
	fence_drv->context = dma_fence_context_alloc(1);
	get_task_comm(fence_drv->timeline_name, current);

	xa_lock_irqsave(&adev->userq_xa, flags);
	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
			      fence_drv, GFP_KERNEL));
	xa_unlock_irqrestore(&adev->userq_xa, flags);
	if (r)
		goto free_seq64;

	userq->fence_drv = fence_drv;

	return 0;

free_seq64:
	amdgpu_seq64_free(adev, fence_drv->va);
free_fence_drv:
	kfree(fence_drv);

	return r;
}

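/*
 * Erase all fence driver entries from the xarray and drop the reference
 * each entry holds on its fence driver.
 */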
static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long index;

	if (xa_empty(xa))
		return;

	xa_lock(xa);
	xa_for_each(xa, index, fence_drv) {
		__xa_erase(xa, index);
		amdgpu_userq_fence_driver_put(fence_drv);
	}
	xa_unlock(xa);
}

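/**
 * amdgpu_userq_fence_driver_free - Release fence driver resources of a queue
 *
 * @userq: user mode queue structure pointer
 *
 * Drops the queue's last fence, the fence driver references gathered in
 * fence_drv_xa and the queue's own fence driver reference.
 */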
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
	dma_fence_put(userq->last_fence);

	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
	xa_destroy(&userq->fence_drv_xa);
	/* Drop the fence_drv reference held by user queue */
	amdgpu_userq_fence_driver_put(userq->fence_drv);
}

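/**
 * amdgpu_userq_fence_driver_process - Signal completed userq fences
 *
 * @fence_drv: fence driver structure pointer
 *
 * Reads the current RPTR value from the seq64 memory and signals every
 * pending fence on the list whose seqno has been reached, dropping the
 * fence driver references each fence collected at creation time.
 */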
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
	struct amdgpu_userq_fence *userq_fence, *tmp;
	struct dma_fence *fence;
	unsigned long flags;
	u64 rptr;
	int i;

	if (!fence_drv)
		return;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	rptr = amdgpu_userq_fence_read(fence_drv);

	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
		fence = &userq_fence->base;

		if (rptr < fence->seqno)
			break;

		dma_fence_signal(fence);

		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);

		list_del(&userq_fence->link);
		dma_fence_put(fence);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

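/**
 * amdgpu_userq_fence_driver_destroy - Final fence driver release
 *
 * @ref: kref embedded in the fence driver
 *
 * Called when the last reference is dropped: force-signals any fences
 * still pending with -ECANCELED, removes the driver from the device
 * xarray and frees the seq64 slot and the driver itself.
 */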
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
					struct amdgpu_userq_fence_driver,
					refcount);
	struct amdgpu_userq_fence_driver *xa_fence_drv;
	struct amdgpu_device *adev = fence_drv->adev;
	struct amdgpu_userq_fence *fence, *tmp;
	struct xarray *xa = &adev->userq_xa;
	unsigned long index, flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
		f = &fence->base;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_set_error(f, -ECANCELED);
			dma_fence_signal(f);
		}

		list_del(&fence->link);
		dma_fence_put(f);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	xa_lock_irqsave(xa, flags);
	xa_for_each(xa, index, xa_fence_drv)
		if (xa_fence_drv == fence_drv)
			__xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);

	/* Free seq64 memory */
	amdgpu_seq64_free(adev, fence_drv->va);
	kfree(fence_drv);
}

void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_get(&fence_drv->refcount);
}

void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}

static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
	return *userq_fence ? 0 : -ENOMEM;
}

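/*
 * Initialize a userq fence on the queue's fence timeline at the given
 * sequence number, take over the fence driver references gathered in the
 * queue's fence_drv_xa and queue the fence on the driver's pending list,
 * unless the hardware has already signaled it.
 */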
static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
				     struct amdgpu_userq_fence *userq_fence,
				     u64 seq, struct dma_fence **f)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	struct dma_fence *fence;
	unsigned long flags;

	fence_drv = userq->fence_drv;
	if (!fence_drv)
		return -EINVAL;

	spin_lock_init(&userq_fence->lock);
	INIT_LIST_HEAD(&userq_fence->link);
	fence = &userq_fence->base;
	userq_fence->fence_drv = fence_drv;

	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
			 fence_drv->context, seq);

	amdgpu_userq_fence_driver_get(fence_drv);
	dma_fence_get(fence);

	if (!xa_empty(&userq->fence_drv_xa)) {
		struct amdgpu_userq_fence_driver *stored_fence_drv;
		unsigned long index, count = 0;
		int i = 0;

		xa_lock(&userq->fence_drv_xa);
		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
			count++;

		userq_fence->fence_drv_array =
			kvmalloc_array(count,
				       sizeof(*userq_fence->fence_drv_array),
				       GFP_ATOMIC);

		if (userq_fence->fence_drv_array) {
			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
				userq_fence->fence_drv_array[i] = stored_fence_drv;
				__xa_erase(&userq->fence_drv_xa, index);
				i++;
			}
		}

		userq_fence->fence_drv_array_count = i;
		xa_unlock(&userq->fence_drv_xa);
	} else {
		userq_fence->fence_drv_array = NULL;
		userq_fence->fence_drv_array_count = 0;
	}

	/* Check if hardware has already processed the job */
	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	if (!dma_fence_is_signaled(fence))
		list_add_tail(&userq_fence->link, &fence_drv->fences);
	else
		dma_fence_put(fence);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	*f = fence;

	return 0;
}

static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
	return "amdgpu_userq_fence";
}

static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);

	return fence->fence_drv->timeline_name;
}

static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	u64 rptr, wptr;

	rptr = amdgpu_userq_fence_read(fence_drv);
	wptr = fence->base.seqno;

	if (rptr >= wptr)
		return true;

	return false;
}

static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;

	/* Release the fence driver reference */
	amdgpu_userq_fence_driver_put(fence_drv);

	kvfree(userq_fence->fence_drv_array);
	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
}

static void amdgpu_userq_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_userq_fence_free);
}

static const struct dma_fence_ops amdgpu_userq_fence_ops = {
	.get_driver_name = amdgpu_userq_fence_get_driver_name,
	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
	.signaled = amdgpu_userq_fence_signaled,
	.release = amdgpu_userq_fence_release,
};

/**
 * amdgpu_userq_fence_read_wptr - Read the userq wptr value
 *
 * @adev: amdgpu_device pointer
 * @queue: user mode queue structure pointer
 * @wptr: write pointer value
 *
 * Read the wptr value from userq's MQD. The userq signal IOCTL
 * creates a dma_fence for the shared buffers that expects the
 * RPTR value written to seq64 memory >= WPTR.
 *
 * Returns wptr value on success, error on failure.
 */
static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
					struct amdgpu_usermode_queue *queue,
					u64 *wptr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	u64 addr, *ptr;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	addr = queue->userq_prop->wptr_gpu_addr;
	addr &= AMDGPU_GMC_HOLE_MASK;

	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
	if (!mapping) {
		amdgpu_bo_unreserve(queue->vm->root.bo);
		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
		return -EINVAL;
	}

	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
	amdgpu_bo_unreserve(queue->vm->root.bo);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&ptr);
	if (r) {
		DRM_ERROR("Failed mapping the userqueue wptr bo\n");
		goto map_error;
	}

	*wptr = le64_to_cpu(*ptr);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;

map_error:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}

static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
	dma_fence_put(fence);
}

static void
amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
				    int error)
{
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	unsigned long flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);

	f = rcu_dereference_protected(&fence->base,
				      lockdep_is_held(&fence_drv->fence_list_lock));
	if (f && !dma_fence_is_signaled_locked(f))
		dma_fence_set_error(f, error);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

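/**
 * amdgpu_userq_fence_driver_force_completion - Force-complete the last fence
 *
 * @userq: user mode queue structure pointer
 *
 * Marks the queue's last fence with -ECANCELED, writes its seqno as the
 * new RPTR value into the seq64 memory and runs fence processing so the
 * fence (and everything before it) signals.
 */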
void
amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
{
	struct dma_fence *f = userq->last_fence;

	if (f) {
		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
		u64 wptr = fence->base.seqno;

		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
		amdgpu_userq_fence_write(fence_drv, wptr);
		amdgpu_userq_fence_driver_process(fence_drv);
	}
}

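/**
 * amdgpu_userq_signal_ioctl - Create a userq fence and attach it
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_signal)
 * @filp: drm file pointer
 *
 * Reads the queue's current WPTR, creates a userq fence for it and
 * attaches the fence to the supplied syncobjs and to the reservation
 * objects of the read/write BOs.
 *
 * Returns 0 on success, error on failure.
 */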
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_amdgpu_userq_signal *args = data;
	struct drm_gem_object **gobj_write = NULL;
	struct drm_gem_object **gobj_read = NULL;
	struct amdgpu_usermode_queue *queue = NULL;
	struct amdgpu_userq_fence *userq_fence;
	struct drm_syncobj **syncobj = NULL;
	u32 *bo_handles_write, num_write_bo_handles;
	u32 *syncobj_handles, num_syncobj_handles;
	u32 *bo_handles_read, num_read_bo_handles;
	int r, i, entry, rentry, wentry;
	struct dma_fence *fence;
	struct drm_exec exec;
	u64 wptr;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (args->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
		return -EINVAL;

	num_syncobj_handles = args->num_syncobj_handles;
	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
				      size_mul(sizeof(u32), num_syncobj_handles));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	/* Array of pointers to the looked up syncobjs */
	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
	if (!syncobj) {
		r = -ENOMEM;
		goto free_syncobj_handles;
	}

	for (entry = 0; entry < num_syncobj_handles; entry++) {
		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
		if (!syncobj[entry]) {
			r = -ENOENT;
			goto free_syncobj;
		}
	}

	num_read_bo_handles = args->num_bo_read_handles;
	bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
				      size_mul(sizeof(u32), num_read_bo_handles));
	if (IS_ERR(bo_handles_read)) {
		r = PTR_ERR(bo_handles_read);
		goto free_syncobj;
	}

	/* Array of pointers to the GEM read objects */
	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
	if (!gobj_read) {
		r = -ENOMEM;
		goto free_bo_handles_read;
	}

	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
		if (!gobj_read[rentry]) {
			r = -ENOENT;
			goto put_gobj_read;
		}
	}

	num_write_bo_handles = args->num_bo_write_handles;
	bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
				       size_mul(sizeof(u32), num_write_bo_handles));
	if (IS_ERR(bo_handles_write)) {
		r = PTR_ERR(bo_handles_write);
		goto put_gobj_read;
	}

	/* Array of pointers to the GEM write objects */
	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
	if (!gobj_write) {
		r = -ENOMEM;
		goto free_bo_handles_write;
	}

	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
		if (!gobj_write[wentry]) {
			r = -ENOENT;
			goto put_gobj_write;
		}
	}

	/* Retrieve the user queue */
	queue = amdgpu_userq_get(userq_mgr, args->queue_id);
	if (!queue) {
		r = -ENOENT;
		goto put_gobj_write;
	}

	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
	if (r)
		goto put_gobj_write;

	r = amdgpu_userq_fence_alloc(&userq_fence);
	if (r)
		goto put_gobj_write;

	/*
	 * The queue is active here, so make sure the eviction fence is valid.
	 * This also takes userq_mgr->userq_mutex, which is released below.
	 */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	/* Create a new fence */
	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
	if (r) {
		mutex_unlock(&userq_mgr->userq_mutex);
		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
		goto put_gobj_write;
	}

	dma_fence_put(queue->last_fence);
	queue->last_fence = dma_fence_get(fence);
	amdgpu_userq_start_hang_detect_work(queue);
	mutex_unlock(&userq_mgr->userq_mutex);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}
	}

	/* Add the created fence to the BOs' reservation objects */
	for (i = 0; i < num_read_bo_handles; i++) {
		if (!gobj_read || !gobj_read[i]->resv)
			continue;

		dma_resv_add_fence(gobj_read[i]->resv, fence,
				   DMA_RESV_USAGE_READ);
	}

	for (i = 0; i < num_write_bo_handles; i++) {
		if (!gobj_write || !gobj_write[i]->resv)
			continue;

		dma_resv_add_fence(gobj_write[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
	}

	/* Add the created fence to the syncobjs */
	for (i = 0; i < num_syncobj_handles; i++)
		drm_syncobj_replace_fence(syncobj[i], fence);

	/* Drop the reference acquired in the fence creation function */
	dma_fence_put(fence);

exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	while (wentry-- > 0)
		drm_gem_object_put(gobj_write[wentry]);
	kfree(gobj_write);
free_bo_handles_write:
	kfree(bo_handles_write);
put_gobj_read:
	while (rentry-- > 0)
		drm_gem_object_put(gobj_read[rentry]);
	kfree(gobj_read);
free_bo_handles_read:
	kfree(bo_handles_read);
free_syncobj:
	while (entry-- > 0)
		if (syncobj[entry])
			drm_syncobj_put(syncobj[entry]);
	kfree(syncobj);
free_syncobj_handles:
	kfree(syncobj_handles);

	if (queue)
		amdgpu_userq_put(queue);

	return r;
}

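/**
 * amdgpu_userq_wait_ioctl - Collect fences a user queue has to wait for
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_wait)
 * @filp: drm file pointer
 *
 * Gathers the fences behind the supplied syncobjs, timeline points and
 * read/write BOs. With num_fences == 0 only the fence count is returned;
 * otherwise userq fences are returned as seq64 va/value pairs for the
 * queue to wait on, while foreign fences are waited on directly.
 *
 * Returns 0 on success, error on failure.
 */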
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
	struct drm_amdgpu_userq_wait *wait_info = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct amdgpu_usermode_queue *waitq = NULL;
	struct drm_gem_object **gobj_write;
	struct drm_gem_object **gobj_read;
	struct dma_fence **fences = NULL;
	u16 num_points, num_fences = 0;
	int r, i, rentry, wentry, cnt;
	struct drm_exec exec;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (wait_info->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
		return -EINVAL;

	num_read_bo_handles = wait_info->num_bo_read_handles;
	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
				      size_mul(sizeof(u32), num_read_bo_handles));
	if (IS_ERR(bo_handles_read))
		return PTR_ERR(bo_handles_read);

	num_write_bo_handles = wait_info->num_bo_write_handles;
	bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
				       size_mul(sizeof(u32), num_write_bo_handles));
	if (IS_ERR(bo_handles_write)) {
		r = PTR_ERR(bo_handles_write);
		goto free_bo_handles_read;
	}

	num_syncobj = wait_info->num_syncobj_handles;
	syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
				      size_mul(sizeof(u32), num_syncobj));
	if (IS_ERR(syncobj_handles)) {
		r = PTR_ERR(syncobj_handles);
		goto free_bo_handles_write;
	}

	num_points = wait_info->num_syncobj_timeline_handles;
	timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
				       size_mul(sizeof(u32), num_points));
	if (IS_ERR(timeline_handles)) {
		r = PTR_ERR(timeline_handles);
		goto free_syncobj_handles;
	}

	timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
				      size_mul(sizeof(u32), num_points));
	if (IS_ERR(timeline_points)) {
		r = PTR_ERR(timeline_points);
		goto free_timeline_handles;
	}

	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
	if (!gobj_read) {
		r = -ENOMEM;
		goto free_timeline_points;
	}

	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
		if (!gobj_read[rentry]) {
			r = -ENOENT;
			goto put_gobj_read;
		}
	}

	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
	if (!gobj_write) {
		r = -ENOMEM;
		goto put_gobj_read;
	}

	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
		if (!gobj_write[wentry]) {
			r = -ENOENT;
			goto put_gobj_write;
		}
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			drm_exec_fini(&exec);
			goto put_gobj_write;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			drm_exec_fini(&exec);
			goto put_gobj_write;
		}
	}

	if (!wait_info->num_fences) {
		/* Count the timeline syncobjs' fences */
		if (num_points) {
			struct dma_fence_unwrap iter;
			struct dma_fence *fence;
			struct dma_fence *f;

			for (i = 0; i < num_points; i++) {
				r = drm_syncobj_find_fence(filp, timeline_handles[i],
							   timeline_points[i],
							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
							   &fence);
				if (r)
					goto exec_fini;

				dma_fence_unwrap_for_each(f, &iter, fence)
					num_fences++;

				dma_fence_put(fence);
			}
		}

		/* Count each syncobj's fence */
		for (i = 0; i < num_syncobj; i++) {
			struct dma_fence *fence;

			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
						   0,
						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						   &fence);
			if (r)
				goto exec_fini;

			num_fences++;
			dma_fence_put(fence);
		}

		/* Count the GEM objects' fences */
		for (i = 0; i < num_read_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
						DMA_RESV_USAGE_READ, fence)
				num_fences++;
		}

		for (i = 0; i < num_write_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
						DMA_RESV_USAGE_WRITE, fence)
				num_fences++;
		}

		/*
		 * Passing num_fences = 0 means that userspace doesn't want to
		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
		 * userq_fence_info and return the actual number of fences in
		 * wait_info->num_fences.
		 */
		wait_info->num_fences = num_fences;
	} else {
		/* Array of fence info */
		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
		if (!fence_info) {
			r = -ENOMEM;
			goto exec_fini;
		}

		/* Array of fences */
		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
		if (!fences) {
			r = -ENOMEM;
			goto free_fence_info;
		}

		/* Retrieve the GEM read objects' fences */
		for (i = 0; i < num_read_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
						DMA_RESV_USAGE_READ, fence) {
				if (num_fences >= wait_info->num_fences) {
					r = -EINVAL;
					goto free_fences;
				}

				fences[num_fences++] = fence;
				dma_fence_get(fence);
			}
		}

		/* Retrieve the GEM write objects' fences */
		for (i = 0; i < num_write_bo_handles; i++) {
			struct dma_resv_iter resv_cursor;
			struct dma_fence *fence;

			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
						DMA_RESV_USAGE_WRITE, fence) {
				if (num_fences >= wait_info->num_fences) {
					r = -EINVAL;
					goto free_fences;
				}

				fences[num_fences++] = fence;
				dma_fence_get(fence);
			}
		}

		/* Retrieve the timeline syncobjs' fences */
		if (num_points) {
			struct dma_fence_unwrap iter;
			struct dma_fence *fence;
			struct dma_fence *f;

			for (i = 0; i < num_points; i++) {
				r = drm_syncobj_find_fence(filp, timeline_handles[i],
							   timeline_points[i],
							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
							   &fence);
				if (r)
					goto free_fences;

				dma_fence_unwrap_for_each(f, &iter, fence) {
					if (num_fences >= wait_info->num_fences) {
						r = -EINVAL;
						dma_fence_put(fence);
						goto free_fences;
					}

					dma_fence_get(f);
					fences[num_fences++] = f;
				}

				dma_fence_put(fence);
			}
		}

		/* Retrieve each syncobj's fence */
		for (i = 0; i < num_syncobj; i++) {
			struct dma_fence *fence;

			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
						   0,
						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
						   &fence);
			if (r)
				goto free_fences;

			if (num_fences >= wait_info->num_fences) {
				r = -EINVAL;
				dma_fence_put(fence);
				goto free_fences;
			}

			fences[num_fences++] = fence;
		}

		/*
		 * Keep only the latest fences to reduce the number of values
		 * given back to userspace.
		 */
		num_fences = dma_fence_dedup_array(fences, num_fences);

		waitq = amdgpu_userq_get(userq_mgr, wait_info->waitq_id);
		if (!waitq) {
			r = -EINVAL;
			goto free_fences;
		}

		for (i = 0, cnt = 0; i < num_fences; i++) {
			struct amdgpu_userq_fence_driver *fence_drv;
			struct amdgpu_userq_fence *userq_fence;
			u32 index;

			userq_fence = to_amdgpu_userq_fence(fences[i]);
			if (!userq_fence) {
				/*
				 * Just waiting on other driver fences should
				 * be good for now
				 */
				r = dma_fence_wait(fences[i], true);
				dma_fence_put(fences[i]);
				/* Clear the slot so free_fences doesn't put it again */
				fences[i] = NULL;
				if (r)
					goto free_fences;

				continue;
			}

			fence_drv = userq_fence->fence_drv;
			/*
			 * We need to make sure the user queue releases its
			 * references to the fence drivers at some point before
			 * queue destruction. Otherwise, we would gather those
			 * references until we don't have any more space left
			 * and crash.
			 */
			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
				     xa_limit_32b, GFP_KERNEL);
			if (r)
				goto free_fences;

			amdgpu_userq_fence_driver_get(fence_drv);

			/* Store the seq64 gpu va address and the fence value */
			fence_info[cnt].va = fence_drv->va;
			fence_info[cnt].value = fences[i]->seqno;

			dma_fence_put(fences[i]);
			/* Clear the slot so free_fences doesn't put it again */
			fences[i] = NULL;
			/* Increment the actual userq fence count */
			cnt++;
		}

		wait_info->num_fences = cnt;
		/* Copy userq fence info to user space */
		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
			r = -EFAULT;
			goto free_fences;
		}
	}

free_fences:
	if (fences) {
		while (num_fences-- > 0)
			dma_fence_put(fences[num_fences]);
		kfree(fences);
	}
free_fence_info:
	kfree(fence_info);
exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	while (wentry-- > 0)
		drm_gem_object_put(gobj_write[wentry]);
	kfree(gobj_write);
put_gobj_read:
	while (rentry-- > 0)
		drm_gem_object_put(gobj_read[rentry]);
	kfree(gobj_read);
free_timeline_points:
	kfree(timeline_points);
free_timeline_handles:
	kfree(timeline_handles);
free_syncobj_handles:
	kfree(syncobj_handles);
free_bo_handles_write:
	kfree(bo_handles_write);
free_bo_handles_read:
	kfree(bo_handles_read);

	if (waitq)
		amdgpu_userq_put(waitq);

	return r;
}