xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c (revision c0d6f52f9b62479d61f8cd4faf9fb2f8bce6e301)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kref.h>
26 #include <linux/slab.h>
27 #include <linux/dma-fence-unwrap.h>
28 
29 #include <drm/drm_exec.h>
30 #include <drm/drm_syncobj.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_userq_fence.h"
34 
35 static const struct dma_fence_ops amdgpu_userq_fence_ops;
36 static struct kmem_cache *amdgpu_userq_fence_slab;
37 
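/*
 * Slab cache backing all amdgpu_userq_fence allocations. The fini path runs
 * rcu_barrier() first so that fences freed through call_rcu() have been
 * returned to the cache before it is destroyed.
 */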
38 int amdgpu_userq_fence_slab_init(void)
39 {
40 	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
41 						    sizeof(struct amdgpu_userq_fence),
42 						    0,
43 						    SLAB_HWCACHE_ALIGN,
44 						    NULL);
45 	if (!amdgpu_userq_fence_slab)
46 		return -ENOMEM;
47 
48 	return 0;
49 }
50 
51 void amdgpu_userq_fence_slab_fini(void)
52 {
53 	rcu_barrier();
54 	kmem_cache_destroy(amdgpu_userq_fence_slab);
55 }
56 
57 static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
58 {
59 	if (!f || f->ops != &amdgpu_userq_fence_ops)
60 		return NULL;
61 
62 	return container_of(f, struct amdgpu_userq_fence, base);
63 }
64 
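/*
 * Accessors for the fence driver's seq64 slot, which holds the queue's last
 * completed fence value (RPTR) as a little-endian u64.
 */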
65 static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
66 {
67 	return le64_to_cpu(*fence_drv->cpu_addr);
68 }
69 
70 static void
71 amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
72 			 u64 seq)
73 {
74 	if (fence_drv->cpu_addr)
75 		*fence_drv->cpu_addr = cpu_to_le64(seq);
76 }
77 
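/**
 * amdgpu_userq_fence_driver_alloc - Allocate a fence driver for a user queue
 *
 * @adev: amdgpu_device pointer
 * @userq: user mode queue the fence driver is attached to
 *
 * Allocate an amdgpu_userq_fence_driver, back it with a seq64 slot, set up
 * its fence list and dma_fence context, and register it in the device's
 * userq_xa under the queue's doorbell index.
 *
 * Returns 0 on success, negative error code on failure.
 */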
78 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
79 				    struct amdgpu_usermode_queue *userq)
80 {
81 	struct amdgpu_userq_fence_driver *fence_drv;
82 	unsigned long flags;
83 	int r;
84 
85 	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
86 	if (!fence_drv)
87 		return -ENOMEM;
88 
89 	/* Acquire seq64 memory */
90 	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
91 			       &fence_drv->cpu_addr);
92 	if (r)
93 		goto free_fence_drv;
94 
95 	memset(fence_drv->cpu_addr, 0, sizeof(u64));
96 
97 	kref_init(&fence_drv->refcount);
98 	INIT_LIST_HEAD(&fence_drv->fences);
99 	spin_lock_init(&fence_drv->fence_list_lock);
100 
101 	fence_drv->adev = adev;
102 	fence_drv->context = dma_fence_context_alloc(1);
103 	get_task_comm(fence_drv->timeline_name, current);
104 
105 	xa_lock_irqsave(&adev->userq_xa, flags);
106 	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
107 			      fence_drv, GFP_KERNEL));
108 	xa_unlock_irqrestore(&adev->userq_xa, flags);
109 	if (r)
110 		goto free_seq64;
111 
112 	userq->fence_drv = fence_drv;
113 
114 	return 0;
115 
116 free_seq64:
117 	amdgpu_seq64_free(adev, fence_drv->va);
118 free_fence_drv:
119 	kfree(fence_drv);
120 
121 	return r;
122 }
123 
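/*
 * Erase every fence driver stored in @xa and drop the reference that was
 * taken when the entry was added.
 */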
124 static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
125 {
126 	struct amdgpu_userq_fence_driver *fence_drv;
127 	unsigned long index;
128 
129 	if (xa_empty(xa))
130 		return;
131 
132 	xa_lock(xa);
133 	xa_for_each(xa, index, fence_drv) {
134 		__xa_erase(xa, index);
135 		amdgpu_userq_fence_driver_put(fence_drv);
136 	}
137 
138 	xa_unlock(xa);
139 }
140 
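/*
 * Release everything a user queue holds on its fence drivers: the last
 * fence, the fence drivers collected in fence_drv_xa and, finally, the
 * queue's own fence_drv reference.
 */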
141 void
142 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
143 {
144 	dma_fence_put(userq->last_fence);
145 
146 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
147 	xa_destroy(&userq->fence_drv_xa);
148 	/* Drop the fence_drv reference held by the user queue */
149 	amdgpu_userq_fence_driver_put(userq->fence_drv);
150 }
151 
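/*
 * Walk the fence driver's pending list and signal every fence whose seqno
 * has been reached by the RPTR value in the seq64 slot, dropping the list
 * reference and the per-fence fence driver references along the way.
 */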
152 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
153 {
154 	struct amdgpu_userq_fence *userq_fence, *tmp;
155 	struct dma_fence *fence;
156 	unsigned long flags;
157 	u64 rptr;
158 	int i;
159 
160 	if (!fence_drv)
161 		return;
162 
163 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
164 	rptr = amdgpu_userq_fence_read(fence_drv);
165 
166 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
167 		fence = &userq_fence->base;
168 
169 		if (rptr < fence->seqno)
170 			break;
171 
172 		dma_fence_signal(fence);
173 
174 		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
175 			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
176 
177 		list_del(&userq_fence->link);
178 		dma_fence_put(fence);
179 	}
180 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
181 }
182 
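/*
 * kref release callback for a fence driver: cancel and signal any fences
 * still pending, remove the driver from the device's userq_xa and release
 * its seq64 slot.
 */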
183 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
184 {
185 	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
186 					 struct amdgpu_userq_fence_driver,
187 					 refcount);
188 	struct amdgpu_userq_fence_driver *xa_fence_drv;
189 	struct amdgpu_device *adev = fence_drv->adev;
190 	struct amdgpu_userq_fence *fence, *tmp;
191 	struct xarray *xa = &adev->userq_xa;
192 	unsigned long index, flags;
193 	struct dma_fence *f;
194 
195 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
196 	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
197 		f = &fence->base;
198 
199 		if (!dma_fence_is_signaled(f)) {
200 			dma_fence_set_error(f, -ECANCELED);
201 			dma_fence_signal(f);
202 		}
203 
204 		list_del(&fence->link);
205 		dma_fence_put(f);
206 	}
207 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
208 
209 	xa_lock_irqsave(xa, flags);
210 	xa_for_each(xa, index, xa_fence_drv)
211 		if (xa_fence_drv == fence_drv)
212 			__xa_erase(xa, index);
213 	xa_unlock_irqrestore(xa, flags);
214 
215 	/* Free seq64 memory */
216 	amdgpu_seq64_free(adev, fence_drv->va);
217 	kfree(fence_drv);
218 }
219 
220 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
221 {
222 	kref_get(&fence_drv->refcount);
223 }
224 
225 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
226 {
227 	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
228 }
229 
230 static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
231 {
232 	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
233 	return *userq_fence ? 0 : -ENOMEM;
234 }
235 
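/*
 * Initialize @userq_fence for @seq on the queue's fence driver and take over
 * the fence drivers collected in the queue's fence_drv_xa. The fence is put
 * on the driver's pending list unless the hardware has already passed @seq,
 * in which case the list reference is dropped immediately. On success *@f
 * points to the new fence.
 */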
236 static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
237 				     struct amdgpu_userq_fence *userq_fence,
238 				     u64 seq, struct dma_fence **f)
239 {
240 	struct amdgpu_userq_fence_driver *fence_drv;
241 	struct dma_fence *fence;
242 	unsigned long flags;
243 
244 	fence_drv = userq->fence_drv;
245 	if (!fence_drv)
246 		return -EINVAL;
247 
248 	spin_lock_init(&userq_fence->lock);
249 	INIT_LIST_HEAD(&userq_fence->link);
250 	fence = &userq_fence->base;
251 	userq_fence->fence_drv = fence_drv;
252 
253 	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
254 			 fence_drv->context, seq);
255 
256 	amdgpu_userq_fence_driver_get(fence_drv);
257 	dma_fence_get(fence);
258 
259 	if (!xa_empty(&userq->fence_drv_xa)) {
260 		struct amdgpu_userq_fence_driver *stored_fence_drv;
261 		unsigned long index, count = 0;
262 		int i = 0;
263 
264 		xa_lock(&userq->fence_drv_xa);
265 		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
266 			count++;
267 
268 		userq_fence->fence_drv_array =
269 			kvmalloc_array(count,
270 				       sizeof(struct amdgpu_userq_fence_driver *),
271 				       GFP_ATOMIC);
272 
273 		if (userq_fence->fence_drv_array) {
274 			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
275 				userq_fence->fence_drv_array[i] = stored_fence_drv;
276 				__xa_erase(&userq->fence_drv_xa, index);
277 				i++;
278 			}
279 		}
280 
281 		userq_fence->fence_drv_array_count = i;
282 		xa_unlock(&userq->fence_drv_xa);
283 	} else {
284 		userq_fence->fence_drv_array = NULL;
285 		userq_fence->fence_drv_array_count = 0;
286 	}
287 
288 	/* Check if hardware has already processed the job */
289 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
290 	if (!dma_fence_is_signaled(fence))
291 		list_add_tail(&userq_fence->link, &fence_drv->fences);
292 	else
293 		dma_fence_put(fence);
294 
295 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
296 
297 	*f = fence;
298 
299 	return 0;
300 }
301 
302 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
303 {
304 	return "amdgpu_userq_fence";
305 }
306 
307 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
308 {
309 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
310 
311 	return fence->fence_drv->timeline_name;
312 }
313 
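/*
 * A user queue fence counts as signaled once the RPTR value read back from
 * the seq64 slot has caught up with the fence's seqno (the WPTR captured at
 * signal time).
 */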
314 static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
315 {
316 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
317 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
318 	u64 rptr, wptr;
319 
320 	rptr = amdgpu_userq_fence_read(fence_drv);
321 	wptr = fence->base.seqno;
322 
323 	if (rptr >= wptr)
324 		return true;
325 
326 	return false;
327 }
328 
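/*
 * RCU callback run once the fence is no longer referenced: drop the fence
 * driver reference taken at creation time and free the fence together with
 * its fence_drv_array.
 */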
329 static void amdgpu_userq_fence_free(struct rcu_head *rcu)
330 {
331 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
332 	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
333 	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
334 
335 	/* Release the fence driver reference */
336 	amdgpu_userq_fence_driver_put(fence_drv);
337 
338 	kvfree(userq_fence->fence_drv_array);
339 	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
340 }
341 
342 static void amdgpu_userq_fence_release(struct dma_fence *f)
343 {
344 	call_rcu(&f->rcu, amdgpu_userq_fence_free);
345 }
346 
347 static const struct dma_fence_ops amdgpu_userq_fence_ops = {
348 	.get_driver_name = amdgpu_userq_fence_get_driver_name,
349 	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
350 	.signaled = amdgpu_userq_fence_signaled,
351 	.release = amdgpu_userq_fence_release,
352 };
353 
354 /**
355  * amdgpu_userq_fence_read_wptr - Read the userq wptr value
356  *
357  * @adev: amdgpu_device pointer
358  * @queue: user mode queue structure pointer
359  * @wptr: write pointer value
360  *
361  * Read the wptr value from the userq's MQD. The userq signal IOCTL
362  * creates a dma_fence for the shared buffers that is considered
363  * signaled once the RPTR value written to the seq64 memory is >= WPTR.
364  *
365  * Returns 0 on success (wptr stored in @wptr), negative error code on failure.
366  */
367 static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
368 					struct amdgpu_usermode_queue *queue,
369 					u64 *wptr)
370 {
371 	struct amdgpu_bo_va_mapping *mapping;
372 	struct amdgpu_bo *bo;
373 	u64 addr, *ptr;
374 	int r;
375 
376 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
377 	if (r)
378 		return r;
379 
380 	addr = queue->userq_prop->wptr_gpu_addr;
381 	addr &= AMDGPU_GMC_HOLE_MASK;
382 
383 	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
384 	if (!mapping) {
385 		amdgpu_bo_unreserve(queue->vm->root.bo);
386 		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
387 		return -EINVAL;
388 	}
389 
390 	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
391 	amdgpu_bo_unreserve(queue->vm->root.bo);
392 	r = amdgpu_bo_reserve(bo, true);
393 	if (r) {
394 		amdgpu_bo_unref(&bo);
395 		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
396 		return r;
397 	}
398 
399 	r = amdgpu_bo_kmap(bo, (void **)&ptr);
400 	if (r) {
401 		DRM_ERROR("Failed to map the userqueue wptr bo\n");
402 		goto map_error;
403 	}
404 
405 	*wptr = le64_to_cpu(*ptr);
406 
407 	amdgpu_bo_kunmap(bo);
408 	amdgpu_bo_unreserve(bo);
409 	amdgpu_bo_unref(&bo);
410 
411 	return 0;
412 
413 map_error:
414 	amdgpu_bo_unreserve(bo);
415 	amdgpu_bo_unref(&bo);
416 
417 	return r;
418 }
419 
420 static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
421 {
422 	dma_fence_put(fence);
423 }
424 
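/*
 * Set @error on @fence under the fence list lock, but only if the fence has
 * not been signaled yet.
 */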
425 static void
426 amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
427 				    int error)
428 {
429 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
430 	unsigned long flags;
431 	struct dma_fence *f;
432 
433 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
434 
435 	f = rcu_dereference_protected(&fence->base,
436 				      lockdep_is_held(&fence_drv->fence_list_lock));
437 	if (f && !dma_fence_is_signaled_locked(f))
438 		dma_fence_set_error(f, error);
439 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
440 }
441 
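/*
 * Force completion of the queue's last fence: mark it with -ECANCELED, write
 * its seqno into the seq64 slot as if the hardware had reached it and run
 * the regular fence processing to signal it.
 */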
442 void
443 amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
444 {
445 	struct dma_fence *f = userq->last_fence;
446 
447 	if (f) {
448 		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
449 		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
450 		u64 wptr = fence->base.seqno;
451 
452 		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
453 		amdgpu_userq_fence_write(fence_drv, wptr);
454 		amdgpu_userq_fence_driver_process(fence_drv);
455 
456 	}
457 }
458 
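/**
 * amdgpu_userq_signal_ioctl - Attach a user queue fence to BOs and syncobjs
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_signal)
 * @filp: drm file the call comes from
 *
 * Look up the caller's syncobjs and read/write GEM objects, read the queue's
 * current WPTR, create a user queue fence for that value, install it in the
 * BOs' reservation objects and in the syncobjs, and remember it as the
 * queue's last fence.
 *
 * Returns 0 on success, negative error code on failure.
 */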
459 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
460 			      struct drm_file *filp)
461 {
462 	struct amdgpu_device *adev = drm_to_adev(dev);
463 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
464 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
465 	struct drm_amdgpu_userq_signal *args = data;
466 	struct drm_gem_object **gobj_write = NULL;
467 	struct drm_gem_object **gobj_read = NULL;
468 	struct amdgpu_usermode_queue *queue;
469 	struct amdgpu_userq_fence *userq_fence;
470 	struct drm_syncobj **syncobj = NULL;
471 	u32 *bo_handles_write, num_write_bo_handles;
472 	u32 *syncobj_handles, num_syncobj_handles;
473 	u32 *bo_handles_read, num_read_bo_handles;
474 	int r, i, entry, rentry, wentry;
475 	struct dma_fence *fence;
476 	struct drm_exec exec;
477 	u64 wptr;
478 
479 	if (!amdgpu_userq_enabled(dev))
480 		return -ENOTSUPP;
481 
482 	num_syncobj_handles = args->num_syncobj_handles;
483 	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
484 				      size_mul(sizeof(u32), num_syncobj_handles));
485 	if (IS_ERR(syncobj_handles))
486 		return PTR_ERR(syncobj_handles);
487 
488 	/* Array of pointers to the looked up syncobjs */
489 	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
490 	if (!syncobj) {
491 		r = -ENOMEM;
492 		goto free_syncobj_handles;
493 	}
494 
495 	for (entry = 0; entry < num_syncobj_handles; entry++) {
496 		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
497 		if (!syncobj[entry]) {
498 			r = -ENOENT;
499 			goto free_syncobj;
500 		}
501 	}
502 
503 	num_read_bo_handles = args->num_bo_read_handles;
504 	bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
505 				      sizeof(u32) * num_read_bo_handles);
506 	if (IS_ERR(bo_handles_read)) {
507 		r = PTR_ERR(bo_handles_read);
508 		goto free_syncobj;
509 	}
510 
511 	/* Array of pointers to the GEM read objects */
512 	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
513 	if (!gobj_read) {
514 		r = -ENOMEM;
515 		goto free_bo_handles_read;
516 	}
517 
518 	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
519 		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
520 		if (!gobj_read[rentry]) {
521 			r = -ENOENT;
522 			goto put_gobj_read;
523 		}
524 	}
525 
526 	num_write_bo_handles = args->num_bo_write_handles;
527 	bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
528 				       sizeof(u32) * num_write_bo_handles);
529 	if (IS_ERR(bo_handles_write)) {
530 		r = PTR_ERR(bo_handles_write);
531 		goto put_gobj_read;
532 	}
533 
534 	/* Array of pointers to the GEM write objects */
535 	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
536 	if (!gobj_write) {
537 		r = -ENOMEM;
538 		goto free_bo_handles_write;
539 	}
540 
541 	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
542 		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
543 		if (!gobj_write[wentry]) {
544 			r = -ENOENT;
545 			goto put_gobj_write;
546 		}
547 	}
548 
549 	/* Retrieve the user queue */
550 	queue = xa_load(&userq_mgr->userq_xa, args->queue_id);
551 	if (!queue) {
552 		r = -ENOENT;
553 		goto put_gobj_write;
554 	}
555 
556 	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
557 	if (r)
558 		goto put_gobj_write;
559 
560 	r = amdgpu_userq_fence_alloc(&userq_fence);
561 	if (r)
562 		goto put_gobj_write;
563 
564 	/* The user queue is active at this point; make sure the eviction fence is valid */
565 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
566 
567 	/* Create a new fence */
568 	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
569 	if (r) {
570 		mutex_unlock(&userq_mgr->userq_mutex);
571 		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
572 		goto put_gobj_write;
573 	}
574 
575 	dma_fence_put(queue->last_fence);
576 	queue->last_fence = dma_fence_get(fence);
577 	amdgpu_userq_start_hang_detect_work(queue);
578 	mutex_unlock(&userq_mgr->userq_mutex);
579 
580 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
581 		      (num_read_bo_handles + num_write_bo_handles));
582 
583 	/* Lock all BOs with retry handling */
584 	drm_exec_until_all_locked(&exec) {
585 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
586 		drm_exec_retry_on_contention(&exec);
587 		if (r) {
588 			amdgpu_userq_fence_cleanup(fence);
589 			goto exec_fini;
590 		}
591 
592 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
593 		drm_exec_retry_on_contention(&exec);
594 		if (r) {
595 			amdgpu_userq_fence_cleanup(fence);
596 			goto exec_fini;
597 		}
598 	}
599 
600 	for (i = 0; i < num_read_bo_handles; i++) {
601 		if (!gobj_read || !gobj_read[i]->resv)
602 			continue;
603 
604 		dma_resv_add_fence(gobj_read[i]->resv, fence,
605 				   DMA_RESV_USAGE_READ);
606 	}
607 
608 	for (i = 0; i < num_write_bo_handles; i++) {
609 		if (!gobj_write || !gobj_write[i]->resv)
610 			continue;
611 
612 		dma_resv_add_fence(gobj_write[i]->resv, fence,
613 				   DMA_RESV_USAGE_WRITE);
614 	}
615 
616 	/* Attach the created fence to the syncobjs */
617 	for (i = 0; i < num_syncobj_handles; i++)
618 		drm_syncobj_replace_fence(syncobj[i], fence);
619 
620 	/* Drop the reference acquired in the fence creation function */
621 	dma_fence_put(fence);
622 
623 exec_fini:
624 	drm_exec_fini(&exec);
625 put_gobj_write:
626 	while (wentry-- > 0)
627 		drm_gem_object_put(gobj_write[wentry]);
628 	kfree(gobj_write);
629 free_bo_handles_write:
630 	kfree(bo_handles_write);
631 put_gobj_read:
632 	while (rentry-- > 0)
633 		drm_gem_object_put(gobj_read[rentry]);
634 	kfree(gobj_read);
635 free_bo_handles_read:
636 	kfree(bo_handles_read);
637 free_syncobj:
638 	while (entry-- > 0)
639 		if (syncobj[entry])
640 			drm_syncobj_put(syncobj[entry]);
641 	kfree(syncobj);
642 free_syncobj_handles:
643 	kfree(syncobj_handles);
644 
645 	return r;
646 }
647 
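/**
 * amdgpu_userq_wait_ioctl - Gather the fences a user queue has to wait for
 *
 * @dev: drm device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_wait)
 * @filp: drm file the call comes from
 *
 * Collect the fences behind the supplied syncobjs, timeline points and GEM
 * objects. If wait_info->num_fences is zero only the number of fences is
 * reported back; otherwise the fences are deduplicated, fences from other
 * drivers are waited on inline, and for every remaining user queue fence the
 * seq64 GPU address and expected value are copied to userspace.
 *
 * Returns 0 on success, negative error code on failure.
 */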
648 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
649 			    struct drm_file *filp)
650 {
651 	u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
652 	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
653 	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
654 	struct drm_amdgpu_userq_wait *wait_info = data;
655 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
656 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
657 	struct amdgpu_usermode_queue *waitq;
658 	struct drm_gem_object **gobj_write;
659 	struct drm_gem_object **gobj_read;
660 	struct dma_fence **fences = NULL;
661 	u16 num_points, num_fences = 0;
662 	int r, i, rentry, wentry, cnt;
663 	struct drm_exec exec;
664 
665 	if (!amdgpu_userq_enabled(dev))
666 		return -ENOTSUPP;
667 
668 	num_read_bo_handles = wait_info->num_bo_read_handles;
669 	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
670 				      size_mul(sizeof(u32), num_read_bo_handles));
671 	if (IS_ERR(bo_handles_read))
672 		return PTR_ERR(bo_handles_read);
673 
674 	num_write_bo_handles = wait_info->num_bo_write_handles;
675 	bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
676 				       size_mul(sizeof(u32), num_write_bo_handles));
677 	if (IS_ERR(bo_handles_write)) {
678 		r = PTR_ERR(bo_handles_write);
679 		goto free_bo_handles_read;
680 	}
681 
682 	num_syncobj = wait_info->num_syncobj_handles;
683 	syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
684 				      size_mul(sizeof(u32), num_syncobj));
685 	if (IS_ERR(syncobj_handles)) {
686 		r = PTR_ERR(syncobj_handles);
687 		goto free_bo_handles_write;
688 	}
689 
690 	num_points = wait_info->num_syncobj_timeline_handles;
691 	timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
692 				       sizeof(u32) * num_points);
693 	if (IS_ERR(timeline_handles)) {
694 		r = PTR_ERR(timeline_handles);
695 		goto free_syncobj_handles;
696 	}
697 
698 	timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
699 				      sizeof(u32) * num_points);
700 	if (IS_ERR(timeline_points)) {
701 		r = PTR_ERR(timeline_points);
702 		goto free_timeline_handles;
703 	}
704 
705 	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
706 	if (!gobj_read) {
707 		r = -ENOMEM;
708 		goto free_timeline_points;
709 	}
710 
711 	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
712 		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
713 		if (!gobj_read[rentry]) {
714 			r = -ENOENT;
715 			goto put_gobj_read;
716 		}
717 	}
718 
719 	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
720 	if (!gobj_write) {
721 		r = -ENOMEM;
722 		goto put_gobj_read;
723 	}
724 
725 	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
726 		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
727 		if (!gobj_write[wentry]) {
728 			r = -ENOENT;
729 			goto put_gobj_write;
730 		}
731 	}
732 
733 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
734 		      (num_read_bo_handles + num_write_bo_handles));
735 
736 	/* Lock all BOs with retry handling */
737 	drm_exec_until_all_locked(&exec) {
738 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
739 		drm_exec_retry_on_contention(&exec);
740 		if (r) {
741 			drm_exec_fini(&exec);
742 			goto put_gobj_write;
743 		}
744 
745 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
746 		drm_exec_retry_on_contention(&exec);
747 		if (r) {
748 			drm_exec_fini(&exec);
749 			goto put_gobj_write;
750 		}
751 	}
752 
753 	if (!wait_info->num_fences) {
754 		if (num_points) {
755 			struct dma_fence_unwrap iter;
756 			struct dma_fence *fence;
757 			struct dma_fence *f;
758 
759 			for (i = 0; i < num_points; i++) {
760 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
761 							   timeline_points[i],
762 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
763 							   &fence);
764 				if (r)
765 					goto exec_fini;
766 
767 				dma_fence_unwrap_for_each(f, &iter, fence)
768 					num_fences++;
769 
770 				dma_fence_put(fence);
771 			}
772 		}
773 
774 		/* Count the syncobj fences */
775 		for (i = 0; i < num_syncobj; i++) {
776 			struct dma_fence *fence;
777 
778 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
779 						   0,
780 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
781 						   &fence);
782 			if (r)
783 				goto exec_fini;
784 
785 			num_fences++;
786 			dma_fence_put(fence);
787 		}
788 
789 		/* Count the GEM object fences */
790 		for (i = 0; i < num_read_bo_handles; i++) {
791 			struct dma_resv_iter resv_cursor;
792 			struct dma_fence *fence;
793 
794 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
795 						DMA_RESV_USAGE_READ, fence)
796 				num_fences++;
797 		}
798 
799 		for (i = 0; i < num_write_bo_handles; i++) {
800 			struct dma_resv_iter resv_cursor;
801 			struct dma_fence *fence;
802 
803 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
804 						DMA_RESV_USAGE_WRITE, fence)
805 				num_fences++;
806 		}
807 
808 		/*
809 		 * Passing num_fences = 0 means that userspace doesn't want to
810 		 * retrieve userq_fence_info. In that case we skip filling
811 		 * userq_fence_info and only return the actual number of fences
812 		 * in wait_info->num_fences.
813 		 */
814 		wait_info->num_fences = num_fences;
815 	} else {
816 		/* Array of fence info */
817 		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
818 		if (!fence_info) {
819 			r = -ENOMEM;
820 			goto exec_fini;
821 		}
822 
823 		/* Array of fences */
824 		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
825 		if (!fences) {
826 			r = -ENOMEM;
827 			goto free_fence_info;
828 		}
829 
830 		/* Retrieve the GEM read objects' fences */
831 		for (i = 0; i < num_read_bo_handles; i++) {
832 			struct dma_resv_iter resv_cursor;
833 			struct dma_fence *fence;
834 
835 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
836 						DMA_RESV_USAGE_READ, fence) {
837 				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
838 					r = -EINVAL;
839 					goto free_fences;
840 				}
841 
842 				fences[num_fences++] = fence;
843 				dma_fence_get(fence);
844 			}
845 		}
846 
847 		/* Retrieve the GEM write objects' fences */
848 		for (i = 0; i < num_write_bo_handles; i++) {
849 			struct dma_resv_iter resv_cursor;
850 			struct dma_fence *fence;
851 
852 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
853 						DMA_RESV_USAGE_WRITE, fence) {
854 				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
855 					r = -EINVAL;
856 					goto free_fences;
857 				}
858 
859 				fences[num_fences++] = fence;
860 				dma_fence_get(fence);
861 			}
862 		}
863 
864 		if (num_points) {
865 			struct dma_fence_unwrap iter;
866 			struct dma_fence *fence;
867 			struct dma_fence *f;
868 
869 			for (i = 0; i < num_points; i++) {
870 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
871 							   timeline_points[i],
872 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
873 							   &fence);
874 				if (r)
875 					goto free_fences;
876 
877 				dma_fence_unwrap_for_each(f, &iter, fence) {
878 					if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
879 						r = -EINVAL;
880 						goto free_fences;
881 					}
882 
883 					dma_fence_get(f);
884 					fences[num_fences++] = f;
885 				}
886 
887 				dma_fence_put(fence);
888 			}
889 		}
890 
891 		/* Retrieve the syncobj fences */
892 		for (i = 0; i < num_syncobj; i++) {
893 			struct dma_fence *fence;
894 
895 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
896 						   0,
897 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
898 						   &fence);
899 			if (r)
900 				goto free_fences;
901 
902 			if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
903 				r = -EINVAL;
904 				goto free_fences;
905 			}
906 
907 			fences[num_fences++] = fence;
908 		}
909 
910 		/*
911 		 * Keep only the latest fences to reduce the number of values
912 		 * given back to userspace.
913 		 */
914 		num_fences = dma_fence_dedup_array(fences, num_fences);
915 
916 		waitq = xa_load(&userq_mgr->userq_xa, wait_info->waitq_id);
917 		if (!waitq) {
918 			r = -EINVAL;
919 			goto free_fences;
920 		}
921 
922 		for (i = 0, cnt = 0; i < num_fences; i++) {
923 			struct amdgpu_userq_fence_driver *fence_drv;
924 			struct amdgpu_userq_fence *userq_fence;
925 			u32 index;
926 
927 			userq_fence = to_amdgpu_userq_fence(fences[i]);
928 			if (!userq_fence) {
929 				/*
930 				 * Just waiting on fences from other drivers
931 				 * should be good enough for now
932 				 */
933 				r = dma_fence_wait(fences[i], true);
934 				if (r) {
935 					dma_fence_put(fences[i]);
936 					goto free_fences;
937 				}
938 
939 				dma_fence_put(fences[i]);
940 				continue;
941 			}
942 
943 			fence_drv = userq_fence->fence_drv;
944 			/*
945 			 * We need to make sure the user queue releases its references
946 			 * to the fence drivers at some point before queue destruction.
947 			 * Otherwise, we would accumulate those references until we run
948 			 * out of space and crash.
949 			 */
950 			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
951 				     xa_limit_32b, GFP_KERNEL);
952 			if (r)
953 				goto free_fences;
954 
955 			amdgpu_userq_fence_driver_get(fence_drv);
956 
957 			/* Store the fence driver's seq64 GPU address and the fence value */
958 			fence_info[cnt].va = fence_drv->va;
959 			fence_info[cnt].value = fences[i]->seqno;
960 
961 			dma_fence_put(fences[i]);
962 			/* Increment the actual userq fence count */
963 			cnt++;
964 		}
965 
966 		wait_info->num_fences = cnt;
967 		/* Copy userq fence info to user space */
968 		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
969 				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
970 			r = -EFAULT;
971 			goto free_fences;
972 		}
973 
974 		kfree(fences);
975 		kfree(fence_info);
976 	}
977 
978 	drm_exec_fini(&exec);
979 	for (i = 0; i < num_read_bo_handles; i++)
980 		drm_gem_object_put(gobj_read[i]);
981 	kfree(gobj_read);
982 
983 	for (i = 0; i < num_write_bo_handles; i++)
984 		drm_gem_object_put(gobj_write[i]);
985 	kfree(gobj_write);
986 
987 	kfree(timeline_points);
988 	kfree(timeline_handles);
989 	kfree(syncobj_handles);
990 	kfree(bo_handles_write);
991 	kfree(bo_handles_read);
992 
993 	return 0;
994 
995 free_fences:
996 	while (num_fences-- > 0)
997 		dma_fence_put(fences[num_fences]);
998 	kfree(fences);
999 free_fence_info:
1000 	kfree(fence_info);
1001 exec_fini:
1002 	drm_exec_fini(&exec);
1003 put_gobj_write:
1004 	while (wentry-- > 0)
1005 		drm_gem_object_put(gobj_write[wentry]);
1006 	kfree(gobj_write);
1007 put_gobj_read:
1008 	while (rentry-- > 0)
1009 		drm_gem_object_put(gobj_read[rentry]);
1010 	kfree(gobj_read);
1011 free_timeline_points:
1012 	kfree(timeline_points);
1013 free_timeline_handles:
1014 	kfree(timeline_handles);
1015 free_syncobj_handles:
1016 	kfree(syncobj_handles);
1017 free_bo_handles_write:
1018 	kfree(bo_handles_write);
1019 free_bo_handles_read:
1020 	kfree(bo_handles_read);
1021 
1022 	return r;
1023 }
1024