xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c (revision d9bc24d263fdb420f61a8a8b8bbb1a68f5a0f803)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kref.h>
26 #include <linux/slab.h>
27 #include <linux/dma-fence-unwrap.h>
28 
29 #include <drm/drm_exec.h>
30 #include <drm/drm_syncobj.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_userq_fence.h"
34 
35 static const struct dma_fence_ops amdgpu_userq_fence_ops;
36 static struct kmem_cache *amdgpu_userq_fence_slab;
37 
38 #define AMDGPU_USERQ_MAX_HANDLES	(1U << 16)
39 
40 int amdgpu_userq_fence_slab_init(void)
41 {
42 	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
43 						    sizeof(struct amdgpu_userq_fence),
44 						    0,
45 						    SLAB_HWCACHE_ALIGN,
46 						    NULL);
47 	if (!amdgpu_userq_fence_slab)
48 		return -ENOMEM;
49 
50 	return 0;
51 }
52 
53 void amdgpu_userq_fence_slab_fini(void)
54 {
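	/*
	 * Fences are released through call_rcu() (see
	 * amdgpu_userq_fence_release() below), so wait for all pending RCU
	 * callbacks before the backing slab is destroyed.
	 */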
55 	rcu_barrier();
56 	kmem_cache_destroy(amdgpu_userq_fence_slab);
57 }
58 
59 static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
60 {
61 	if (!f || f->ops != &amdgpu_userq_fence_ops)
62 		return NULL;
63 
64 	return container_of(f, struct amdgpu_userq_fence, base);
65 }
66 
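/*
 * The seq64 slot behind cpu_addr/gpu_addr holds the queue's 64-bit completion
 * value, stored little-endian. The kernel normally only reads it here; it is
 * expected to be advanced by the work submitted on the user queue, and only
 * written from the kernel when completions are forced (see
 * amdgpu_userq_fence_write() below).
 */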
67 static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
68 {
69 	return le64_to_cpu(*fence_drv->cpu_addr);
70 }
71 
72 static void
73 amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
74 			 u64 seq)
75 {
76 	if (fence_drv->cpu_addr)
77 		*fence_drv->cpu_addr = cpu_to_le64(seq);
78 }
79 
80 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
81 				    struct amdgpu_usermode_queue *userq)
82 {
83 	struct amdgpu_userq_fence_driver *fence_drv;
84 	unsigned long flags;
85 	int r;
86 
87 	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
88 	if (!fence_drv)
89 		return -ENOMEM;
90 
91 	/* Acquire seq64 memory */
92 	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
93 			       &fence_drv->cpu_addr);
94 	if (r)
95 		goto free_fence_drv;
96 
97 	memset(fence_drv->cpu_addr, 0, sizeof(u64));
98 
99 	kref_init(&fence_drv->refcount);
100 	INIT_LIST_HEAD(&fence_drv->fences);
101 	spin_lock_init(&fence_drv->fence_list_lock);
102 
103 	fence_drv->adev = adev;
104 	fence_drv->context = dma_fence_context_alloc(1);
105 	get_task_comm(fence_drv->timeline_name, current);
106 
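	/*
	 * Publish the fence driver in the device-wide xarray, keyed by the
	 * queue's doorbell index, so it can be looked up later (e.g. by the
	 * interrupt path that calls amdgpu_userq_fence_driver_process()).
	 */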
107 	xa_lock_irqsave(&adev->userq_xa, flags);
108 	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
109 			      fence_drv, GFP_KERNEL));
110 	xa_unlock_irqrestore(&adev->userq_xa, flags);
111 	if (r)
112 		goto free_seq64;
113 
114 	userq->fence_drv = fence_drv;
115 
116 	return 0;
117 
118 free_seq64:
119 	amdgpu_seq64_free(adev, fence_drv->va);
120 free_fence_drv:
121 	kfree(fence_drv);
122 
123 	return r;
124 }
125 
126 static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
127 {
128 	struct amdgpu_userq_fence_driver *fence_drv;
129 	unsigned long index;
130 
131 	if (xa_empty(xa))
132 		return;
133 
134 	xa_lock(xa);
135 	xa_for_each(xa, index, fence_drv) {
136 		__xa_erase(xa, index);
137 		amdgpu_userq_fence_driver_put(fence_drv);
138 	}
139 
140 	xa_unlock(xa);
141 }
142 
143 void
144 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
145 {
146 	dma_fence_put(userq->last_fence);
147 
148 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
149 	xa_destroy(&userq->fence_drv_xa);
150 	/* Drop the fence_drv reference held by the user queue */
151 	amdgpu_userq_fence_driver_put(userq->fence_drv);
152 }
153 
154 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
155 {
156 	struct amdgpu_userq_fence *userq_fence, *tmp;
157 	struct dma_fence *fence;
158 	unsigned long flags;
159 	u64 rptr;
160 	int i;
161 
162 	if (!fence_drv)
163 		return;
164 
165 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
166 	rptr = amdgpu_userq_fence_read(fence_drv);
167 
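	/*
	 * Pending fences are kept in submission order, so the walk can stop
	 * at the first fence whose seqno is still ahead of the completion
	 * value (rptr); everything before it has been processed and can be
	 * signaled and dropped.
	 */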
168 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
169 		fence = &userq_fence->base;
170 
171 		if (rptr < fence->seqno)
172 			break;
173 
174 		dma_fence_signal(fence);
175 
176 		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
177 			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
178 
179 		list_del(&userq_fence->link);
180 		dma_fence_put(fence);
181 	}
182 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
183 }
184 
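/*
 * Called when the last reference to the fence driver is dropped: force-signal
 * anything still pending with -ECANCELED, unpublish the driver from the
 * device-wide xarray and give the seq64 slot back.
 */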
185 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
186 {
187 	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
188 					 struct amdgpu_userq_fence_driver,
189 					 refcount);
190 	struct amdgpu_userq_fence_driver *xa_fence_drv;
191 	struct amdgpu_device *adev = fence_drv->adev;
192 	struct amdgpu_userq_fence *fence, *tmp;
193 	struct xarray *xa = &adev->userq_xa;
194 	unsigned long index, flags;
195 	struct dma_fence *f;
196 
197 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
198 	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
199 		f = &fence->base;
200 
201 		if (!dma_fence_is_signaled(f)) {
202 			dma_fence_set_error(f, -ECANCELED);
203 			dma_fence_signal(f);
204 		}
205 
206 		list_del(&fence->link);
207 		dma_fence_put(f);
208 	}
209 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
210 
211 	xa_lock_irqsave(xa, flags);
212 	xa_for_each(xa, index, xa_fence_drv)
213 		if (xa_fence_drv == fence_drv)
214 			__xa_erase(xa, index);
215 	xa_unlock_irqrestore(xa, flags);
216 
217 	/* Free seq64 memory */
218 	amdgpu_seq64_free(adev, fence_drv->va);
219 	kfree(fence_drv);
220 }
221 
222 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
223 {
224 	kref_get(&fence_drv->refcount);
225 }
226 
227 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
228 {
229 	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
230 }
231 
232 static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
233 {
234 	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
235 	return *userq_fence ? 0 : -ENOMEM;
236 }
237 
238 static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
239 				     struct amdgpu_userq_fence *userq_fence,
240 				     u64 seq, struct dma_fence **f)
241 {
242 	struct amdgpu_userq_fence_driver *fence_drv;
243 	struct dma_fence *fence;
244 	unsigned long flags;
245 
246 	fence_drv = userq->fence_drv;
247 	if (!fence_drv)
248 		return -EINVAL;
249 
250 	spin_lock_init(&userq_fence->lock);
251 	INIT_LIST_HEAD(&userq_fence->link);
252 	fence = &userq_fence->base;
253 	userq_fence->fence_drv = fence_drv;
254 
255 	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
256 			 fence_drv->context, seq);
257 
258 	amdgpu_userq_fence_driver_get(fence_drv);
259 	dma_fence_get(fence);
260 
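	/*
	 * The wait IOCTL stashes references to the fence drivers of fences
	 * this queue has waited on in userq->fence_drv_xa. Move them onto the
	 * new fence so they are released once it signals (see
	 * amdgpu_userq_fence_driver_process()).
	 */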
261 	if (!xa_empty(&userq->fence_drv_xa)) {
262 		struct amdgpu_userq_fence_driver *stored_fence_drv;
263 		unsigned long index, count = 0;
264 		int i = 0;
265 
266 		xa_lock(&userq->fence_drv_xa);
267 		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
268 			count++;
269 
270 		userq_fence->fence_drv_array =
271 			kvmalloc_array(count, sizeof(struct amdgpu_userq_fence_driver *),
272 				       GFP_ATOMIC);
273 
274 		if (userq_fence->fence_drv_array) {
275 			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
276 				userq_fence->fence_drv_array[i] = stored_fence_drv;
277 				__xa_erase(&userq->fence_drv_xa, index);
278 				i++;
279 			}
280 		}
281 
282 		userq_fence->fence_drv_array_count = i;
283 		xa_unlock(&userq->fence_drv_xa);
284 	} else {
285 		userq_fence->fence_drv_array = NULL;
286 		userq_fence->fence_drv_array_count = 0;
287 	}
288 
289 	/* Check if hardware has already processed the job */
290 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
291 	if (!dma_fence_is_signaled(fence))
292 		list_add_tail(&userq_fence->link, &fence_drv->fences);
293 	else
294 		dma_fence_put(fence);
295 
296 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
297 
298 	*f = fence;
299 
300 	return 0;
301 }
302 
303 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
304 {
305 	return "amdgpu_userq_fence";
306 }
307 
308 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
309 {
310 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
311 
312 	return fence->fence_drv->timeline_name;
313 }
314 
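/*
 * A userq fence is signaled once the completion value in seq64 memory (rptr)
 * has caught up with the fence's seqno, i.e. the queue wptr captured when the
 * fence was created.
 */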
315 static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
316 {
317 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
318 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
319 	u64 rptr, wptr;
320 
321 	rptr = amdgpu_userq_fence_read(fence_drv);
322 	wptr = fence->base.seqno;
323 
324 	if (rptr >= wptr)
325 		return true;
326 
327 	return false;
328 }
329 
330 static void amdgpu_userq_fence_free(struct rcu_head *rcu)
331 {
332 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
333 	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
334 	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
335 
336 	/* Release the fence driver reference */
337 	amdgpu_userq_fence_driver_put(fence_drv);
338 
339 	kvfree(userq_fence->fence_drv_array);
340 	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
341 }
342 
343 static void amdgpu_userq_fence_release(struct dma_fence *f)
344 {
345 	call_rcu(&f->rcu, amdgpu_userq_fence_free);
346 }
347 
348 static const struct dma_fence_ops amdgpu_userq_fence_ops = {
349 	.get_driver_name = amdgpu_userq_fence_get_driver_name,
350 	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
351 	.signaled = amdgpu_userq_fence_signaled,
352 	.release = amdgpu_userq_fence_release,
353 };
354 
355 /**
356  * amdgpu_userq_fence_read_wptr - Read the userq wptr value
357  *
358  * @adev: amdgpu_device pointer
359  * @queue: user mode queue structure pointer
360  * @wptr: write pointer value
361  *
362  * Read the wptr value from the userq's MQD. The userq signal IOCTL
363  * creates a dma_fence for the shared buffers that is considered
364  * signaled once the RPTR value written to seq64 memory reaches (>=) this WPTR.
365  *
366  * Returns 0 on success, with the wptr value stored in @wptr, or a negative error code on failure.
367  */
368 static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
369 					struct amdgpu_usermode_queue *queue,
370 					u64 *wptr)
371 {
372 	struct amdgpu_bo_va_mapping *mapping;
373 	struct amdgpu_bo *bo;
374 	u64 addr, *ptr;
375 	int r;
376 
377 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
378 	if (r)
379 		return r;
380 
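	/*
	 * The wptr lives in memory mapped into the queue's VM. Strip the
	 * high VA hole bits, look up the backing BO through the VM mapping
	 * and kmap it to read the current value.
	 */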
381 	addr = queue->userq_prop->wptr_gpu_addr;
382 	addr &= AMDGPU_GMC_HOLE_MASK;
383 
384 	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
385 	if (!mapping) {
386 		amdgpu_bo_unreserve(queue->vm->root.bo);
387 		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
388 		return -EINVAL;
389 	}
390 
391 	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
392 	amdgpu_bo_unreserve(queue->vm->root.bo);
393 	r = amdgpu_bo_reserve(bo, true);
394 	if (r) {
395 		amdgpu_bo_unref(&bo);
396 		DRM_ERROR("Failed to reserve userqueue wptr bo");
397 		return r;
398 	}
399 
400 	r = amdgpu_bo_kmap(bo, (void **)&ptr);
401 	if (r) {
402 		DRM_ERROR("Failed mapping the userqueue wptr bo");
403 		goto map_error;
404 	}
405 
406 	*wptr = le64_to_cpu(*ptr);
407 
408 	amdgpu_bo_kunmap(bo);
409 	amdgpu_bo_unreserve(bo);
410 	amdgpu_bo_unref(&bo);
411 
412 	return 0;
413 
414 map_error:
415 	amdgpu_bo_unreserve(bo);
416 	amdgpu_bo_unref(&bo);
417 
418 	return r;
419 }
420 
421 static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
422 {
423 	dma_fence_put(fence);
424 }
425 
426 static void
427 amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
428 				    int error)
429 {
430 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
431 	unsigned long flags;
432 	struct dma_fence *f;
433 
434 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
435 
436 	f = rcu_dereference_protected(&fence->base,
437 				      lockdep_is_held(&fence_drv->fence_list_lock));
438 	if (f && !dma_fence_is_signaled_locked(f))
439 		dma_fence_set_error(f, error);
440 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
441 }
442 
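/*
 * Force completion of the queue's last fence, e.g. when the queue is reset or
 * torn down: mark it with -ECANCELED, write its seqno into the seq64 slot and
 * let the normal processing path signal everything up to it.
 */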
443 void
444 amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
445 {
446 	struct dma_fence *f = userq->last_fence;
447 
448 	if (f) {
449 		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
450 		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
451 		u64 wptr = fence->base.seqno;
452 
453 		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
454 		amdgpu_userq_fence_write(fence_drv, wptr);
455 		amdgpu_userq_fence_driver_process(fence_drv);
456 
457 	}
458 }
459 
460 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
461 			      struct drm_file *filp)
462 {
463 	struct amdgpu_device *adev = drm_to_adev(dev);
464 	struct drm_amdgpu_userq_signal *args = data;
465 	const unsigned int num_write_bo_handles = args->num_bo_write_handles;
466 	const unsigned int num_read_bo_handles = args->num_bo_read_handles;
467 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
468 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
469 	struct drm_gem_object **gobj_write, **gobj_read;
470 	u32 *syncobj_handles, num_syncobj_handles;
471 	struct amdgpu_userq_fence *userq_fence;
472 	struct amdgpu_usermode_queue *queue;
473 	struct drm_syncobj **syncobj = NULL;
474 	struct dma_fence *fence;
475 	struct drm_exec exec;
476 	int r, i, entry;
477 	u64 wptr;
478 
479 	if (!amdgpu_userq_enabled(dev))
480 		return -ENOTSUPP;
481 
482 	if (args->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
483 	    args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
484 	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
485 		return -EINVAL;
486 
487 	num_syncobj_handles = args->num_syncobj_handles;
488 	syncobj_handles = memdup_array_user(u64_to_user_ptr(args->syncobj_handles),
489 					    num_syncobj_handles, sizeof(u32));
490 	if (IS_ERR(syncobj_handles))
491 		return PTR_ERR(syncobj_handles);
492 
493 	/* Array of pointers to the looked up syncobjs */
494 	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
495 	if (!syncobj) {
496 		r = -ENOMEM;
497 		goto free_syncobj_handles;
498 	}
499 
500 	for (entry = 0; entry < num_syncobj_handles; entry++) {
501 		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
502 		if (!syncobj[entry]) {
503 			r = -ENOENT;
504 			goto free_syncobj;
505 		}
506 	}
507 
508 	r = drm_gem_objects_lookup(filp,
509 				   u64_to_user_ptr(args->bo_read_handles),
510 				   num_read_bo_handles,
511 				   &gobj_read);
512 	if (r)
513 		goto free_syncobj;
514 
515 	r = drm_gem_objects_lookup(filp,
516 				   u64_to_user_ptr(args->bo_write_handles),
517 				   num_write_bo_handles,
518 				   &gobj_write);
519 	if (r)
520 		goto put_gobj_read;
521 
522 	/* Retrieve the user queue */
523 	queue = xa_load(&userq_mgr->userq_xa, args->queue_id);
524 	if (!queue) {
525 		r = -ENOENT;
526 		goto put_gobj_write;
527 	}
528 
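	/*
	 * The new fence uses the queue's current wptr as its seqno, so it
	 * signals once the queue has written a completion value >= wptr into
	 * its seq64 slot (see amdgpu_userq_fence_read_wptr() above).
	 */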
529 	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
530 	if (r)
531 		goto put_gobj_write;
532 
533 	r = amdgpu_userq_fence_alloc(&userq_fence);
534 	if (r)
535 		goto put_gobj_write;
536 
537 	/* The user queue is active at this point; make sure the eviction fence is valid */
538 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
539 
540 	/* Create a new fence */
541 	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
542 	if (r) {
543 		mutex_unlock(&userq_mgr->userq_mutex);
544 		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
545 		goto put_gobj_write;
546 	}
547 
548 	dma_fence_put(queue->last_fence);
549 	queue->last_fence = dma_fence_get(fence);
550 	amdgpu_userq_start_hang_detect_work(queue);
551 	mutex_unlock(&userq_mgr->userq_mutex);
552 
553 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
554 		      (num_read_bo_handles + num_write_bo_handles));
555 
556 	/* Lock all BOs with retry handling */
557 	drm_exec_until_all_locked(&exec) {
558 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
559 		drm_exec_retry_on_contention(&exec);
560 		if (r) {
561 			amdgpu_userq_fence_cleanup(fence);
562 			goto exec_fini;
563 		}
564 
565 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
566 		drm_exec_retry_on_contention(&exec);
567 		if (r) {
568 			amdgpu_userq_fence_cleanup(fence);
569 			goto exec_fini;
570 		}
571 	}
572 
573 	for (i = 0; i < num_read_bo_handles; i++) {
574 		if (!gobj_read || !gobj_read[i]->resv)
575 			continue;
576 
577 		dma_resv_add_fence(gobj_read[i]->resv, fence,
578 				   DMA_RESV_USAGE_READ);
579 	}
580 
581 	for (i = 0; i < num_write_bo_handles; i++) {
582 		if (!gobj_write || !gobj_write[i]->resv)
583 			continue;
584 
585 		dma_resv_add_fence(gobj_write[i]->resv, fence,
586 				   DMA_RESV_USAGE_WRITE);
587 	}
588 
589 	/* Attach the created fence to the syncobjs */
590 	for (i = 0; i < num_syncobj_handles; i++)
591 		drm_syncobj_replace_fence(syncobj[i], fence);
592 
593 	/* Drop the reference acquired in the fence creation function */
594 	dma_fence_put(fence);
595 
596 exec_fini:
597 	drm_exec_fini(&exec);
598 put_gobj_write:
599 	for (i = 0; i < num_write_bo_handles; i++)
600 		drm_gem_object_put(gobj_write[i]);
601 	kfree(gobj_write);
602 put_gobj_read:
603 	for (i = 0; i < num_read_bo_handles; i++)
604 		drm_gem_object_put(gobj_read[i]);
605 	kfree(gobj_read);
606 free_syncobj:
607 	while (entry-- > 0)
608 		if (syncobj[entry])
609 			drm_syncobj_put(syncobj[entry]);
610 	kfree(syncobj);
611 free_syncobj_handles:
612 	kfree(syncobj_handles);
613 
614 	return r;
615 }
616 
617 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
618 			    struct drm_file *filp)
619 {
620 	struct drm_amdgpu_userq_wait *wait_info = data;
621 	const unsigned int num_write_bo_handles = wait_info->num_bo_write_handles;
622 	const unsigned int num_read_bo_handles = wait_info->num_bo_read_handles;
623 	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
624 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
625 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
626 	struct drm_gem_object **gobj_write, **gobj_read;
627 	u32 *timeline_points, *timeline_handles;
628 	struct amdgpu_usermode_queue *waitq;
629 	u32 *syncobj_handles, num_syncobj;
630 	struct dma_fence **fences = NULL;
631 	u16 num_points, num_fences = 0;
632 	struct drm_exec exec;
633 	int r, i, cnt;
634 
635 	if (!amdgpu_userq_enabled(dev))
636 		return -ENOTSUPP;
637 
638 	if (wait_info->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
639 	    wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
640 	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
641 		return -EINVAL;
642 
643 	num_syncobj = wait_info->num_syncobj_handles;
644 	syncobj_handles = memdup_array_user(u64_to_user_ptr(wait_info->syncobj_handles),
645 					    num_syncobj, sizeof(u32));
646 	if (IS_ERR(syncobj_handles))
647 		return PTR_ERR(syncobj_handles);
648 
649 
650 	num_points = wait_info->num_syncobj_timeline_handles;
651 	timeline_handles = memdup_array_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
652 					     num_points, sizeof(u32));
653 	if (IS_ERR(timeline_handles)) {
654 		r = PTR_ERR(timeline_handles);
655 		goto free_syncobj_handles;
656 	}
657 
658 	timeline_points = memdup_array_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
659 					    num_points, sizeof(u32));
660 
661 	if (IS_ERR(timeline_points)) {
662 		r = PTR_ERR(timeline_points);
663 		goto free_timeline_handles;
664 	}
665 
666 	r = drm_gem_objects_lookup(filp,
667 				   u64_to_user_ptr(wait_info->bo_read_handles),
668 				   num_read_bo_handles,
669 				   &gobj_read);
670 	if (r)
671 		goto free_timeline_points;
672 
673 	r = drm_gem_objects_lookup(filp,
674 				   u64_to_user_ptr(wait_info->bo_write_handles),
675 				   num_write_bo_handles,
676 				   &gobj_write);
677 	if (r)
678 		goto put_gobj_read;
679 
680 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
681 		      (num_read_bo_handles + num_write_bo_handles));
682 
683 	/* Lock all BOs with retry handling */
684 	drm_exec_until_all_locked(&exec) {
685 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
686 		drm_exec_retry_on_contention(&exec);
687 		if (r) {
688 			drm_exec_fini(&exec);
689 			goto put_gobj_write;
690 		}
691 
692 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
693 		drm_exec_retry_on_contention(&exec);
694 		if (r) {
695 			drm_exec_fini(&exec);
696 			goto put_gobj_write;
697 		}
698 	}
699 
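	/*
	 * Two-pass protocol: when userspace passes num_fences == 0, only the
	 * number of relevant fences is counted and returned; a second call
	 * with a suitably sized buffer then receives the (va, value) pairs,
	 * which userspace can use, e.g., to make its own queue wait until the
	 * value at va reaches the expected fence value.
	 */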
700 	if (!wait_info->num_fences) {
701 		if (num_points) {
702 			struct dma_fence_unwrap iter;
703 			struct dma_fence *fence;
704 			struct dma_fence *f;
705 
706 			for (i = 0; i < num_points; i++) {
707 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
708 							   timeline_points[i],
709 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
710 							   &fence);
711 				if (r)
712 					goto exec_fini;
713 
714 				dma_fence_unwrap_for_each(f, &iter, fence)
715 					num_fences++;
716 
717 				dma_fence_put(fence);
718 			}
719 		}
720 
721 		/* Count the syncobj fences */
722 		for (i = 0; i < num_syncobj; i++) {
723 			struct dma_fence *fence;
724 
725 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
726 						   0,
727 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
728 						   &fence);
729 			if (r)
730 				goto exec_fini;
731 
732 			num_fences++;
733 			dma_fence_put(fence);
734 		}
735 
736 		/* Count the GEM object fences */
737 		for (i = 0; i < num_read_bo_handles; i++) {
738 			struct dma_resv_iter resv_cursor;
739 			struct dma_fence *fence;
740 
741 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
742 						DMA_RESV_USAGE_READ, fence)
743 				num_fences++;
744 		}
745 
746 		for (i = 0; i < num_write_bo_handles; i++) {
747 			struct dma_resv_iter resv_cursor;
748 			struct dma_fence *fence;
749 
750 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
751 						DMA_RESV_USAGE_WRITE, fence)
752 				num_fences++;
753 		}
754 
755 		/*
756 		 * Passing num_fences = 0 means that userspace doesn't want to
757 		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
758 		 * userq_fence_info and return the actual number of fences in
759 		 * wait_info->num_fences.
760 		 */
761 		wait_info->num_fences = num_fences;
762 	} else {
763 		/* Array of fence info */
764 		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
765 		if (!fence_info) {
766 			r = -ENOMEM;
767 			goto exec_fini;
768 		}
769 
770 		/* Array of fences */
771 		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
772 		if (!fences) {
773 			r = -ENOMEM;
774 			goto free_fence_info;
775 		}
776 
777 		/* Retrieve the GEM read objects' fences */
778 		for (i = 0; i < num_read_bo_handles; i++) {
779 			struct dma_resv_iter resv_cursor;
780 			struct dma_fence *fence;
781 
782 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
783 						DMA_RESV_USAGE_READ, fence) {
784 				if (num_fences >= wait_info->num_fences) {
785 					r = -EINVAL;
786 					goto free_fences;
787 				}
788 
789 				fences[num_fences++] = fence;
790 				dma_fence_get(fence);
791 			}
792 		}
793 
794 		/* Retrieve the GEM write objects' fences */
795 		for (i = 0; i < num_write_bo_handles; i++) {
796 			struct dma_resv_iter resv_cursor;
797 			struct dma_fence *fence;
798 
799 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
800 						DMA_RESV_USAGE_WRITE, fence) {
801 				if (num_fences >= wait_info->num_fences) {
802 					r = -EINVAL;
803 					goto free_fences;
804 				}
805 
806 				fences[num_fences++] = fence;
807 				dma_fence_get(fence);
808 			}
809 		}
810 
811 		if (num_points) {
812 			struct dma_fence_unwrap iter;
813 			struct dma_fence *fence;
814 			struct dma_fence *f;
815 
816 			for (i = 0; i < num_points; i++) {
817 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
818 							   timeline_points[i],
819 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
820 							   &fence);
821 				if (r)
822 					goto free_fences;
823 
824 				dma_fence_unwrap_for_each(f, &iter, fence) {
825 					if (num_fences >= wait_info->num_fences) {
826 						r = -EINVAL;
827 						dma_fence_put(fence);
828 						goto free_fences;
829 					}
830 
831 					dma_fence_get(f);
832 					fences[num_fences++] = f;
833 				}
834 
835 				dma_fence_put(fence);
836 			}
837 		}
838 
839 		/* Retrieve the syncobj fences */
840 		for (i = 0; i < num_syncobj; i++) {
841 			struct dma_fence *fence;
842 
843 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
844 						   0,
845 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
846 						   &fence);
847 			if (r)
848 				goto free_fences;
849 
850 			if (num_fences >= wait_info->num_fences) {
851 				r = -EINVAL;
852 				dma_fence_put(fence);
853 				goto free_fences;
854 			}
855 
856 			fences[num_fences++] = fence;
857 		}
858 
859 		/*
860 		 * Keep only the latest fences to reduce the number of values
861 		 * given back to userspace.
862 		 */
863 		num_fences = dma_fence_dedup_array(fences, num_fences);
864 
865 		waitq = xa_load(&userq_mgr->userq_xa, wait_info->waitq_id);
866 		if (!waitq) {
867 			r = -EINVAL;
868 			goto free_fences;
869 		}
870 
871 		for (i = 0, cnt = 0; i < num_fences; i++) {
872 			struct amdgpu_userq_fence_driver *fence_drv;
873 			struct amdgpu_userq_fence *userq_fence;
874 			u32 index;
875 
876 			userq_fence = to_amdgpu_userq_fence(fences[i]);
877 			if (!userq_fence) {
878 				/*
879 				 * Just waiting on other driver fences should
880 				 * be good for now
881 				 */
882 				r = dma_fence_wait(fences[i], true);
883 				if (r) {
884 					dma_fence_put(fences[i]);
885 					goto free_fences;
886 				}
887 
888 				dma_fence_put(fences[i]);
889 				continue;
890 			}
891 
892 			fence_drv = userq_fence->fence_drv;
893 			/*
894 			 * We need to make sure the user queue releases its references
895 			 * to the fence drivers at some point before queue destruction.
896 			 * Otherwise, we would accumulate those references until we run
897 			 * out of space and crash.
898 			 */
899 			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
900 				     xa_limit_32b, GFP_KERNEL);
901 			if (r)
902 				goto free_fences;
903 
904 			amdgpu_userq_fence_driver_get(fence_drv);
905 
906 			/* Store the fence driver's seq64 GPU VA and the expected fence value */
907 			fence_info[cnt].va = fence_drv->va;
908 			fence_info[cnt].value = fences[i]->seqno;
909 
910 			dma_fence_put(fences[i]);
911 			/* Increment the actual userq fence count */
912 			cnt++;
913 		}
914 
915 		wait_info->num_fences = cnt;
916 		/* Copy userq fence info to user space */
917 		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
918 				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
919 			r = -EFAULT;
920 			goto free_fences;
921 		}
922 	}
923 
924 free_fences:
925 	if (fences) {
926 		while (num_fences-- > 0)
927 			dma_fence_put(fences[num_fences]);
928 		kfree(fences);
929 	}
930 free_fence_info:
931 	kfree(fence_info);
932 exec_fini:
933 	drm_exec_fini(&exec);
934 put_gobj_write:
935 	for (i = 0; i < num_write_bo_handles; i++)
936 		drm_gem_object_put(gobj_write[i]);
937 	kfree(gobj_write);
938 put_gobj_read:
939 	for (i = 0; i < num_read_bo_handles; i++)
940 		drm_gem_object_put(gobj_read[i]);
941 	kfree(gobj_read);
942 free_timeline_points:
943 	kfree(timeline_points);
944 free_timeline_handles:
945 	kfree(timeline_handles);
946 free_syncobj_handles:
947 	kfree(syncobj_handles);
948 
949 	return r;
950 }
951