xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kref.h>
26 #include <linux/slab.h>
27 #include <linux/dma-fence-unwrap.h>
28 
29 #include <drm/drm_exec.h>
30 #include <drm/drm_syncobj.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_userq_fence.h"
34 
35 static const struct dma_fence_ops amdgpu_userq_fence_ops;
36 static struct kmem_cache *amdgpu_userq_fence_slab;
37 
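/*
 * Upper bound on the number of BO read/write handles a single signal/wait
 * ioctl may reference; user-supplied counts above this are rejected with
 * -EINVAL below.
 */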
38 #define AMDGPU_USERQ_MAX_HANDLES	(1U << 16)
39 
40 int amdgpu_userq_fence_slab_init(void)
41 {
42 	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
43 						    sizeof(struct amdgpu_userq_fence),
44 						    0,
45 						    SLAB_HWCACHE_ALIGN,
46 						    NULL);
47 	if (!amdgpu_userq_fence_slab)
48 		return -ENOMEM;
49 
50 	return 0;
51 }
52 
53 void amdgpu_userq_fence_slab_fini(void)
54 {
55 	rcu_barrier();
56 	kmem_cache_destroy(amdgpu_userq_fence_slab);
57 }
58 
59 static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
60 {
61 	if (!f || f->ops != &amdgpu_userq_fence_ops)
62 		return NULL;
63 
64 	return container_of(f, struct amdgpu_userq_fence, base);
65 }
66 
67 static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
68 {
69 	return le64_to_cpu(*fence_drv->cpu_addr);
70 }
71 
72 static void
73 amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
74 			 u64 seq)
75 {
76 	if (fence_drv->cpu_addr)
77 		*fence_drv->cpu_addr = cpu_to_le64(seq);
78 }
79 
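/*
 * Allocate and initialize the per-queue fence driver: reserve a seq64 slot
 * (GPU and CPU addresses), set up the pending-fence list, allocate a new
 * dma-fence context and register the driver in adev->userq_xa keyed by the
 * queue's doorbell index. Returns 0 on success or a negative error code.
 */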
80 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
81 				    struct amdgpu_usermode_queue *userq)
82 {
83 	struct amdgpu_userq_fence_driver *fence_drv;
84 	unsigned long flags;
85 	int r;
86 
87 	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
88 	if (!fence_drv)
89 		return -ENOMEM;
90 
91 	/* Acquire seq64 memory */
92 	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
93 			       &fence_drv->cpu_addr);
94 	if (r)
95 		goto free_fence_drv;
96 
97 	memset(fence_drv->cpu_addr, 0, sizeof(u64));
98 
99 	kref_init(&fence_drv->refcount);
100 	INIT_LIST_HEAD(&fence_drv->fences);
101 	spin_lock_init(&fence_drv->fence_list_lock);
102 
103 	fence_drv->adev = adev;
104 	fence_drv->context = dma_fence_context_alloc(1);
105 	get_task_comm(fence_drv->timeline_name, current);
106 
107 	xa_lock_irqsave(&adev->userq_xa, flags);
108 	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
109 			      fence_drv, GFP_KERNEL));
110 	xa_unlock_irqrestore(&adev->userq_xa, flags);
111 	if (r)
112 		goto free_seq64;
113 
114 	userq->fence_drv = fence_drv;
115 
116 	return 0;
117 
118 free_seq64:
119 	amdgpu_seq64_free(adev, fence_drv->va);
120 free_fence_drv:
121 	kfree(fence_drv);
122 
123 	return r;
124 }
125 
126 static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
127 {
128 	struct amdgpu_userq_fence_driver *fence_drv;
129 	unsigned long index;
130 
131 	if (xa_empty(xa))
132 		return;
133 
134 	xa_lock(xa);
135 	xa_for_each(xa, index, fence_drv) {
136 		__xa_erase(xa, index);
137 		amdgpu_userq_fence_driver_put(fence_drv);
138 	}
139 
140 	xa_unlock(xa);
141 }
142 
143 void
144 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
145 {
146 	dma_fence_put(userq->last_fence);
147 
148 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
149 	xa_destroy(&userq->fence_drv_xa);
150 	/* Drop the fence_drv reference held by user queue */
151 	amdgpu_userq_fence_driver_put(userq->fence_drv);
152 }
153 
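/*
 * Walk the driver's pending-fence list and signal every fence whose seqno
 * has been reached by the value currently stored in the seq64 slot, dropping
 * the list references (fence and fence_drv_array entries) as we go.
 */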
154 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
155 {
156 	struct amdgpu_userq_fence *userq_fence, *tmp;
157 	struct dma_fence *fence;
158 	unsigned long flags;
159 	u64 rptr;
160 	int i;
161 
162 	if (!fence_drv)
163 		return;
164 
165 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
166 	rptr = amdgpu_userq_fence_read(fence_drv);
167 
168 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
169 		fence = &userq_fence->base;
170 
171 		if (rptr < fence->seqno)
172 			break;
173 
174 		dma_fence_signal(fence);
175 
176 		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
177 			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
178 
179 		list_del(&userq_fence->link);
180 		dma_fence_put(fence);
181 	}
182 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
183 }
184 
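/*
 * kref release callback: cancel any fences that are still pending with
 * -ECANCELED, drop the driver from adev->userq_xa, release the seq64 slot
 * and free the structure.
 */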
185 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
186 {
187 	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
188 					 struct amdgpu_userq_fence_driver,
189 					 refcount);
190 	struct amdgpu_userq_fence_driver *xa_fence_drv;
191 	struct amdgpu_device *adev = fence_drv->adev;
192 	struct amdgpu_userq_fence *fence, *tmp;
193 	struct xarray *xa = &adev->userq_xa;
194 	unsigned long index, flags;
195 	struct dma_fence *f;
196 
197 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
198 	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
199 		f = &fence->base;
200 
201 		if (!dma_fence_is_signaled(f)) {
202 			dma_fence_set_error(f, -ECANCELED);
203 			dma_fence_signal(f);
204 		}
205 
206 		list_del(&fence->link);
207 		dma_fence_put(f);
208 	}
209 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
210 
211 	xa_lock_irqsave(xa, flags);
212 	xa_for_each(xa, index, xa_fence_drv)
213 		if (xa_fence_drv == fence_drv)
214 			__xa_erase(xa, index);
215 	xa_unlock_irqrestore(xa, flags);
216 
217 	/* Free seq64 memory */
218 	amdgpu_seq64_free(adev, fence_drv->va);
219 	kfree(fence_drv);
220 }
221 
222 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
223 {
224 	kref_get(&fence_drv->refcount);
225 }
226 
227 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
228 {
229 	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
230 }
231 
232 static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
233 {
234 	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
235 	return *userq_fence ? 0 : -ENOMEM;
236 }
237 
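/*
 * Initialize @userq_fence as a 64-bit seqno dma-fence in the queue's fence
 * context, using @seq (the caller passes the queue's current wptr) as the
 * seqno. Any fence driver references previously stashed in
 * userq->fence_drv_xa are moved into fence_drv_array so they are released
 * once this fence signals. The fence is queued on the driver's pending list
 * unless it is already signaled.
 */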
238 static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
239 				     struct amdgpu_userq_fence *userq_fence,
240 				     u64 seq, struct dma_fence **f)
241 {
242 	struct amdgpu_userq_fence_driver *fence_drv;
243 	struct dma_fence *fence;
244 	unsigned long flags;
245 
246 	fence_drv = userq->fence_drv;
247 	if (!fence_drv)
248 		return -EINVAL;
249 
250 	spin_lock_init(&userq_fence->lock);
251 	INIT_LIST_HEAD(&userq_fence->link);
252 	fence = &userq_fence->base;
253 	userq_fence->fence_drv = fence_drv;
254 
255 	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
256 			 fence_drv->context, seq);
257 
258 	amdgpu_userq_fence_driver_get(fence_drv);
259 	dma_fence_get(fence);
260 
261 	if (!xa_empty(&userq->fence_drv_xa)) {
262 		struct amdgpu_userq_fence_driver *stored_fence_drv;
263 		unsigned long index, count = 0;
264 		int i = 0;
265 
266 		xa_lock(&userq->fence_drv_xa);
267 		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
268 			count++;
269 
270 		userq_fence->fence_drv_array =
271 			kvmalloc_array(count, sizeof(*userq_fence->fence_drv_array),
272 				       GFP_ATOMIC);
273 
274 		if (userq_fence->fence_drv_array) {
275 			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
276 				userq_fence->fence_drv_array[i] = stored_fence_drv;
277 				__xa_erase(&userq->fence_drv_xa, index);
278 				i++;
279 			}
280 		}
281 
282 		userq_fence->fence_drv_array_count = i;
283 		xa_unlock(&userq->fence_drv_xa);
284 	} else {
285 		userq_fence->fence_drv_array = NULL;
286 		userq_fence->fence_drv_array_count = 0;
287 	}
288 
289 	/* Check if hardware has already processed the job */
290 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
291 	if (!dma_fence_is_signaled(fence))
292 		list_add_tail(&userq_fence->link, &fence_drv->fences);
293 	else
294 		dma_fence_put(fence);
295 
296 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
297 
298 	*f = fence;
299 
300 	return 0;
301 }
302 
303 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
304 {
305 	return "amdgpu_userq_fence";
306 }
307 
308 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
309 {
310 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
311 
312 	return fence->fence_drv->timeline_name;
313 }
314 
315 static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
316 {
317 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
318 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
319 	u64 rptr, wptr;
320 
321 	rptr = amdgpu_userq_fence_read(fence_drv);
322 	wptr = fence->base.seqno;
323 
324 	if (rptr >= wptr)
325 		return true;
326 
327 	return false;
328 }
329 
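/*
 * RCU callback used by the fence .release hook: by the time this runs, RCU
 * readers can no longer be accessing the fence, so it is safe to drop the
 * fence driver reference and free the fence_drv_array and the slab object.
 */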
330 static void amdgpu_userq_fence_free(struct rcu_head *rcu)
331 {
332 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
333 	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
334 	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
335 
336 	/* Release the fence driver reference */
337 	amdgpu_userq_fence_driver_put(fence_drv);
338 
339 	kvfree(userq_fence->fence_drv_array);
340 	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
341 }
342 
343 static void amdgpu_userq_fence_release(struct dma_fence *f)
344 {
345 	call_rcu(&f->rcu, amdgpu_userq_fence_free);
346 }
347 
348 static const struct dma_fence_ops amdgpu_userq_fence_ops = {
349 	.get_driver_name = amdgpu_userq_fence_get_driver_name,
350 	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
351 	.signaled = amdgpu_userq_fence_signaled,
352 	.release = amdgpu_userq_fence_release,
353 };
354 
355 /**
356  * amdgpu_userq_fence_read_wptr - Read the userq wptr value
357  *
358  * @adev: amdgpu_device pointer
359  * @queue: user mode queue structure pointer
360  * @wptr: write pointer value
361  *
362  * Read the wptr value from the userq's MQD. The userq signal IOCTL
363  * creates a dma_fence for the shared buffers which is considered
364  * signaled once the RPTR value written to seq64 memory is >= WPTR.
365  *
366  * Returns 0 on success and stores the wptr value in @wptr; negative error code on failure.
367  */
368 static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
369 					struct amdgpu_usermode_queue *queue,
370 					u64 *wptr)
371 {
372 	struct amdgpu_bo_va_mapping *mapping;
373 	struct amdgpu_bo *bo;
374 	u64 addr, *ptr;
375 	int r;
376 
377 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
378 	if (r)
379 		return r;
380 
381 	addr = queue->userq_prop->wptr_gpu_addr;
382 	addr &= AMDGPU_GMC_HOLE_MASK;
383 
384 	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
385 	if (!mapping) {
386 		amdgpu_bo_unreserve(queue->vm->root.bo);
387 		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
388 		return -EINVAL;
389 	}
390 
391 	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
392 	amdgpu_bo_unreserve(queue->vm->root.bo);
393 	r = amdgpu_bo_reserve(bo, true);
394 	if (r) {
395 		amdgpu_bo_unref(&bo);
396 		DRM_ERROR("Failed to reserve userqueue wptr bo");
397 		return r;
398 	}
399 
400 	r = amdgpu_bo_kmap(bo, (void **)&ptr);
401 	if (r) {
402 		DRM_ERROR("Failed mapping the userqueue wptr bo");
403 		goto map_error;
404 	}
405 
406 	*wptr = le64_to_cpu(*ptr);
407 
408 	amdgpu_bo_kunmap(bo);
409 	amdgpu_bo_unreserve(bo);
410 	amdgpu_bo_unref(&bo);
411 
412 	return 0;
413 
414 map_error:
415 	amdgpu_bo_unreserve(bo);
416 	amdgpu_bo_unref(&bo);
417 
418 	return r;
419 }
420 
421 static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
422 {
423 	dma_fence_put(fence);
424 }
425 
426 static void
427 amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
428 				    int error)
429 {
430 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
431 	unsigned long flags;
432 	struct dma_fence *f;
433 
434 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
435 
436 	f = rcu_dereference_protected(&fence->base,
437 				      lockdep_is_held(&fence_drv->fence_list_lock));
438 	if (f && !dma_fence_is_signaled_locked(f))
439 		dma_fence_set_error(f, error);
440 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
441 }
442 
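/*
 * Force-complete the queue's last fence: mark it with -ECANCELED, write its
 * seqno into the seq64 slot in place of the hardware update and run the
 * regular processing path so that it (and anything before it) signals.
 */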
443 void
444 amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
445 {
446 	struct dma_fence *f = userq->last_fence;
447 
448 	if (f) {
449 		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
450 		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
451 		u64 wptr = fence->base.seqno;
452 
453 		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
454 		amdgpu_userq_fence_write(fence_drv, wptr);
455 		amdgpu_userq_fence_driver_process(fence_drv);
457 	}
458 }
459 
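/*
 * AMDGPU_USERQ_SIGNAL ioctl: look up the syncobjs and the read/write BOs
 * passed in by userspace, read the queue's current wptr, create a userq
 * fence with that value as its seqno, attach the fence to the BOs'
 * reservation objects and install it in the syncobjs. The fence signals
 * once the queue writes a seq64 value >= the captured wptr.
 */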
460 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
461 			      struct drm_file *filp)
462 {
463 	struct amdgpu_device *adev = drm_to_adev(dev);
464 	struct drm_amdgpu_userq_signal *args = data;
465 	const unsigned int num_write_bo_handles = args->num_bo_write_handles;
466 	const unsigned int num_read_bo_handles = args->num_bo_read_handles;
467 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
468 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
469 	struct drm_gem_object **gobj_write, **gobj_read;
470 	u32 *syncobj_handles, num_syncobj_handles;
471 	struct amdgpu_userq_fence *userq_fence;
472 	struct amdgpu_usermode_queue *queue = NULL;
473 	struct drm_syncobj **syncobj = NULL;
474 	struct dma_fence *fence;
475 	struct drm_exec exec;
476 	int r, i, entry;
477 	u64 wptr;
478 
479 	if (!amdgpu_userq_enabled(dev))
480 		return -ENOTSUPP;
481 
482 	if (args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
483 	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
484 		return -EINVAL;
485 
486 	num_syncobj_handles = args->num_syncobj_handles;
487 	syncobj_handles = memdup_array_user(u64_to_user_ptr(args->syncobj_handles),
488 					    num_syncobj_handles, sizeof(u32));
489 	if (IS_ERR(syncobj_handles))
490 		return PTR_ERR(syncobj_handles);
491 
492 	/* Array of pointers to the looked up syncobjs */
493 	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
494 	if (!syncobj) {
495 		r = -ENOMEM;
496 		goto free_syncobj_handles;
497 	}
498 
499 	for (entry = 0; entry < num_syncobj_handles; entry++) {
500 		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
501 		if (!syncobj[entry]) {
502 			r = -ENOENT;
503 			goto free_syncobj;
504 		}
505 	}
506 
507 	r = drm_gem_objects_lookup(filp,
508 				   u64_to_user_ptr(args->bo_read_handles),
509 				   num_read_bo_handles,
510 				   &gobj_read);
511 	if (r)
512 		goto free_syncobj;
513 
514 	r = drm_gem_objects_lookup(filp,
515 				   u64_to_user_ptr(args->bo_write_handles),
516 				   num_write_bo_handles,
517 				   &gobj_write);
518 	if (r)
519 		goto put_gobj_read;
520 
521 	/* Retrieve the user queue */
522 	queue = amdgpu_userq_get(userq_mgr, args->queue_id);
523 	if (!queue) {
524 		r = -ENOENT;
525 		goto put_gobj_write;
526 	}
527 
528 	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
529 	if (r)
530 		goto put_gobj_write;
531 
532 	r = amdgpu_userq_fence_alloc(&userq_fence);
533 	if (r)
534 		goto put_gobj_write;
535 
536 	/* Being here means the user queue is active; make sure the eviction fence is valid */
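	/*
	 * Note: amdgpu_userq_ensure_ev_fence() is expected to return with
	 * userq_mutex held; the mutex_unlock() calls below rely on that.
	 */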
537 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
538 
539 	/* Create a new fence */
540 	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
541 	if (r) {
542 		mutex_unlock(&userq_mgr->userq_mutex);
543 		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
544 		goto put_gobj_write;
545 	}
546 
547 	dma_fence_put(queue->last_fence);
548 	queue->last_fence = dma_fence_get(fence);
549 	amdgpu_userq_start_hang_detect_work(queue);
550 	mutex_unlock(&userq_mgr->userq_mutex);
551 
552 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
553 		      (num_read_bo_handles + num_write_bo_handles));
554 
555 	/* Lock all BOs with retry handling */
556 	drm_exec_until_all_locked(&exec) {
557 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
558 		drm_exec_retry_on_contention(&exec);
559 		if (r) {
560 			amdgpu_userq_fence_cleanup(fence);
561 			goto exec_fini;
562 		}
563 
564 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
565 		drm_exec_retry_on_contention(&exec);
566 		if (r) {
567 			amdgpu_userq_fence_cleanup(fence);
568 			goto exec_fini;
569 		}
570 	}
571 
572 	for (i = 0; i < num_read_bo_handles; i++) {
573 		if (!gobj_read || !gobj_read[i]->resv)
574 			continue;
575 
576 		dma_resv_add_fence(gobj_read[i]->resv, fence,
577 				   DMA_RESV_USAGE_READ);
578 	}
579 
580 	for (i = 0; i < num_write_bo_handles; i++) {
581 		if (!gobj_write || !gobj_write[i]->resv)
582 			continue;
583 
584 		dma_resv_add_fence(gobj_write[i]->resv, fence,
585 				   DMA_RESV_USAGE_WRITE);
586 	}
587 
588 	/* Add the created fence to the syncobjs */
589 	for (i = 0; i < num_syncobj_handles; i++)
590 		drm_syncobj_replace_fence(syncobj[i], fence);
591 
592 	/* drop the reference acquired in fence creation function */
593 	dma_fence_put(fence);
594 
595 exec_fini:
596 	drm_exec_fini(&exec);
597 put_gobj_write:
598 	for (i = 0; i < num_write_bo_handles; i++)
599 		drm_gem_object_put(gobj_write[i]);
600 	kfree(gobj_write);
601 put_gobj_read:
602 	for (i = 0; i < num_read_bo_handles; i++)
603 		drm_gem_object_put(gobj_read[i]);
604 	kfree(gobj_read);
605 free_syncobj:
606 	while (entry-- > 0)
607 		if (syncobj[entry])
608 			drm_syncobj_put(syncobj[entry]);
609 	kfree(syncobj);
610 free_syncobj_handles:
611 	kfree(syncobj_handles);
612 
613 	if (queue)
614 		amdgpu_userq_put(queue);
615 
616 	return r;
617 }
618 
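/*
 * AMDGPU_USERQ_WAIT ioctl: gather the fences attached to the given BOs,
 * syncobjs and timeline syncobj points. With wait_info->num_fences == 0
 * only the number of fences is reported back; otherwise the fences are
 * collected and deduplicated, userq fences are returned to userspace as
 * (seq64 va, value) pairs to wait on directly, the corresponding fence
 * driver references are stashed on the wait queue's fence_drv_xa, and any
 * non-userq fences are waited on here in the kernel.
 */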
619 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
620 			    struct drm_file *filp)
621 {
622 	struct drm_amdgpu_userq_wait *wait_info = data;
623 	const unsigned int num_write_bo_handles = wait_info->num_bo_write_handles;
624 	const unsigned int num_read_bo_handles = wait_info->num_bo_read_handles;
625 	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
626 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
627 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
628 	struct drm_gem_object **gobj_write, **gobj_read;
629 	u32 *timeline_points, *timeline_handles;
630 	struct amdgpu_usermode_queue *waitq = NULL;
631 	u32 *syncobj_handles, num_syncobj;
632 	struct dma_fence **fences = NULL;
633 	u16 num_points, num_fences = 0;
634 	struct drm_exec exec;
635 	int r, i, cnt;
636 
637 	if (!amdgpu_userq_enabled(dev))
638 		return -ENOTSUPP;
639 
640 	if (wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
641 	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
642 		return -EINVAL;
643 
644 	num_syncobj = wait_info->num_syncobj_handles;
645 	syncobj_handles = memdup_array_user(u64_to_user_ptr(wait_info->syncobj_handles),
646 					    num_syncobj, sizeof(u32));
647 	if (IS_ERR(syncobj_handles))
648 		return PTR_ERR(syncobj_handles);
649 
651 	num_points = wait_info->num_syncobj_timeline_handles;
652 	timeline_handles = memdup_array_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
653 					     num_points, sizeof(u32));
654 	if (IS_ERR(timeline_handles)) {
655 		r = PTR_ERR(timeline_handles);
656 		goto free_syncobj_handles;
657 	}
658 
659 	timeline_points = memdup_array_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
660 					    num_points, sizeof(u32));
661 
662 	if (IS_ERR(timeline_points)) {
663 		r = PTR_ERR(timeline_points);
664 		goto free_timeline_handles;
665 	}
666 
667 	r = drm_gem_objects_lookup(filp,
668 				   u64_to_user_ptr(wait_info->bo_read_handles),
669 				   num_read_bo_handles,
670 				   &gobj_read);
671 	if (r)
672 		goto free_timeline_points;
673 
674 	r = drm_gem_objects_lookup(filp,
675 				   u64_to_user_ptr(wait_info->bo_write_handles),
676 				   num_write_bo_handles,
677 				   &gobj_write);
678 	if (r)
679 		goto put_gobj_read;
680 
681 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
682 		      (num_read_bo_handles + num_write_bo_handles));
683 
684 	/* Lock all BOs with retry handling */
685 	drm_exec_until_all_locked(&exec) {
686 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
687 		drm_exec_retry_on_contention(&exec);
688 		if (r) {
689 			drm_exec_fini(&exec);
690 			goto put_gobj_write;
691 		}
692 
693 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
694 		drm_exec_retry_on_contention(&exec);
695 		if (r) {
696 			drm_exec_fini(&exec);
697 			goto put_gobj_write;
698 		}
699 	}
700 
701 	if (!wait_info->num_fences) {
702 		if (num_points) {
703 			struct dma_fence_unwrap iter;
704 			struct dma_fence *fence;
705 			struct dma_fence *f;
706 
707 			for (i = 0; i < num_points; i++) {
708 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
709 							   timeline_points[i],
710 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
711 							   &fence);
712 				if (r)
713 					goto exec_fini;
714 
715 				dma_fence_unwrap_for_each(f, &iter, fence)
716 					num_fences++;
717 
718 				dma_fence_put(fence);
719 			}
720 		}
721 
722 		/* Count syncobj's fence */
723 		for (i = 0; i < num_syncobj; i++) {
724 			struct dma_fence *fence;
725 
726 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
727 						   0,
728 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
729 						   &fence);
730 			if (r)
731 				goto exec_fini;
732 
733 			num_fences++;
734 			dma_fence_put(fence);
735 		}
736 
737 		/* Count GEM objects fence */
738 		for (i = 0; i < num_read_bo_handles; i++) {
739 			struct dma_resv_iter resv_cursor;
740 			struct dma_fence *fence;
741 
742 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
743 						DMA_RESV_USAGE_READ, fence)
744 				num_fences++;
745 		}
746 
747 		for (i = 0; i < num_write_bo_handles; i++) {
748 			struct dma_resv_iter resv_cursor;
749 			struct dma_fence *fence;
750 
751 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
752 						DMA_RESV_USAGE_WRITE, fence)
753 				num_fences++;
754 		}
755 
756 		/*
757 		 * Passing num_fences = 0 means that userspace doesn't want to
758 		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
759 		 * userq_fence_info and return the actual number of fences in
760 		 * wait_info->num_fences.
761 		 */
762 		wait_info->num_fences = num_fences;
763 	} else {
764 		/* Array of fence info */
765 		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
766 		if (!fence_info) {
767 			r = -ENOMEM;
768 			goto exec_fini;
769 		}
770 
771 		/* Array of fences */
772 		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
773 		if (!fences) {
774 			r = -ENOMEM;
775 			goto free_fence_info;
776 		}
777 
778 		/* Retrieve GEM read objects fence */
779 		for (i = 0; i < num_read_bo_handles; i++) {
780 			struct dma_resv_iter resv_cursor;
781 			struct dma_fence *fence;
782 
783 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
784 						DMA_RESV_USAGE_READ, fence) {
785 				if (num_fences >= wait_info->num_fences) {
786 					r = -EINVAL;
787 					goto free_fences;
788 				}
789 
790 				fences[num_fences++] = fence;
791 				dma_fence_get(fence);
792 			}
793 		}
794 
795 		/* Retrieve GEM write objects fence */
796 		for (i = 0; i < num_write_bo_handles; i++) {
797 			struct dma_resv_iter resv_cursor;
798 			struct dma_fence *fence;
799 
800 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
801 						DMA_RESV_USAGE_WRITE, fence) {
802 				if (num_fences >= wait_info->num_fences) {
803 					r = -EINVAL;
804 					goto free_fences;
805 				}
806 
807 				fences[num_fences++] = fence;
808 				dma_fence_get(fence);
809 			}
810 		}
811 
812 		if (num_points) {
813 			struct dma_fence_unwrap iter;
814 			struct dma_fence *fence;
815 			struct dma_fence *f;
816 
817 			for (i = 0; i < num_points; i++) {
818 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
819 							   timeline_points[i],
820 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
821 							   &fence);
822 				if (r)
823 					goto free_fences;
824 
825 				dma_fence_unwrap_for_each(f, &iter, fence) {
826 					if (num_fences >= wait_info->num_fences) {
827 						r = -EINVAL;
828 						dma_fence_put(fence);
829 						goto free_fences;
830 					}
831 
832 					dma_fence_get(f);
833 					fences[num_fences++] = f;
834 				}
835 
836 				dma_fence_put(fence);
837 			}
838 		}
839 
840 		/* Retrieve syncobj's fence */
841 		for (i = 0; i < num_syncobj; i++) {
842 			struct dma_fence *fence;
843 
844 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
845 						   0,
846 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
847 						   &fence);
848 			if (r)
849 				goto free_fences;
850 
851 			if (num_fences >= wait_info->num_fences) {
852 				r = -EINVAL;
853 				dma_fence_put(fence);
854 				goto free_fences;
855 			}
856 
857 			fences[num_fences++] = fence;
858 		}
859 
860 		/*
861 		 * Keep only the latest fences to reduce the number of values
862 		 * given back to userspace.
863 		 */
864 		num_fences = dma_fence_dedup_array(fences, num_fences);
865 
866 		waitq = amdgpu_userq_get(userq_mgr, wait_info->waitq_id);
867 		if (!waitq) {
868 			r = -EINVAL;
869 			goto free_fences;
870 		}
871 
872 		for (i = 0, cnt = 0; i < num_fences; i++) {
873 			struct amdgpu_userq_fence_driver *fence_drv;
874 			struct amdgpu_userq_fence *userq_fence;
875 			u32 index;
876 
877 			userq_fence = to_amdgpu_userq_fence(fences[i]);
878 			if (!userq_fence) {
879 				/*
880 				 * Just waiting on other driver fences should
881 				 * be good for now
882 				 */
883 				r = dma_fence_wait(fences[i], true);
884 				if (r) {
885 					dma_fence_put(fences[i]);
886 					goto free_fences;
887 				}
888 
889 				dma_fence_put(fences[i]);
890 				continue;
891 			}
892 
893 			fence_drv = userq_fence->fence_drv;
894 			/*
895 			 * We need to make sure the user queue releases its references
896 			 * to the fence drivers at some point before queue destruction.
897 			 * Otherwise, we would keep gathering those references until we
898 			 * run out of space and crash.
899 			 */
900 			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
901 				     xa_limit_32b, GFP_KERNEL);
902 			if (r)
903 				goto free_fences;
904 
905 			amdgpu_userq_fence_driver_get(fence_drv);
906 
907 			/* Store drm syncobj's gpu va address and value */
908 			fence_info[cnt].va = fence_drv->va;
909 			fence_info[cnt].value = fences[i]->seqno;
910 
911 			dma_fence_put(fences[i]);
912 			/* Increment the actual userq fence count */
913 			cnt++;
914 		}
915 
916 		wait_info->num_fences = cnt;
917 		/* Copy userq fence info to user space */
918 		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
919 				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
920 			r = -EFAULT;
921 			goto free_fences;
922 		}
923 	}
924 
925 free_fences:
926 	if (fences) {
927 		while (num_fences-- > 0)
928 			dma_fence_put(fences[num_fences]);
929 		kfree(fences);
930 	}
931 free_fence_info:
932 	kfree(fence_info);
933 exec_fini:
934 	drm_exec_fini(&exec);
935 put_gobj_write:
936 	for (i = 0; i < num_write_bo_handles; i++)
937 		drm_gem_object_put(gobj_write[i]);
938 	kfree(gobj_write);
939 put_gobj_read:
940 	for (i = 0; i < num_read_bo_handles; i++)
941 		drm_gem_object_put(gobj_read[i]);
942 	kfree(gobj_read);
943 free_timeline_points:
944 	kfree(timeline_points);
945 free_timeline_handles:
946 	kfree(timeline_handles);
947 free_syncobj_handles:
948 	kfree(syncobj_handles);
949 
950 	if (waitq)
951 		amdgpu_userq_put(waitq);
952 
953 	return r;
954 }
955