xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c (revision bd1886d6e4ca6b84041d17ba6e11d0f85f7ee1a4)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kref.h>
26 #include <linux/slab.h>
27 #include <linux/dma-fence-unwrap.h>
28 
29 #include <drm/drm_exec.h>
30 #include <drm/drm_syncobj.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_userq_fence.h"
34 
35 static const struct dma_fence_ops amdgpu_userq_fence_ops;
36 static struct kmem_cache *amdgpu_userq_fence_slab;
37 
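/*
 * Upper bound on the number of BO handles a single signal/wait IOCTL may
 * pass in, which bounds the size of the user-supplied handle arrays that
 * get duplicated and looked up.
 */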
38 #define AMDGPU_USERQ_MAX_HANDLES	(1U << 16)
39 
40 int amdgpu_userq_fence_slab_init(void)
41 {
42 	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
43 						    sizeof(struct amdgpu_userq_fence),
44 						    0,
45 						    SLAB_HWCACHE_ALIGN,
46 						    NULL);
47 	if (!amdgpu_userq_fence_slab)
48 		return -ENOMEM;
49 
50 	return 0;
51 }
52 
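/*
 * Fences are freed via call_rcu(), so wait for all outstanding RCU
 * callbacks before destroying the slab.
 */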
53 void amdgpu_userq_fence_slab_fini(void)
54 {
55 	rcu_barrier();
56 	kmem_cache_destroy(amdgpu_userq_fence_slab);
57 }
58 
59 static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
60 {
61 	if (!f || f->ops != &amdgpu_userq_fence_ops)
62 		return NULL;
63 
64 	return container_of(f, struct amdgpu_userq_fence, base);
65 }
66 
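/*
 * The seq64 slot owned by the fence driver acts as the fence "RPTR": the
 * user queue updates it as work completes, and every fence whose seqno
 * ("WPTR") is at or below that value is considered signaled.
 */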
67 static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
68 {
69 	return le64_to_cpu(*fence_drv->cpu_addr);
70 }
71 
72 static void
73 amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
74 			 u64 seq)
75 {
76 	if (fence_drv->cpu_addr)
77 		*fence_drv->cpu_addr = cpu_to_le64(seq);
78 }
79 
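/*
 * Allocate and initialize the per-queue fence driver: reserve a seq64 slot
 * for the fence value, clear it, and set up the refcount, the pending-fence
 * list and the dma-fence context shared by all fences of this queue.
 */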
80 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
81 				    struct amdgpu_userq_fence_driver **fence_drv_req)
82 {
83 	struct amdgpu_userq_fence_driver *fence_drv;
84 	int r;
85 
86 	if (!fence_drv_req)
87 		return -EINVAL;
88 	*fence_drv_req = NULL;
89 
90 	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
91 	if (!fence_drv)
92 		return -ENOMEM;
93 
94 	/* Acquire seq64 memory */
95 	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
96 			       &fence_drv->cpu_addr);
97 	if (r)
98 		goto free_fence_drv;
99 
100 	memset(fence_drv->cpu_addr, 0, sizeof(u64));
101 
102 	kref_init(&fence_drv->refcount);
103 	INIT_LIST_HEAD(&fence_drv->fences);
104 	spin_lock_init(&fence_drv->fence_list_lock);
105 
106 	fence_drv->adev = adev;
107 	fence_drv->context = dma_fence_context_alloc(1);
108 	get_task_comm(fence_drv->timeline_name, current);
109 
110 	*fence_drv_req = fence_drv;
111 
112 	return 0;
113 
114 free_fence_drv:
115 	kfree(fence_drv);
116 
117 	return r;
118 }
119 
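/*
 * Drop any fence driver references still parked in the queue's fence_drv_xa
 * (collected by the wait IOCTL but never transferred to a signal fence) and
 * erase them from the xarray.
 */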
120 static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
121 {
122 	struct amdgpu_userq_fence_driver *fence_drv;
123 	unsigned long index;
124 
125 	if (xa_empty(xa))
126 		return;
127 
128 	xa_lock(xa);
129 	xa_for_each(xa, index, fence_drv) {
130 		__xa_erase(xa, index);
131 		amdgpu_userq_fence_driver_put(fence_drv);
132 	}
133 
134 	xa_unlock(xa);
135 }
136 
137 void
138 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
139 {
140 	dma_fence_put(userq->last_fence);
141 	userq->last_fence = NULL;
142 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
143 	xa_destroy(&userq->fence_drv_xa);
144 	/* Drop the queue's ownership reference to fence_drv explicitly */
145 	amdgpu_userq_fence_driver_put(userq->fence_drv);
146 }
147 
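/*
 * Walk the pending-fence list and signal every fence whose seqno has been
 * reached by the value in the seq64 slot, dropping the fence driver
 * references that were attached to each fence at creation time.
 */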
148 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
149 {
150 	struct amdgpu_userq_fence *userq_fence, *tmp;
151 	struct dma_fence *fence;
152 	unsigned long flags;
153 	u64 rptr;
154 	int i;
155 
156 	if (!fence_drv)
157 		return;
158 
159 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
160 	rptr = amdgpu_userq_fence_read(fence_drv);
161 
162 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
163 		fence = &userq_fence->base;
164 
165 		if (rptr < fence->seqno)
166 			break;
167 
168 		dma_fence_signal(fence);
169 
170 		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
171 			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
172 
173 		list_del(&userq_fence->link);
174 		dma_fence_put(fence);
175 	}
176 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
177 }
178 
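/*
 * Final teardown once the last reference is dropped: complete any fences
 * still pending with -ECANCELED, return the seq64 slot and free the fence
 * driver.
 */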
179 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
180 {
181 	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
182 					 struct amdgpu_userq_fence_driver,
183 					 refcount);
184 	struct amdgpu_device *adev = fence_drv->adev;
185 	struct amdgpu_userq_fence *fence, *tmp;
186 	unsigned long flags;
187 	struct dma_fence *f;
188 
189 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
190 	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
191 		f = &fence->base;
192 
193 		if (!dma_fence_is_signaled(f)) {
194 			dma_fence_set_error(f, -ECANCELED);
195 			dma_fence_signal(f);
196 		}
197 
198 		list_del(&fence->link);
199 		dma_fence_put(f);
200 	}
201 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
202 
203 	/* Free seq64 memory */
204 	amdgpu_seq64_free(adev, fence_drv->va);
205 	kfree(fence_drv);
206 }
207 
208 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
209 {
210 	kref_get(&fence_drv->refcount);
211 }
212 
213 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
214 {
215 	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
216 }
217 
218 static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
219 {
220 	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
221 	return *userq_fence ? 0 : -ENOMEM;
222 }
223 
224 static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
225 				     struct amdgpu_userq_fence *userq_fence,
226 				     u64 seq, struct dma_fence **f)
227 {
228 	struct amdgpu_userq_fence_driver *fence_drv;
229 	struct dma_fence *fence;
230 	unsigned long flags;
231 
232 	fence_drv = userq->fence_drv;
233 	if (!fence_drv)
234 		return -EINVAL;
235 
236 	spin_lock_init(&userq_fence->lock);
237 	INIT_LIST_HEAD(&userq_fence->link);
238 	fence = &userq_fence->base;
239 	userq_fence->fence_drv = fence_drv;
240 
241 	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
242 			 fence_drv->context, seq);
243 
244 	amdgpu_userq_fence_driver_get(fence_drv);
245 	dma_fence_get(fence);
246 
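	/*
	 * Transfer the fence driver references collected by the wait IOCTL
	 * from fence_drv_xa into this fence's fence_drv_array; they are
	 * dropped again when the fence is signaled in
	 * amdgpu_userq_fence_driver_process().
	 */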
247 	if (!xa_empty(&userq->fence_drv_xa)) {
248 		struct amdgpu_userq_fence_driver *stored_fence_drv;
249 		unsigned long index, count = 0;
250 		int i = 0;
251 
252 		xa_lock(&userq->fence_drv_xa);
253 		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
254 			count++;
255 
256 		userq_fence->fence_drv_array =
257 			kvmalloc_array(count, sizeof(struct amdgpu_userq_fence_driver *),
258 				       GFP_ATOMIC);
259 
260 		if (userq_fence->fence_drv_array) {
261 			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
262 				userq_fence->fence_drv_array[i] = stored_fence_drv;
263 				__xa_erase(&userq->fence_drv_xa, index);
264 				i++;
265 			}
266 		}
267 
268 		userq_fence->fence_drv_array_count = i;
269 		xa_unlock(&userq->fence_drv_xa);
270 	} else {
271 		userq_fence->fence_drv_array = NULL;
272 		userq_fence->fence_drv_array_count = 0;
273 	}
274 
275 	/* Check if hardware has already processed the job */
276 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
277 	if (!dma_fence_is_signaled(fence))
278 		list_add_tail(&userq_fence->link, &fence_drv->fences);
279 	else
280 		dma_fence_put(fence);
281 
282 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
283 
284 	*f = fence;
285 
286 	return 0;
287 }
288 
289 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
290 {
291 	return "amdgpu_userq_fence";
292 }
293 
294 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
295 {
296 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
297 
298 	return fence->fence_drv->timeline_name;
299 }
300 
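/*
 * A userq fence is signaled once the value written to the seq64 slot
 * ("rptr") has caught up with the fence's sequence number ("wptr").
 */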
301 static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
302 {
303 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
304 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
305 	u64 rptr, wptr;
306 
307 	rptr = amdgpu_userq_fence_read(fence_drv);
308 	wptr = fence->base.seqno;
309 
310 	if (rptr >= wptr)
311 		return true;
312 
313 	return false;
314 }
315 
316 static void amdgpu_userq_fence_free(struct rcu_head *rcu)
317 {
318 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
319 	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
320 	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
321 
322 	/* Release the fence driver reference */
323 	amdgpu_userq_fence_driver_put(fence_drv);
324 
325 	kvfree(userq_fence->fence_drv_array);
326 	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
327 }
328 
329 static void amdgpu_userq_fence_release(struct dma_fence *f)
330 {
331 	call_rcu(&f->rcu, amdgpu_userq_fence_free);
332 }
333 
334 static const struct dma_fence_ops amdgpu_userq_fence_ops = {
335 	.get_driver_name = amdgpu_userq_fence_get_driver_name,
336 	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
337 	.signaled = amdgpu_userq_fence_signaled,
338 	.release = amdgpu_userq_fence_release,
339 };
340 
341 /**
342  * amdgpu_userq_fence_read_wptr - Read the userq wptr value
343  *
344  * @adev: amdgpu_device pointer
345  * @queue: user mode queue structure pointer
346  * @wptr: pointer used to return the write pointer value
347  *
348  * Read the wptr value from the userq's MQD. The userq signal IOCTL
349  * creates a dma_fence for the shared buffers that is considered
350  * signaled once the RPTR value written to the seq64 memory is >= WPTR.
351  *
352  * Returns 0 on success with the wptr value stored in @wptr, error on failure.
353  */
354 static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
355 					struct amdgpu_usermode_queue *queue,
356 					u64 *wptr)
357 {
358 	struct amdgpu_bo_va_mapping *mapping;
359 	struct amdgpu_bo *bo;
360 	u64 addr, *ptr;
361 	int r;
362 
363 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
364 	if (r)
365 		return r;
366 
367 	addr = queue->userq_prop->wptr_gpu_addr;
368 	addr &= AMDGPU_GMC_HOLE_MASK;
369 
370 	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
371 	if (!mapping) {
372 		amdgpu_bo_unreserve(queue->vm->root.bo);
373 		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
374 		return -EINVAL;
375 	}
376 
377 	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
378 	amdgpu_bo_unreserve(queue->vm->root.bo);
379 	r = amdgpu_bo_reserve(bo, true);
380 	if (r) {
381 		amdgpu_bo_unref(&bo);
382 		DRM_ERROR("Failed to reserve userqueue wptr bo");
383 		return r;
384 	}
385 
386 	r = amdgpu_bo_kmap(bo, (void **)&ptr);
387 	if (r) {
388 		DRM_ERROR("Failed mapping the userqueue wptr bo");
389 		goto map_error;
390 	}
391 
392 	*wptr = le64_to_cpu(*ptr);
393 
394 	amdgpu_bo_kunmap(bo);
395 	amdgpu_bo_unreserve(bo);
396 	amdgpu_bo_unref(&bo);
397 
398 	return 0;
399 
400 map_error:
401 	amdgpu_bo_unreserve(bo);
402 	amdgpu_bo_unref(&bo);
403 
404 	return r;
405 }
406 
407 static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
408 {
409 	dma_fence_put(fence);
410 }
411 
412 static void
413 amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
414 				    int error)
415 {
416 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
417 	unsigned long flags;
418 	struct dma_fence *f;
419 
420 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
421 
422 	f = rcu_dereference_protected(&fence->base,
423 				      lockdep_is_held(&fence_drv->fence_list_lock));
424 	if (f && !dma_fence_is_signaled_locked(f))
425 		dma_fence_set_error(f, error);
426 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
427 }
428 
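/*
 * Force the queue's last fence to complete: mark it with -ECANCELED, write
 * its seqno into the seq64 slot and run the normal processing path so it,
 * and everything queued before it, signals.
 */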
429 void
430 amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
431 {
432 	struct dma_fence *f = userq->last_fence;
433 
434 	if (f) {
435 		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
436 		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
437 		u64 wptr = fence->base.seqno;
438 
439 		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
440 		amdgpu_userq_fence_write(fence_drv, wptr);
441 		amdgpu_userq_fence_driver_process(fence_drv);
442 
443 	}
444 }
445 
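/*
 * Signal IOCTL: read the queue's current WPTR, create a userq fence with
 * that value as its seqno, and attach it to the supplied BOs (as read/write
 * reservation fences) and syncobjs so that other users can wait for the
 * user queue to reach this point.
 */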
446 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
447 			      struct drm_file *filp)
448 {
449 	struct amdgpu_device *adev = drm_to_adev(dev);
450 	struct drm_amdgpu_userq_signal *args = data;
451 	const unsigned int num_write_bo_handles = args->num_bo_write_handles;
452 	const unsigned int num_read_bo_handles = args->num_bo_read_handles;
453 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
454 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
455 	struct drm_gem_object **gobj_write, **gobj_read;
456 	u32 *syncobj_handles, num_syncobj_handles;
457 	struct amdgpu_userq_fence *userq_fence;
458 	struct amdgpu_usermode_queue *queue = NULL;
459 	struct drm_syncobj **syncobj = NULL;
460 	struct dma_fence *fence;
461 	struct drm_exec exec;
462 	int r, i, entry;
463 	u64 wptr;
464 
465 	if (!amdgpu_userq_enabled(dev))
466 		return -ENOTSUPP;
467 
468 	if (args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
469 	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
470 		return -EINVAL;
471 
472 	num_syncobj_handles = args->num_syncobj_handles;
473 	syncobj_handles = memdup_array_user(u64_to_user_ptr(args->syncobj_handles),
474 					    num_syncobj_handles, sizeof(u32));
475 	if (IS_ERR(syncobj_handles))
476 		return PTR_ERR(syncobj_handles);
477 
478 	/* Array of pointers to the looked up syncobjs */
479 	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
480 	if (!syncobj) {
481 		r = -ENOMEM;
482 		goto free_syncobj_handles;
483 	}
484 
485 	for (entry = 0; entry < num_syncobj_handles; entry++) {
486 		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
487 		if (!syncobj[entry]) {
488 			r = -ENOENT;
489 			goto free_syncobj;
490 		}
491 	}
492 
493 	r = drm_gem_objects_lookup(filp,
494 				   u64_to_user_ptr(args->bo_read_handles),
495 				   num_read_bo_handles,
496 				   &gobj_read);
497 	if (r)
498 		goto free_syncobj;
499 
500 	r = drm_gem_objects_lookup(filp,
501 				   u64_to_user_ptr(args->bo_write_handles),
502 				   num_write_bo_handles,
503 				   &gobj_write);
504 	if (r)
505 		goto put_gobj_read;
506 
507 	/* Retrieve the user queue */
508 	queue = amdgpu_userq_get(userq_mgr, args->queue_id);
509 	if (!queue) {
510 		r = -ENOENT;
511 		goto put_gobj_write;
512 	}
513 
514 	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
515 	if (r)
516 		goto put_gobj_write;
517 
518 	r = amdgpu_userq_fence_alloc(&userq_fence);
519 	if (r)
520 		goto put_gobj_write;
521 
522 	/* Getting here means the UQ is active; make sure the eviction fence is valid */
523 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
524 
525 	/* Create a new fence */
526 	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
527 	if (r) {
528 		mutex_unlock(&userq_mgr->userq_mutex);
529 		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
530 		goto put_gobj_write;
531 	}
532 
533 	dma_fence_put(queue->last_fence);
534 	queue->last_fence = dma_fence_get(fence);
535 	amdgpu_userq_start_hang_detect_work(queue);
536 	mutex_unlock(&userq_mgr->userq_mutex);
537 
538 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
539 		      (num_read_bo_handles + num_write_bo_handles));
540 
541 	/* Lock all BOs with retry handling */
542 	drm_exec_until_all_locked(&exec) {
543 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
544 		drm_exec_retry_on_contention(&exec);
545 		if (r) {
546 			amdgpu_userq_fence_cleanup(fence);
547 			goto exec_fini;
548 		}
549 
550 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
551 		drm_exec_retry_on_contention(&exec);
552 		if (r) {
553 			amdgpu_userq_fence_cleanup(fence);
554 			goto exec_fini;
555 		}
556 	}
557 
558 	for (i = 0; i < num_read_bo_handles; i++) {
559 		if (!gobj_read || !gobj_read[i]->resv)
560 			continue;
561 
562 		dma_resv_add_fence(gobj_read[i]->resv, fence,
563 				   DMA_RESV_USAGE_READ);
564 	}
565 
566 	for (i = 0; i < num_write_bo_handles; i++) {
567 		if (!gobj_write || !gobj_write[i]->resv)
568 			continue;
569 
570 		dma_resv_add_fence(gobj_write[i]->resv, fence,
571 				   DMA_RESV_USAGE_WRITE);
572 	}
573 
574 	/* Attach the created fence to the supplied syncobjs */
575 	for (i = 0; i < num_syncobj_handles; i++)
576 		drm_syncobj_replace_fence(syncobj[i], fence);
577 
578 	/* drop the reference acquired in fence creation function */
579 	dma_fence_put(fence);
580 
581 exec_fini:
582 	drm_exec_fini(&exec);
583 put_gobj_write:
584 	for (i = 0; i < num_write_bo_handles; i++)
585 		drm_gem_object_put(gobj_write[i]);
586 	kvfree(gobj_write);
587 put_gobj_read:
588 	for (i = 0; i < num_read_bo_handles; i++)
589 		drm_gem_object_put(gobj_read[i]);
590 	kvfree(gobj_read);
591 free_syncobj:
592 	while (entry-- > 0)
593 		if (syncobj[entry])
594 			drm_syncobj_put(syncobj[entry]);
595 	kfree(syncobj);
596 free_syncobj_handles:
597 	kfree(syncobj_handles);
598 
599 	if (queue)
600 		amdgpu_userq_put(queue);
601 
602 	return r;
603 }
604 
605 /* Count the number of expected fences so userspace can alloc a buffer */
606 static int
607 amdgpu_userq_wait_count_fences(struct drm_file *filp,
608 			       struct drm_amdgpu_userq_wait *wait_info,
609 			       u32 *syncobj_handles, u32 *timeline_points,
610 			       u32 *timeline_handles,
611 			       struct drm_gem_object **gobj_write,
612 			       struct drm_gem_object **gobj_read)
613 {
614 	int num_read_bo_handles, num_write_bo_handles;
615 	struct dma_fence_unwrap iter;
616 	struct dma_fence *fence, *f;
617 	unsigned int num_fences = 0;
618 	struct drm_exec exec;
619 	int i, r;
620 
621 	/*
622 	 * This needs to be outside of the lock provided by drm_exec for
623 	 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT to work correctly.
624 	 */
625 
626 	/* Count timeline fences */
627 	for (i = 0; i < wait_info->num_syncobj_timeline_handles; i++) {
628 		r = drm_syncobj_find_fence(filp, timeline_handles[i],
629 					   timeline_points[i],
630 					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
631 					   &fence);
632 		if (r)
633 			return r;
634 
635 		dma_fence_unwrap_for_each(f, &iter, fence)
636 			num_fences++;
637 
638 		dma_fence_put(fence);
639 	}
640 
641 	/* Count boolean fences */
642 	for (i = 0; i < wait_info->num_syncobj_handles; i++) {
643 		r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
644 					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
645 					   &fence);
646 		if (r)
647 			return r;
648 
649 		num_fences++;
650 		dma_fence_put(fence);
651 	}
652 
653 	/* Lock all the GEM objects */
654 	/* TODO: It is actually not necessary to lock them */
655 	num_read_bo_handles = wait_info->num_bo_read_handles;
656 	num_write_bo_handles = wait_info->num_bo_write_handles;
657 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
658 		      num_read_bo_handles + num_write_bo_handles);
659 
660 	drm_exec_until_all_locked(&exec) {
661 		r = drm_exec_prepare_array(&exec, gobj_read,
662 					   num_read_bo_handles, 1);
663 		drm_exec_retry_on_contention(&exec);
664 		if (r)
665 			goto error_unlock;
666 
667 		r = drm_exec_prepare_array(&exec, gobj_write,
668 					   num_write_bo_handles, 1);
669 		drm_exec_retry_on_contention(&exec);
670 		if (r)
671 			goto error_unlock;
672 	}
673 
674 	/* Count read fences */
675 	for (i = 0; i < num_read_bo_handles; i++) {
676 		struct dma_resv_iter resv_cursor;
677 		struct dma_fence *fence;
678 
679 		dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
680 					DMA_RESV_USAGE_READ, fence)
681 			num_fences++;
682 	}
683 
684 	/* Count write fences */
685 	for (i = 0; i < num_write_bo_handles; i++) {
686 		struct dma_resv_iter resv_cursor;
687 		struct dma_fence *fence;
688 
689 		dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
690 					DMA_RESV_USAGE_WRITE, fence)
691 			num_fences++;
692 	}
693 
694 	wait_info->num_fences = min(num_fences, USHRT_MAX);
695 	r = 0;
696 
697 error_unlock:
698 	/* Unlock all the GEM objects */
699 	drm_exec_fini(&exec);
700 	return r;
701 }
702 
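/*
 * Append @fence to the caller-sized fences array if there is still room;
 * otherwise fall back to waiting for it synchronously.
 */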
703 static int
704 amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info,
705 			    struct dma_fence **fences, unsigned int *num_fences,
706 			    struct dma_fence *fence)
707 {
708 	/* Fall back to a blocking wait if userspace didn't allocate enough space */
709 	if (*num_fences >= wait_info->num_fences)
710 		return dma_fence_wait(fence, true);
711 
712 	fences[(*num_fences)++] = dma_fence_get(fence);
713 	return 0;
714 }
715 
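/*
 * Collect all fences the caller has to wait for and return the userq ones
 * to userspace as (seq64 VA, value) pairs; fences that do not come from a
 * user queue are simply waited on inline.
 */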
716 static int
717 amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
718 				    struct drm_amdgpu_userq_wait *wait_info,
719 				    u32 *syncobj_handles, u32 *timeline_points,
720 				    u32 *timeline_handles,
721 				    struct drm_gem_object **gobj_write,
722 				    struct drm_gem_object **gobj_read)
723 {
724 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
725 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
726 	struct drm_amdgpu_userq_fence_info *fence_info;
727 	int num_read_bo_handles, num_write_bo_handles;
728 	struct amdgpu_usermode_queue *waitq;
729 	struct dma_fence **fences, *fence, *f;
730 	struct dma_fence_unwrap iter;
731 	int num_points, num_syncobj;
732 	unsigned int num_fences = 0;
733 	struct drm_exec exec;
734 	int i, cnt, r;
735 
736 	fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info),
737 				   GFP_KERNEL);
738 	if (!fence_info)
739 		return -ENOMEM;
740 
741 	fences = kmalloc_array(wait_info->num_fences, sizeof(*fences),
742 			       GFP_KERNEL);
743 	if (!fences) {
744 		r = -ENOMEM;
745 		goto free_fence_info;
746 	}
747 
748 	/* Retrieve timeline fences */
749 	num_points = wait_info->num_syncobj_timeline_handles;
750 	for (i = 0; i < num_points; i++) {
751 		r = drm_syncobj_find_fence(filp, timeline_handles[i],
752 					   timeline_points[i],
753 					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
754 					   &fence);
755 		if (r)
756 			goto free_fences;
757 
758 		dma_fence_unwrap_for_each(f, &iter, fence) {
759 			r = amdgpu_userq_wait_add_fence(wait_info, fences,
760 							&num_fences, f);
761 			if (r) {
762 				dma_fence_put(fence);
763 				goto free_fences;
764 			}
765 		}
766 
767 		dma_fence_put(fence);
768 	}
769 
770 	/* Retrieve boolean fences */
771 	num_syncobj = wait_info->num_syncobj_handles;
772 	for (i = 0; i < num_syncobj; i++) {
773 		struct dma_fence *fence;
774 
775 		r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
776 					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
777 					   &fence);
778 		if (r)
779 			goto free_fences;
780 
781 		r = amdgpu_userq_wait_add_fence(wait_info, fences,
782 						&num_fences, fence);
783 		dma_fence_put(fence);
784 		if (r)
785 			goto free_fences;
786 
787 	}
788 
789 	/* Lock all the GEM objects */
790 	num_read_bo_handles = wait_info->num_bo_read_handles;
791 	num_write_bo_handles = wait_info->num_bo_write_handles;
792 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
793 		      num_read_bo_handles + num_write_bo_handles);
794 
795 	drm_exec_until_all_locked(&exec) {
796 		r = drm_exec_prepare_array(&exec, gobj_read,
797 					   num_read_bo_handles, 1);
798 		drm_exec_retry_on_contention(&exec);
799 		if (r)
800 			goto error_unlock;
801 
802 		r = drm_exec_prepare_array(&exec, gobj_write,
803 					   num_write_bo_handles, 1);
804 		drm_exec_retry_on_contention(&exec);
805 		if (r)
806 			goto error_unlock;
807 	}
808 
809 	/* Collect the fences attached to the GEM read objects */
810 	for (i = 0; i < num_read_bo_handles; i++) {
811 		struct dma_resv_iter resv_cursor;
812 		struct dma_fence *fence;
813 
814 		dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
815 					DMA_RESV_USAGE_READ, fence) {
816 			r = amdgpu_userq_wait_add_fence(wait_info, fences,
817 							&num_fences, fence);
818 			if (r)
819 				goto error_unlock;
820 		}
821 	}
822 
823 	/* Collect the fences attached to the GEM write objects */
824 	for (i = 0; i < num_write_bo_handles; i++) {
825 		struct dma_resv_iter resv_cursor;
826 		struct dma_fence *fence;
827 
828 		dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
829 					DMA_RESV_USAGE_WRITE, fence) {
830 			r = amdgpu_userq_wait_add_fence(wait_info, fences,
831 							&num_fences, fence);
832 			if (r)
833 				goto error_unlock;
834 		}
835 	}
836 
837 	drm_exec_fini(&exec);
838 
839 	/*
840 	 * Keep only the latest fences to reduce the number of values
841 	 * given back to userspace.
842 	 */
843 	num_fences = dma_fence_dedup_array(fences, num_fences);
844 
845 	waitq = amdgpu_userq_get(userq_mgr, wait_info->waitq_id);
846 	if (!waitq) {
847 		r = -EINVAL;
848 		goto free_fences;
849 	}
850 
851 	for (i = 0, cnt = 0; i < num_fences; i++) {
852 		struct amdgpu_userq_fence_driver *fence_drv;
853 		struct amdgpu_userq_fence *userq_fence;
854 		u32 index;
855 
856 		userq_fence = to_amdgpu_userq_fence(fences[i]);
857 		if (!userq_fence) {
858 			/*
859 			 * Simply waiting on fences from other drivers
860 			 * is good enough for now.
861 			 */
862 			r = dma_fence_wait(fences[i], true);
863 			if (r)
864 				goto put_waitq;
865 
866 			continue;
867 		}
868 
869 		fence_drv = userq_fence->fence_drv;
870 		/*
871 		 * We need to make sure the user queue releases its references
872 		 * to the fence drivers at some point before queue destruction.
873 		 * Otherwise, we would accumulate those references until we run
874 		 * out of space and crash.
875 		 */
876 		r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
877 			     xa_limit_32b, GFP_KERNEL);
878 		if (r)
879 			goto put_waitq;
880 
881 		amdgpu_userq_fence_driver_get(fence_drv);
882 
883 		/* Store the fence's seq64 GPU VA and the value to wait for */
884 		fence_info[cnt].va = fence_drv->va;
885 		fence_info[cnt].value = fences[i]->seqno;
886 
887 		/* Increment the actual userq fence count */
888 		cnt++;
889 	}
890 	wait_info->num_fences = cnt;
891 
892 	/* Copy userq fence info to user space */
893 	if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
894 			 fence_info, cnt * sizeof(*fence_info)))
895 		r = -EFAULT;
896 	else
897 		r = 0;
898 
899 put_waitq:
900 	amdgpu_userq_put(waitq);
901 
902 free_fences:
903 	while (num_fences--)
904 		dma_fence_put(fences[num_fences]);
905 	kfree(fences);
906 
907 free_fence_info:
908 	kfree(fence_info);
909 	return r;
910 
911 error_unlock:
912 	drm_exec_fini(&exec);
913 	goto free_fences;
914 }
915 
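/*
 * Wait IOCTL: gather the fences behind the supplied syncobjs, timeline
 * points and BOs. With num_fences == 0 only the total count is returned so
 * userspace can size its buffer; otherwise the userq fence info is filled
 * in and copied back.
 */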
916 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
917 			    struct drm_file *filp)
918 {
919 	int num_points, num_syncobj, num_read_bo_handles, num_write_bo_handles;
920 	u32 *syncobj_handles, *timeline_points, *timeline_handles;
921 	struct drm_amdgpu_userq_wait *wait_info = data;
922 	struct drm_gem_object **gobj_write;
923 	struct drm_gem_object **gobj_read;
924 	void __user *ptr;
925 	int r;
926 
927 	if (!amdgpu_userq_enabled(dev))
928 		return -ENOTSUPP;
929 
930 	if (wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
931 	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
932 		return -EINVAL;
933 
934 	num_syncobj = wait_info->num_syncobj_handles;
935 	ptr = u64_to_user_ptr(wait_info->syncobj_handles);
936 	syncobj_handles = memdup_array_user(ptr, num_syncobj, sizeof(u32));
937 	if (IS_ERR(syncobj_handles))
938 		return PTR_ERR(syncobj_handles);
939 
940 	num_points = wait_info->num_syncobj_timeline_handles;
941 	ptr = u64_to_user_ptr(wait_info->syncobj_timeline_handles);
942 	timeline_handles = memdup_array_user(ptr, num_points, sizeof(u32));
943 	if (IS_ERR(timeline_handles)) {
944 		r = PTR_ERR(timeline_handles);
945 		goto free_syncobj_handles;
946 	}
947 
948 	ptr = u64_to_user_ptr(wait_info->syncobj_timeline_points);
949 	timeline_points = memdup_array_user(ptr, num_points, sizeof(u32));
950 	if (IS_ERR(timeline_points)) {
951 		r = PTR_ERR(timeline_points);
952 		goto free_timeline_handles;
953 	}
954 
955 	num_read_bo_handles = wait_info->num_bo_read_handles;
956 	ptr = u64_to_user_ptr(wait_info->bo_read_handles);
957 	r = drm_gem_objects_lookup(filp, ptr, num_read_bo_handles, &gobj_read);
958 	if (r)
959 		goto free_timeline_points;
960 
961 	num_write_bo_handles = wait_info->num_bo_write_handles;
962 	ptr = u64_to_user_ptr(wait_info->bo_write_handles);
963 	r = drm_gem_objects_lookup(filp, ptr, num_write_bo_handles,
964 				   &gobj_write);
965 	if (r)
966 		goto put_gobj_read;
967 
968 	/*
969 	 * Passing num_fences = 0 means that userspace doesn't want to
970 	 * retrieve userq_fence_info. If num_fences = 0 we skip filling
971 	 * userq_fence_info and return the actual number of fences in
972 	 * wait_info->num_fences.
973 	 */
974 	if (!wait_info->num_fences) {
975 		r = amdgpu_userq_wait_count_fences(filp, wait_info,
976 						   syncobj_handles,
977 						   timeline_points,
978 						   timeline_handles,
979 						   gobj_write,
980 						   gobj_read);
981 	} else {
982 		r = amdgpu_userq_wait_return_fence_info(filp, wait_info,
983 							syncobj_handles,
984 							timeline_points,
985 							timeline_handles,
986 							gobj_write,
987 							gobj_read);
988 	}
989 
990 	while (num_write_bo_handles--)
991 		drm_gem_object_put(gobj_write[num_write_bo_handles]);
992 	kvfree(gobj_write);
993 
994 put_gobj_read:
995 	while (num_read_bo_handles--)
996 		drm_gem_object_put(gobj_read[num_read_bo_handles]);
997 	kvfree(gobj_read);
998 
999 free_timeline_points:
1000 	kfree(timeline_points);
1001 free_timeline_handles:
1002 	kfree(timeline_handles);
1003 free_syncobj_handles:
1004 	kfree(syncobj_handles);
1005 	return r;
1006 }
1007