xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c (revision a075082a15e7f5c4889d0cbb51a4041c332cb00c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kref.h>
26 #include <linux/slab.h>
27 #include <linux/dma-fence-unwrap.h>
28 
29 #include <drm/drm_exec.h>
30 #include <drm/drm_syncobj.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_userq_fence.h"
34 
35 static const struct dma_fence_ops amdgpu_userq_fence_ops;
36 static struct kmem_cache *amdgpu_userq_fence_slab;
37 
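/* Upper bound on the number of syncobj and BO handles accepted per signal/wait ioctl */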
38 #define AMDGPU_USERQ_MAX_HANDLES	(1U << 16)
39 
40 int amdgpu_userq_fence_slab_init(void)
41 {
42 	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
43 						    sizeof(struct amdgpu_userq_fence),
44 						    0,
45 						    SLAB_HWCACHE_ALIGN,
46 						    NULL);
47 	if (!amdgpu_userq_fence_slab)
48 		return -ENOMEM;
49 
50 	return 0;
51 }
52 
53 void amdgpu_userq_fence_slab_fini(void)
54 {
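	/* Userq fences are freed via call_rcu(); wait for pending callbacks before destroying the slab */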
55 	rcu_barrier();
56 	kmem_cache_destroy(amdgpu_userq_fence_slab);
57 }
58 
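/* Returns NULL if @f is not a fence created by this driver (e.g. a foreign dma_fence) */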
59 static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
60 {
61 	if (!f || f->ops != &amdgpu_userq_fence_ops)
62 		return NULL;
63 
64 	return container_of(f, struct amdgpu_userq_fence, base);
65 }
66 
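/* Read back the last sequence number written to the seq64 slot (treated as the queue's RPTR) */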
67 static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
68 {
69 	return le64_to_cpu(*fence_drv->cpu_addr);
70 }
71 
72 static void
73 amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
74 			 u64 seq)
75 {
76 	if (fence_drv->cpu_addr)
77 		*fence_drv->cpu_addr = cpu_to_le64(seq);
78 }
79 
80 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
81 				    struct amdgpu_usermode_queue *userq)
82 {
83 	struct amdgpu_userq_fence_driver *fence_drv;
84 	unsigned long flags;
85 	int r;
86 
87 	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
88 	if (!fence_drv)
89 		return -ENOMEM;
90 
91 	/* Acquire seq64 memory */
92 	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
93 			       &fence_drv->cpu_addr);
94 	if (r)
95 		goto free_fence_drv;
96 
97 	memset(fence_drv->cpu_addr, 0, sizeof(u64));
98 
99 	kref_init(&fence_drv->refcount);
100 	INIT_LIST_HEAD(&fence_drv->fences);
101 	spin_lock_init(&fence_drv->fence_list_lock);
102 
103 	fence_drv->adev = adev;
104 	fence_drv->context = dma_fence_context_alloc(1);
105 	get_task_comm(fence_drv->timeline_name, current);
106 
107 	xa_lock_irqsave(&adev->userq_xa, flags);
108 	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
109 			      fence_drv, GFP_KERNEL));
110 	xa_unlock_irqrestore(&adev->userq_xa, flags);
111 	if (r)
112 		goto free_seq64;
113 
114 	userq->fence_drv = fence_drv;
115 
116 	return 0;
117 
118 free_seq64:
119 	amdgpu_seq64_free(adev, fence_drv->va);
120 free_fence_drv:
121 	kfree(fence_drv);
122 
123 	return r;
124 }
125 
126 static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
127 {
128 	struct amdgpu_userq_fence_driver *fence_drv;
129 	unsigned long index;
130 
131 	if (xa_empty(xa))
132 		return;
133 
134 	xa_lock(xa);
135 	xa_for_each(xa, index, fence_drv) {
136 		__xa_erase(xa, index);
137 		amdgpu_userq_fence_driver_put(fence_drv);
138 	}
139 
140 	xa_unlock(xa);
141 }
142 
143 void
144 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
145 {
146 	dma_fence_put(userq->last_fence);
147 
148 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
149 	xa_destroy(&userq->fence_drv_xa);
150 	/* Drop the fence_drv reference held by user queue */
151 	amdgpu_userq_fence_driver_put(userq->fence_drv);
152 }
153 
154 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
155 {
156 	struct amdgpu_userq_fence *userq_fence, *tmp;
157 	struct dma_fence *fence;
158 	unsigned long flags;
159 	u64 rptr;
160 	int i;
161 
162 	if (!fence_drv)
163 		return;
164 
165 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
166 	rptr = amdgpu_userq_fence_read(fence_drv);
167 
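	/* Signal and retire, in order, every fence whose seqno the hardware RPTR has reached */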
168 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
169 		fence = &userq_fence->base;
170 
171 		if (rptr < fence->seqno)
172 			break;
173 
174 		dma_fence_signal(fence);
175 
176 		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
177 			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
178 
179 		list_del(&userq_fence->link);
180 		dma_fence_put(fence);
181 	}
182 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
183 }
184 
185 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
186 {
187 	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
188 					 struct amdgpu_userq_fence_driver,
189 					 refcount);
190 	struct amdgpu_userq_fence_driver *xa_fence_drv;
191 	struct amdgpu_device *adev = fence_drv->adev;
192 	struct amdgpu_userq_fence *fence, *tmp;
193 	struct xarray *xa = &adev->userq_xa;
194 	unsigned long index, flags;
195 	struct dma_fence *f;
196 
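	/* Cancel any fences still pending on this driver before it is freed */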
197 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
198 	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
199 		f = &fence->base;
200 
201 		if (!dma_fence_is_signaled(f)) {
202 			dma_fence_set_error(f, -ECANCELED);
203 			dma_fence_signal(f);
204 		}
205 
206 		list_del(&fence->link);
207 		dma_fence_put(f);
208 	}
209 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
210 
211 	xa_lock_irqsave(xa, flags);
212 	xa_for_each(xa, index, xa_fence_drv)
213 		if (xa_fence_drv == fence_drv)
214 			__xa_erase(xa, index);
215 	xa_unlock_irqrestore(xa, flags);
216 
217 	/* Free seq64 memory */
218 	amdgpu_seq64_free(adev, fence_drv->va);
219 	kfree(fence_drv);
220 }
221 
222 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
223 {
224 	kref_get(&fence_drv->refcount);
225 }
226 
227 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
228 {
229 	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
230 }
231 
232 static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
233 {
234 	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
235 	return *userq_fence ? 0 : -ENOMEM;
236 }
237 
238 static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
239 				     struct amdgpu_userq_fence *userq_fence,
240 				     u64 seq, struct dma_fence **f)
241 {
242 	struct amdgpu_userq_fence_driver *fence_drv;
243 	struct dma_fence *fence;
244 	unsigned long flags;
245 
246 	fence_drv = userq->fence_drv;
247 	if (!fence_drv)
248 		return -EINVAL;
249 
250 	spin_lock_init(&userq_fence->lock);
251 	INIT_LIST_HEAD(&userq_fence->link);
252 	fence = &userq_fence->base;
253 	userq_fence->fence_drv = fence_drv;
254 
255 	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
256 			 fence_drv->context, seq);
257 
258 	amdgpu_userq_fence_driver_get(fence_drv);
259 	dma_fence_get(fence);
260 
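	/*
	 * Take over any fence driver references stashed in the queue's
	 * fence_drv_xa (by the wait IOCTL) so they can be dropped once
	 * this fence signals.
	 */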
261 	if (!xa_empty(&userq->fence_drv_xa)) {
262 		struct amdgpu_userq_fence_driver *stored_fence_drv;
263 		unsigned long index, count = 0;
264 		int i = 0;
265 
266 		xa_lock(&userq->fence_drv_xa);
267 		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
268 			count++;
269 
270 		userq_fence->fence_drv_array =
271 			kvmalloc_array(count, sizeof(*userq_fence->fence_drv_array),
272 				       GFP_ATOMIC);
273 
274 		if (userq_fence->fence_drv_array) {
275 			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
276 				userq_fence->fence_drv_array[i] = stored_fence_drv;
277 				__xa_erase(&userq->fence_drv_xa, index);
278 				i++;
279 			}
280 		}
281 
282 		userq_fence->fence_drv_array_count = i;
283 		xa_unlock(&userq->fence_drv_xa);
284 	} else {
285 		userq_fence->fence_drv_array = NULL;
286 		userq_fence->fence_drv_array_count = 0;
287 	}
288 
289 	/* Check if hardware has already processed the job */
290 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
291 	if (!dma_fence_is_signaled(fence))
292 		list_add_tail(&userq_fence->link, &fence_drv->fences);
293 	else
294 		dma_fence_put(fence);
295 
296 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
297 
298 	*f = fence;
299 
300 	return 0;
301 }
302 
303 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
304 {
305 	return "amdgpu_userq_fence";
306 }
307 
308 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
309 {
310 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
311 
312 	return fence->fence_drv->timeline_name;
313 }
314 
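/* A userq fence counts as signaled once the hardware RPTR has caught up with its WPTR seqno */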
315 static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
316 {
317 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
318 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
319 	u64 rptr, wptr;
320 
321 	rptr = amdgpu_userq_fence_read(fence_drv);
322 	wptr = fence->base.seqno;
323 
324 	if (rptr >= wptr)
325 		return true;
326 
327 	return false;
328 }
329 
330 static void amdgpu_userq_fence_free(struct rcu_head *rcu)
331 {
332 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
333 	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
334 	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
335 
336 	/* Release the fence driver reference */
337 	amdgpu_userq_fence_driver_put(fence_drv);
338 
339 	kvfree(userq_fence->fence_drv_array);
340 	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
341 }
342 
343 static void amdgpu_userq_fence_release(struct dma_fence *f)
344 {
345 	call_rcu(&f->rcu, amdgpu_userq_fence_free);
346 }
347 
348 static const struct dma_fence_ops amdgpu_userq_fence_ops = {
349 	.get_driver_name = amdgpu_userq_fence_get_driver_name,
350 	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
351 	.signaled = amdgpu_userq_fence_signaled,
352 	.release = amdgpu_userq_fence_release,
353 };
354 
355 /**
356  * amdgpu_userq_fence_read_wptr - Read the userq wptr value
357  *
358  * @adev: amdgpu_device pointer
359  * @queue: user mode queue structure pointer
360  * @wptr: write pointer value
361  *
362  * Read the wptr value from the user queue's wptr BO. The userq signal
363  * IOCTL creates a dma_fence for the shared buffers that is considered
364  * signaled once the RPTR value written to seq64 memory is >= this WPTR.
365  *
366  * Returns 0 on success with the wptr value stored in @wptr, error on failure.
367  */
368 static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
369 					struct amdgpu_usermode_queue *queue,
370 					u64 *wptr)
371 {
372 	struct amdgpu_bo_va_mapping *mapping;
373 	struct amdgpu_bo *bo;
374 	u64 addr, *ptr;
375 	int r;
376 
377 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
378 	if (r)
379 		return r;
380 
381 	addr = queue->userq_prop->wptr_gpu_addr;
382 	addr &= AMDGPU_GMC_HOLE_MASK;
383 
384 	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
385 	if (!mapping) {
386 		amdgpu_bo_unreserve(queue->vm->root.bo);
387 		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
388 		return -EINVAL;
389 	}
390 
391 	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
392 	amdgpu_bo_unreserve(queue->vm->root.bo);
393 	r = amdgpu_bo_reserve(bo, true);
394 	if (r) {
395 		amdgpu_bo_unref(&bo);
396 		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
397 		return r;
398 	}
399 
400 	r = amdgpu_bo_kmap(bo, (void **)&ptr);
401 	if (r) {
402 		DRM_ERROR("Failed mapping the userqueue wptr bo\n");
403 		goto map_error;
404 	}
405 
406 	*wptr = le64_to_cpu(*ptr);
407 
408 	amdgpu_bo_kunmap(bo);
409 	amdgpu_bo_unreserve(bo);
410 	amdgpu_bo_unref(&bo);
411 
412 	return 0;
413 
414 map_error:
415 	amdgpu_bo_unreserve(bo);
416 	amdgpu_bo_unref(&bo);
417 
418 	return r;
419 }
420 
421 static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
422 {
423 	dma_fence_put(fence);
424 }
425 
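/* Mark the fence with @error (if it has not signaled yet) while holding the fence list lock */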
426 static void
427 amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
428 				    int error)
429 {
430 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
431 	unsigned long flags;
432 	struct dma_fence *f;
433 
434 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
435 
436 	f = rcu_dereference_protected(&fence->base,
437 				      lockdep_is_held(&fence_drv->fence_list_lock));
438 	if (f && !dma_fence_is_signaled_locked(f))
439 		dma_fence_set_error(f, error);
440 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
441 }
442 
443 void
444 amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
445 {
446 	struct dma_fence *f = userq->last_fence;
447 
448 	if (f) {
449 		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
450 		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
451 		u64 wptr = fence->base.seqno;
452 
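		/*
		 * Mark the last fence as cancelled, then write its WPTR to the
		 * seq64 slot so driver_process() treats every fence up to it
		 * as completed.
		 */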
453 		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
454 		amdgpu_userq_fence_write(fence_drv, wptr);
455 		amdgpu_userq_fence_driver_process(fence_drv);
456 
457 	}
458 }
459 
460 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
461 			      struct drm_file *filp)
462 {
463 	struct amdgpu_device *adev = drm_to_adev(dev);
464 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
465 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
466 	struct drm_amdgpu_userq_signal *args = data;
467 	struct drm_gem_object **gobj_write = NULL;
468 	struct drm_gem_object **gobj_read = NULL;
469 	struct amdgpu_usermode_queue *queue;
470 	struct amdgpu_userq_fence *userq_fence;
471 	struct drm_syncobj **syncobj = NULL;
472 	u32 *bo_handles_write, num_write_bo_handles;
473 	u32 *syncobj_handles, num_syncobj_handles;
474 	u32 *bo_handles_read, num_read_bo_handles;
475 	int r, i, entry, rentry, wentry;
476 	struct dma_fence *fence;
477 	struct drm_exec exec;
478 	u64 wptr;
479 
480 	if (!amdgpu_userq_enabled(dev))
481 		return -EOPNOTSUPP;
482 
483 	if (args->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
484 	    args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
485 	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
486 		return -EINVAL;
487 
488 	num_syncobj_handles = args->num_syncobj_handles;
489 	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
490 				      size_mul(sizeof(u32), num_syncobj_handles));
491 	if (IS_ERR(syncobj_handles))
492 		return PTR_ERR(syncobj_handles);
493 
494 	/* Array of pointers to the looked up syncobjs */
495 	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
496 	if (!syncobj) {
497 		r = -ENOMEM;
498 		goto free_syncobj_handles;
499 	}
500 
501 	for (entry = 0; entry < num_syncobj_handles; entry++) {
502 		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
503 		if (!syncobj[entry]) {
504 			r = -ENOENT;
505 			goto free_syncobj;
506 		}
507 	}
508 
509 	num_read_bo_handles = args->num_bo_read_handles;
510 	bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
511 				      sizeof(u32) * num_read_bo_handles);
512 	if (IS_ERR(bo_handles_read)) {
513 		r = PTR_ERR(bo_handles_read);
514 		goto free_syncobj;
515 	}
516 
517 	/* Array of pointers to the GEM read objects */
518 	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
519 	if (!gobj_read) {
520 		r = -ENOMEM;
521 		goto free_bo_handles_read;
522 	}
523 
524 	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
525 		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
526 		if (!gobj_read[rentry]) {
527 			r = -ENOENT;
528 			goto put_gobj_read;
529 		}
530 	}
531 
532 	num_write_bo_handles = args->num_bo_write_handles;
533 	bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
534 				       sizeof(u32) * num_write_bo_handles);
535 	if (IS_ERR(bo_handles_write)) {
536 		r = PTR_ERR(bo_handles_write);
537 		goto put_gobj_read;
538 	}
539 
540 	/* Array of pointers to the GEM write objects */
541 	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
542 	if (!gobj_write) {
543 		r = -ENOMEM;
544 		goto free_bo_handles_write;
545 	}
546 
547 	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
548 		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
549 		if (!gobj_write[wentry]) {
550 			r = -ENOENT;
551 			goto put_gobj_write;
552 		}
553 	}
554 
555 	/* Retrieve the user queue */
556 	queue = xa_load(&userq_mgr->userq_xa, args->queue_id);
557 	if (!queue) {
558 		r = -ENOENT;
559 		goto put_gobj_write;
560 	}
561 
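	/* Snapshot the queue's current WPTR; it becomes the sequence number of the new fence */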
562 	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
563 	if (r)
564 		goto put_gobj_write;
565 
566 	r = amdgpu_userq_fence_alloc(&userq_fence);
567 	if (r)
568 		goto put_gobj_write;
569 
570 	/* Reaching this point means the user queue is active, make sure the eviction fence is valid */
571 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
572 
573 	/* Create a new fence */
574 	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
575 	if (r) {
576 		mutex_unlock(&userq_mgr->userq_mutex);
577 		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
578 		goto put_gobj_write;
579 	}
580 
581 	dma_fence_put(queue->last_fence);
582 	queue->last_fence = dma_fence_get(fence);
583 	amdgpu_userq_start_hang_detect_work(queue);
584 	mutex_unlock(&userq_mgr->userq_mutex);
585 
586 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
587 		      (num_read_bo_handles + num_write_bo_handles));
588 
589 	/* Lock all BOs with retry handling */
590 	drm_exec_until_all_locked(&exec) {
591 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
592 		drm_exec_retry_on_contention(&exec);
593 		if (r) {
594 			amdgpu_userq_fence_cleanup(fence);
595 			goto exec_fini;
596 		}
597 
598 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
599 		drm_exec_retry_on_contention(&exec);
600 		if (r) {
601 			amdgpu_userq_fence_cleanup(fence);
602 			goto exec_fini;
603 		}
604 	}
605 
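	/* Attach the new fence to each BO's reservation object so later submissions synchronize on it */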
606 	for (i = 0; i < num_read_bo_handles; i++) {
607 		if (!gobj_read || !gobj_read[i]->resv)
608 			continue;
609 
610 		dma_resv_add_fence(gobj_read[i]->resv, fence,
611 				   DMA_RESV_USAGE_READ);
612 	}
613 
614 	for (i = 0; i < num_write_bo_handles; i++) {
615 		if (!gobj_write || !gobj_write[i]->resv)
616 			continue;
617 
618 		dma_resv_add_fence(gobj_write[i]->resv, fence,
619 				   DMA_RESV_USAGE_WRITE);
620 	}
621 
622 	/* Attach the created fence to each of the syncobjs */
623 	for (i = 0; i < num_syncobj_handles; i++)
624 		drm_syncobj_replace_fence(syncobj[i], fence);
625 
626 	/* drop the reference acquired in fence creation function */
627 	dma_fence_put(fence);
628 
629 exec_fini:
630 	drm_exec_fini(&exec);
631 put_gobj_write:
632 	while (wentry-- > 0)
633 		drm_gem_object_put(gobj_write[wentry]);
634 	kfree(gobj_write);
635 free_bo_handles_write:
636 	kfree(bo_handles_write);
637 put_gobj_read:
638 	while (rentry-- > 0)
639 		drm_gem_object_put(gobj_read[rentry]);
640 	kfree(gobj_read);
641 free_bo_handles_read:
642 	kfree(bo_handles_read);
643 free_syncobj:
644 	while (entry-- > 0)
645 		if (syncobj[entry])
646 			drm_syncobj_put(syncobj[entry]);
647 	kfree(syncobj);
648 free_syncobj_handles:
649 	kfree(syncobj_handles);
650 
651 	return r;
652 }
653 
654 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
655 			    struct drm_file *filp)
656 {
657 	u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
658 	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
659 	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
660 	struct drm_amdgpu_userq_wait *wait_info = data;
661 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
662 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
663 	struct amdgpu_usermode_queue *waitq;
664 	struct drm_gem_object **gobj_write;
665 	struct drm_gem_object **gobj_read;
666 	struct dma_fence **fences = NULL;
667 	u16 num_points, num_fences = 0;
668 	int r, i, rentry, wentry, cnt;
669 	struct drm_exec exec;
670 
671 	if (!amdgpu_userq_enabled(dev))
672 		return -EOPNOTSUPP;
673 
674 	if (wait_info->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
675 	    wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
676 	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
677 		return -EINVAL;
678 
679 	num_read_bo_handles = wait_info->num_bo_read_handles;
680 	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
681 				      size_mul(sizeof(u32), num_read_bo_handles));
682 	if (IS_ERR(bo_handles_read))
683 		return PTR_ERR(bo_handles_read);
684 
685 	num_write_bo_handles = wait_info->num_bo_write_handles;
686 	bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
687 				       size_mul(sizeof(u32), num_write_bo_handles));
688 	if (IS_ERR(bo_handles_write)) {
689 		r = PTR_ERR(bo_handles_write);
690 		goto free_bo_handles_read;
691 	}
692 
693 	num_syncobj = wait_info->num_syncobj_handles;
694 	syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
695 				      size_mul(sizeof(u32), num_syncobj));
696 	if (IS_ERR(syncobj_handles)) {
697 		r = PTR_ERR(syncobj_handles);
698 		goto free_bo_handles_write;
699 	}
700 
701 	num_points = wait_info->num_syncobj_timeline_handles;
702 	timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
703 				       sizeof(u32) * num_points);
704 	if (IS_ERR(timeline_handles)) {
705 		r = PTR_ERR(timeline_handles);
706 		goto free_syncobj_handles;
707 	}
708 
709 	timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
710 				      sizeof(u32) * num_points);
711 	if (IS_ERR(timeline_points)) {
712 		r = PTR_ERR(timeline_points);
713 		goto free_timeline_handles;
714 	}
715 
716 	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
717 	if (!gobj_read) {
718 		r = -ENOMEM;
719 		goto free_timeline_points;
720 	}
721 
722 	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
723 		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
724 		if (!gobj_read[rentry]) {
725 			r = -ENOENT;
726 			goto put_gobj_read;
727 		}
728 	}
729 
730 	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
731 	if (!gobj_write) {
732 		r = -ENOMEM;
733 		goto put_gobj_read;
734 	}
735 
736 	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
737 		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
738 		if (!gobj_write[wentry]) {
739 			r = -ENOENT;
740 			goto put_gobj_write;
741 		}
742 	}
743 
744 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
745 		      (num_read_bo_handles + num_write_bo_handles));
746 
747 	/* Lock all BOs with retry handling */
748 	drm_exec_until_all_locked(&exec) {
749 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
750 		drm_exec_retry_on_contention(&exec);
751 		if (r) {
752 			drm_exec_fini(&exec);
753 			goto put_gobj_write;
754 		}
755 
756 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
757 		drm_exec_retry_on_contention(&exec);
758 		if (r) {
759 			drm_exec_fini(&exec);
760 			goto put_gobj_write;
761 		}
762 	}
763 
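	/*
	 * Two-pass protocol: a first call with num_fences == 0 only counts the
	 * relevant fences, a later call with a suitably sized buffer actually
	 * collects them.
	 */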
764 	if (!wait_info->num_fences) {
765 		if (num_points) {
766 			struct dma_fence_unwrap iter;
767 			struct dma_fence *fence;
768 			struct dma_fence *f;
769 
770 			for (i = 0; i < num_points; i++) {
771 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
772 							   timeline_points[i],
773 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
774 							   &fence);
775 				if (r)
776 					goto exec_fini;
777 
778 				dma_fence_unwrap_for_each(f, &iter, fence)
779 					num_fences++;
780 
781 				dma_fence_put(fence);
782 			}
783 		}
784 
785 		/* Count the syncobj fences */
786 		for (i = 0; i < num_syncobj; i++) {
787 			struct dma_fence *fence;
788 
789 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
790 						   0,
791 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
792 						   &fence);
793 			if (r)
794 				goto exec_fini;
795 
796 			num_fences++;
797 			dma_fence_put(fence);
798 		}
799 
800 		/* Count the GEM objects' fences */
801 		for (i = 0; i < num_read_bo_handles; i++) {
802 			struct dma_resv_iter resv_cursor;
803 			struct dma_fence *fence;
804 
805 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
806 						DMA_RESV_USAGE_READ, fence)
807 				num_fences++;
808 		}
809 
810 		for (i = 0; i < num_write_bo_handles; i++) {
811 			struct dma_resv_iter resv_cursor;
812 			struct dma_fence *fence;
813 
814 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
815 						DMA_RESV_USAGE_WRITE, fence)
816 				num_fences++;
817 		}
818 
819 		/*
820 		 * Passing num_fences = 0 means that userspace doesn't want to
821 		 * retrieve userq_fence_info. If num_fences = 0 we skip filling
822 		 * userq_fence_info and return the actual number of fences in
823 		 * wait_info->num_fences.
824 		 */
825 		wait_info->num_fences = num_fences;
826 	} else {
827 		/* Array of fence info */
828 		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
829 		if (!fence_info) {
830 			r = -ENOMEM;
831 			goto exec_fini;
832 		}
833 
834 		/* Array of fences */
835 		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
836 		if (!fences) {
837 			r = -ENOMEM;
838 			goto free_fence_info;
839 		}
840 
841 		/* Retrieve the GEM read objects' fences */
842 		for (i = 0; i < num_read_bo_handles; i++) {
843 			struct dma_resv_iter resv_cursor;
844 			struct dma_fence *fence;
845 
846 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
847 						DMA_RESV_USAGE_READ, fence) {
848 				if (num_fences >= wait_info->num_fences) {
849 					r = -EINVAL;
850 					goto free_fences;
851 				}
852 
853 				fences[num_fences++] = fence;
854 				dma_fence_get(fence);
855 			}
856 		}
857 
858 		/* Retrieve the GEM write objects' fences */
859 		for (i = 0; i < num_write_bo_handles; i++) {
860 			struct dma_resv_iter resv_cursor;
861 			struct dma_fence *fence;
862 
863 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
864 						DMA_RESV_USAGE_WRITE, fence) {
865 				if (num_fences >= wait_info->num_fences) {
866 					r = -EINVAL;
867 					goto free_fences;
868 				}
869 
870 				fences[num_fences++] = fence;
871 				dma_fence_get(fence);
872 			}
873 		}
874 
875 		if (num_points) {
876 			struct dma_fence_unwrap iter;
877 			struct dma_fence *fence;
878 			struct dma_fence *f;
879 
880 			for (i = 0; i < num_points; i++) {
881 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
882 							   timeline_points[i],
883 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
884 							   &fence);
885 				if (r)
886 					goto free_fences;
887 
888 				dma_fence_unwrap_for_each(f, &iter, fence) {
889 					if (num_fences >= wait_info->num_fences) {
890 						r = -EINVAL;
891 						dma_fence_put(fence);
892 						goto free_fences;
893 					}
894 
895 					dma_fence_get(f);
896 					fences[num_fences++] = f;
897 				}
898 
899 				dma_fence_put(fence);
900 			}
901 		}
902 
903 		/* Retrieve the syncobj fences */
904 		for (i = 0; i < num_syncobj; i++) {
905 			struct dma_fence *fence;
906 
907 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
908 						   0,
909 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
910 						   &fence);
911 			if (r)
912 				goto free_fences;
913 
914 			if (num_fences >= wait_info->num_fences) {
915 				r = -EINVAL;
916 				dma_fence_put(fence);
917 				goto free_fences;
918 			}
919 
920 			fences[num_fences++] = fence;
921 		}
922 
923 		/*
924 		 * Keep only the latest fences to reduce the number of values
925 		 * given back to userspace.
926 		 */
927 		num_fences = dma_fence_dedup_array(fences, num_fences);
928 
929 		waitq = xa_load(&userq_mgr->userq_xa, wait_info->waitq_id);
930 		if (!waitq) {
931 			r = -EINVAL;
932 			goto free_fences;
933 		}
934 
935 		for (i = 0, cnt = 0; i < num_fences; i++) {
936 			struct amdgpu_userq_fence_driver *fence_drv;
937 			struct amdgpu_userq_fence *userq_fence;
938 			u32 index;
939 
940 			userq_fence = to_amdgpu_userq_fence(fences[i]);
941 			if (!userq_fence) {
942 				/*
943 				 * Just waiting on other driver fences should
944 				 * be good for now
945 				 */
946 				r = dma_fence_wait(fences[i], true);
947 				if (r) {
948 					dma_fence_put(fences[i]);
949 					goto free_fences;
950 				}
951 
952 				dma_fence_put(fences[i]);
953 				continue;
954 			}
955 
956 			fence_drv = userq_fence->fence_drv;
957 			/*
958 			 * We need to make sure the user queue releases its references
959 			 * to the fence drivers at some point before queue destruction.
960 			 * Otherwise, we would accumulate those references until we run
961 			 * out of space and crash.
962 			 */
963 			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
964 				     xa_limit_32b, GFP_KERNEL);
965 			if (r)
966 				goto free_fences;
967 
968 			amdgpu_userq_fence_driver_get(fence_drv);
969 
970 			/* Store the fence driver's seq64 GPU VA and the fence sequence value */
971 			fence_info[cnt].va = fence_drv->va;
972 			fence_info[cnt].value = fences[i]->seqno;
973 
974 			dma_fence_put(fences[i]);
975 			/* Increment the actual userq fence count */
976 			cnt++;
977 		}
978 
979 		wait_info->num_fences = cnt;
980 		/* Copy userq fence info to user space */
981 		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
982 				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
983 			r = -EFAULT;
984 			goto free_fences;
985 		}
986 
987 		kfree(fences);
988 		kfree(fence_info);
989 	}
990 
991 	drm_exec_fini(&exec);
992 	for (i = 0; i < num_read_bo_handles; i++)
993 		drm_gem_object_put(gobj_read[i]);
994 	kfree(gobj_read);
995 
996 	for (i = 0; i < num_write_bo_handles; i++)
997 		drm_gem_object_put(gobj_write[i]);
998 	kfree(gobj_write);
999 
1000 	kfree(timeline_points);
1001 	kfree(timeline_handles);
1002 	kfree(syncobj_handles);
1003 	kfree(bo_handles_write);
1004 	kfree(bo_handles_read);
1005 
1006 	return 0;
1007 
1008 free_fences:
1009 	while (num_fences-- > 0)
1010 		dma_fence_put(fences[num_fences]);
1011 	kfree(fences);
1012 free_fence_info:
1013 	kfree(fence_info);
1014 exec_fini:
1015 	drm_exec_fini(&exec);
1016 put_gobj_write:
1017 	while (wentry-- > 0)
1018 		drm_gem_object_put(gobj_write[wentry]);
1019 	kfree(gobj_write);
1020 put_gobj_read:
1021 	while (rentry-- > 0)
1022 		drm_gem_object_put(gobj_read[rentry]);
1023 	kfree(gobj_read);
1024 free_timeline_points:
1025 	kfree(timeline_points);
1026 free_timeline_handles:
1027 	kfree(timeline_handles);
1028 free_syncobj_handles:
1029 	kfree(syncobj_handles);
1030 free_bo_handles_write:
1031 	kfree(bo_handles_write);
1032 free_bo_handles_read:
1033 	kfree(bo_handles_read);
1034 
1035 	return r;
1036 }
1037