xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c (revision 75372d75a4e23783583998ed99d5009d555850da)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/kref.h>
26 #include <linux/slab.h>
27 #include <linux/dma-fence-unwrap.h>
28 
29 #include <drm/drm_exec.h>
30 #include <drm/drm_syncobj.h>
31 
32 #include "amdgpu.h"
33 #include "amdgpu_userq_fence.h"
34 
35 static const struct dma_fence_ops amdgpu_userq_fence_ops;
36 static struct kmem_cache *amdgpu_userq_fence_slab;
37 
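/**
 * amdgpu_userq_fence_slab_init - create the userq fence slab cache
 *
 * Creates the kmem cache used to allocate struct amdgpu_userq_fence objects.
 * Intended to be called once during driver initialization.
 *
 * Returns: 0 on success, -ENOMEM if the cache could not be created.
 */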
38 int amdgpu_userq_fence_slab_init(void)
39 {
40 	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
41 						    sizeof(struct amdgpu_userq_fence),
42 						    0,
43 						    SLAB_HWCACHE_ALIGN,
44 						    NULL);
45 	if (!amdgpu_userq_fence_slab)
46 		return -ENOMEM;
47 
48 	return 0;
49 }
50 
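/**
 * amdgpu_userq_fence_slab_fini - destroy the userq fence slab cache
 *
 * Waits for pending RCU callbacks to complete (userq fences are freed via
 * call_rcu()) and then destroys the kmem cache. Counterpart of
 * amdgpu_userq_fence_slab_init().
 */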
51 void amdgpu_userq_fence_slab_fini(void)
52 {
53 	rcu_barrier();
54 	kmem_cache_destroy(amdgpu_userq_fence_slab);
55 }
56 
57 static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
58 {
59 	if (!f || f->ops != &amdgpu_userq_fence_ops)
60 		return NULL;
61 
62 	return container_of(f, struct amdgpu_userq_fence, base);
63 }
64 
65 static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
66 {
67 	return le64_to_cpu(*fence_drv->cpu_addr);
68 }
69 
70 static void
71 amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
72 			 u64 seq)
73 {
74 	if (fence_drv->cpu_addr)
75 		*fence_drv->cpu_addr = cpu_to_le64(seq);
76 }
77 
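/**
 * amdgpu_userq_fence_driver_alloc - allocate a fence driver for a user queue
 * @adev: amdgpu device pointer
 * @userq: user mode queue the fence driver is attached to
 *
 * Allocates the per-queue fence driver, acquires a seq64 slot that backs the
 * fence sequence memory, initializes the pending fence list and reference
 * count, and registers the driver in adev->userq_xa keyed by the queue's
 * doorbell index.
 *
 * Returns: 0 on success, negative error code on failure.
 */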
78 int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
79 				    struct amdgpu_usermode_queue *userq)
80 {
81 	struct amdgpu_userq_fence_driver *fence_drv;
82 	unsigned long flags;
83 	int r;
84 
85 	fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
86 	if (!fence_drv)
87 		return -ENOMEM;
88 
89 	/* Acquire seq64 memory */
90 	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
91 			       &fence_drv->cpu_addr);
92 	if (r)
93 		goto free_fence_drv;
94 
95 	memset(fence_drv->cpu_addr, 0, sizeof(u64));
96 
97 	kref_init(&fence_drv->refcount);
98 	INIT_LIST_HEAD(&fence_drv->fences);
99 	spin_lock_init(&fence_drv->fence_list_lock);
100 
101 	fence_drv->adev = adev;
102 	fence_drv->context = dma_fence_context_alloc(1);
103 	get_task_comm(fence_drv->timeline_name, current);
104 
105 	xa_lock_irqsave(&adev->userq_xa, flags);
106 	r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
107 			      fence_drv, GFP_KERNEL));
108 	xa_unlock_irqrestore(&adev->userq_xa, flags);
109 	if (r)
110 		goto free_seq64;
111 
112 	userq->fence_drv = fence_drv;
113 
114 	return 0;
115 
116 free_seq64:
117 	amdgpu_seq64_free(adev, fence_drv->va);
118 free_fence_drv:
119 	kfree(fence_drv);
120 
121 	return r;
122 }
123 
124 static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
125 {
126 	struct amdgpu_userq_fence_driver *fence_drv;
127 	unsigned long index;
128 
129 	if (xa_empty(xa))
130 		return;
131 
132 	xa_lock(xa);
133 	xa_for_each(xa, index, fence_drv) {
134 		__xa_erase(xa, index);
135 		amdgpu_userq_fence_driver_put(fence_drv);
136 	}
137 
138 	xa_unlock(xa);
139 }
140 
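/**
 * amdgpu_userq_fence_driver_free - release the fence drivers of a user queue
 * @userq: user mode queue being destroyed
 *
 * Drops every fence driver reference stored in the queue's fence_drv_xa
 * (collected by the wait IOCTL), destroys the xarray and finally drops the
 * reference to the queue's own fence driver.
 */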
141 void
142 amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
143 {
144 	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
145 	xa_destroy(&userq->fence_drv_xa);
146 	/* Drop the fence_drv reference held by the user queue */
147 	amdgpu_userq_fence_driver_put(userq->fence_drv);
148 }
149 
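/**
 * amdgpu_userq_fence_driver_process - signal completed userq fences
 * @fence_drv: fence driver to process
 *
 * Reads the current RPTR value from the seq64 memory and signals every
 * pending fence whose seqno is <= that value, dropping the fence driver
 * references attached to each signaled fence.
 */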
150 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
151 {
152 	struct amdgpu_userq_fence *userq_fence, *tmp;
153 	struct dma_fence *fence;
154 	unsigned long flags;
155 	u64 rptr;
156 	int i;
157 
158 	if (!fence_drv)
159 		return;
160 
161 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
162 	rptr = amdgpu_userq_fence_read(fence_drv);
163 
164 	list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
165 		fence = &userq_fence->base;
166 
167 		if (rptr < fence->seqno)
168 			break;
169 
170 		dma_fence_signal(fence);
171 
172 		for (i = 0; i < userq_fence->fence_drv_array_count; i++)
173 			amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
174 
175 		list_del(&userq_fence->link);
176 		dma_fence_put(fence);
177 	}
178 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
179 }
180 
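/**
 * amdgpu_userq_fence_driver_destroy - final release of a fence driver
 * @ref: refcount embedded in the fence driver
 *
 * kref release callback: force-signals any remaining fences with -ECANCELED,
 * removes the driver from adev->userq_xa, frees the seq64 slot and the
 * driver structure itself.
 */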
181 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
182 {
183 	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
184 					 struct amdgpu_userq_fence_driver,
185 					 refcount);
186 	struct amdgpu_userq_fence_driver *xa_fence_drv;
187 	struct amdgpu_device *adev = fence_drv->adev;
188 	struct amdgpu_userq_fence *fence, *tmp;
189 	struct xarray *xa = &adev->userq_xa;
190 	unsigned long index, flags;
191 	struct dma_fence *f;
192 
193 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
194 	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
195 		f = &fence->base;
196 
197 		if (!dma_fence_is_signaled(f)) {
198 			dma_fence_set_error(f, -ECANCELED);
199 			dma_fence_signal(f);
200 		}
201 
202 		list_del(&fence->link);
203 		dma_fence_put(f);
204 	}
205 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
206 
207 	xa_lock_irqsave(xa, flags);
208 	xa_for_each(xa, index, xa_fence_drv)
209 		if (xa_fence_drv == fence_drv)
210 			__xa_erase(xa, index);
211 	xa_unlock_irqrestore(xa, flags);
212 
213 	/* Free seq64 memory */
214 	amdgpu_seq64_free(adev, fence_drv->va);
215 	kfree(fence_drv);
216 }
217 
218 void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
219 {
220 	kref_get(&fence_drv->refcount);
221 }
222 
223 void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
224 {
225 	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
226 }
227 
228 static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
229 {
230 	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
231 	return *userq_fence ? 0 : -ENOMEM;
232 }
233 
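/**
 * amdgpu_userq_fence_create - initialize and queue a userq fence
 * @userq: user mode queue the fence belongs to
 * @userq_fence: pre-allocated fence object to initialize
 * @seq: sequence number (wptr value) the fence signals at
 * @f: location where the initialized dma_fence pointer is stored
 *
 * Initializes the dma_fence, takes over the fence driver references stashed
 * in the queue's fence_drv_xa so they can be dropped once the fence signals,
 * and adds the fence to the driver's pending list unless the hardware has
 * already processed it.
 *
 * Returns: 0 on success, -EINVAL if the queue has no fence driver.
 */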
234 static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
235 				     struct amdgpu_userq_fence *userq_fence,
236 				     u64 seq, struct dma_fence **f)
237 {
238 	struct amdgpu_userq_fence_driver *fence_drv;
239 	struct dma_fence *fence;
240 	unsigned long flags;
241 
242 	fence_drv = userq->fence_drv;
243 	if (!fence_drv)
244 		return -EINVAL;
245 
246 	spin_lock_init(&userq_fence->lock);
247 	INIT_LIST_HEAD(&userq_fence->link);
248 	fence = &userq_fence->base;
249 	userq_fence->fence_drv = fence_drv;
250 
251 	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
252 			 fence_drv->context, seq);
253 
254 	amdgpu_userq_fence_driver_get(fence_drv);
255 	dma_fence_get(fence);
256 
257 	if (!xa_empty(&userq->fence_drv_xa)) {
258 		struct amdgpu_userq_fence_driver *stored_fence_drv;
259 		unsigned long index, count = 0;
260 		int i = 0;
261 
262 		xa_lock(&userq->fence_drv_xa);
263 		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
264 			count++;
265 
266 		userq_fence->fence_drv_array =
267 			kvmalloc_array(count,
268 				       sizeof(struct amdgpu_userq_fence_driver *),
269 				       GFP_ATOMIC);
270 
271 		if (userq_fence->fence_drv_array) {
272 			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
273 				userq_fence->fence_drv_array[i] = stored_fence_drv;
274 				__xa_erase(&userq->fence_drv_xa, index);
275 				i++;
276 			}
277 		}
278 
279 		userq_fence->fence_drv_array_count = i;
280 		xa_unlock(&userq->fence_drv_xa);
281 	} else {
282 		userq_fence->fence_drv_array = NULL;
283 		userq_fence->fence_drv_array_count = 0;
284 	}
285 
286 	/* Check if hardware has already processed the job */
287 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
288 	if (!dma_fence_is_signaled(fence))
289 		list_add_tail(&userq_fence->link, &fence_drv->fences);
290 	else
291 		dma_fence_put(fence);
292 
293 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
294 
295 	*f = fence;
296 
297 	return 0;
298 }
299 
300 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
301 {
302 	return "amdgpu_userq_fence";
303 }
304 
305 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
306 {
307 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
308 
309 	return fence->fence_drv->timeline_name;
310 }
311 
312 static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
313 {
314 	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
315 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
316 	u64 rptr, wptr;
317 
318 	rptr = amdgpu_userq_fence_read(fence_drv);
319 	wptr = fence->base.seqno;
320 
321 	if (rptr >= wptr)
322 		return true;
323 
324 	return false;
325 }
326 
327 static void amdgpu_userq_fence_free(struct rcu_head *rcu)
328 {
329 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
330 	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
331 	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
332 
333 	/* Release the fence driver reference */
334 	amdgpu_userq_fence_driver_put(fence_drv);
335 
336 	kvfree(userq_fence->fence_drv_array);
337 	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
338 }
339 
340 static void amdgpu_userq_fence_release(struct dma_fence *f)
341 {
342 	call_rcu(&f->rcu, amdgpu_userq_fence_free);
343 }
344 
345 static const struct dma_fence_ops amdgpu_userq_fence_ops = {
346 	.get_driver_name = amdgpu_userq_fence_get_driver_name,
347 	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
348 	.signaled = amdgpu_userq_fence_signaled,
349 	.release = amdgpu_userq_fence_release,
350 };
351 
352 /**
353  * amdgpu_userq_fence_read_wptr - Read the userq wptr value
354  *
355  * @adev: amdgpu_device pointer
356  * @queue: user mode queue structure pointer
357  * @wptr: location where the write pointer value is returned
358  *
359  * Read the wptr value from the user queue's wptr BO. The userq signal IOCTL
360  * creates a dma_fence for the shared buffers that is considered signaled
361  * once the RPTR value written to seq64 memory is >= this WPTR.
362  *
363  * Returns: 0 on success with the wptr stored in @wptr, negative errno on failure.
364  */
365 static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
366 					struct amdgpu_usermode_queue *queue,
367 					u64 *wptr)
368 {
369 	struct amdgpu_bo_va_mapping *mapping;
370 	struct amdgpu_bo *bo;
371 	u64 addr, *ptr;
372 	int r;
373 
374 	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
375 	if (r)
376 		return r;
377 
378 	addr = queue->userq_prop->wptr_gpu_addr;
379 	addr &= AMDGPU_GMC_HOLE_MASK;
380 
381 	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
382 	if (!mapping) {
383 		amdgpu_bo_unreserve(queue->vm->root.bo);
384 		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
385 		return -EINVAL;
386 	}
387 
388 	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
389 	amdgpu_bo_unreserve(queue->vm->root.bo);
390 	r = amdgpu_bo_reserve(bo, true);
391 	if (r) {
392 		amdgpu_bo_unref(&bo);
393 		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
394 		return r;
395 	}
396 
397 	r = amdgpu_bo_kmap(bo, (void **)&ptr);
398 	if (r) {
399 		DRM_ERROR("Failed to map the userqueue wptr bo\n");
400 		goto map_error;
401 	}
402 
403 	*wptr = le64_to_cpu(*ptr);
404 
405 	amdgpu_bo_kunmap(bo);
406 	amdgpu_bo_unreserve(bo);
407 	amdgpu_bo_unref(&bo);
408 
409 	return 0;
410 
411 map_error:
412 	amdgpu_bo_unreserve(bo);
413 	amdgpu_bo_unref(&bo);
414 
415 	return r;
416 }
417 
418 static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
419 {
420 	dma_fence_put(fence);
421 }
422 
423 static void
424 amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
425 				    int error)
426 {
427 	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
428 	unsigned long flags;
429 	struct dma_fence *f;
430 
431 	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
432 
433 	f = rcu_dereference_protected(&fence->base,
434 				      lockdep_is_held(&fence_drv->fence_list_lock));
435 	if (f && !dma_fence_is_signaled_locked(f))
436 		dma_fence_set_error(f, error);
437 	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
438 }
439 
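/**
 * amdgpu_userq_fence_driver_force_completion - force-complete a queue's fences
 * @userq: user mode queue to force-complete
 *
 * Marks the queue's last fence with -ECANCELED, writes its seqno into the
 * seq64 memory and processes the fence driver so that every pending fence up
 * to that point gets signaled and waiters are not left hanging.
 */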
440 void
441 amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
442 {
443 	struct dma_fence *f = userq->last_fence;
444 
445 	if (f) {
446 		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
447 		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
448 		u64 wptr = fence->base.seqno;
449 
450 		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
451 		amdgpu_userq_fence_write(fence_drv, wptr);
452 		amdgpu_userq_fence_driver_process(fence_drv);
454 	}
455 }
456 
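/**
 * amdgpu_userq_signal_ioctl - userq signal IOCTL handler
 * @dev: drm device pointer
 * @data: ioctl argument (struct drm_amdgpu_userq_signal)
 * @filp: drm file pointer
 *
 * Creates a userq fence for the queue's current wptr and attaches it to the
 * supplied syncobjs and to the reservation objects of the supplied read and
 * write GEM BOs, so that other submissions can synchronize with work on the
 * user mode queue.
 *
 * Returns: 0 on success, negative error code on failure.
 */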
457 int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
458 			      struct drm_file *filp)
459 {
460 	struct amdgpu_device *adev = drm_to_adev(dev);
461 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
462 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
463 	struct drm_amdgpu_userq_signal *args = data;
464 	struct drm_gem_object **gobj_write = NULL;
465 	struct drm_gem_object **gobj_read = NULL;
466 	struct amdgpu_usermode_queue *queue;
467 	struct amdgpu_userq_fence *userq_fence;
468 	struct drm_syncobj **syncobj = NULL;
469 	u32 *bo_handles_write, num_write_bo_handles;
470 	u32 *syncobj_handles, num_syncobj_handles;
471 	u32 *bo_handles_read, num_read_bo_handles;
472 	int r, i, entry, rentry, wentry;
473 	struct dma_fence *fence;
474 	struct drm_exec exec;
475 	u64 wptr;
476 
477 	if (!amdgpu_userq_enabled(dev))
478 		return -ENOTSUPP;
479 
480 	num_syncobj_handles = args->num_syncobj_handles;
481 	syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
482 				      size_mul(sizeof(u32), num_syncobj_handles));
483 	if (IS_ERR(syncobj_handles))
484 		return PTR_ERR(syncobj_handles);
485 
486 	/* Array of pointers to the looked up syncobjs */
487 	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
488 	if (!syncobj) {
489 		r = -ENOMEM;
490 		goto free_syncobj_handles;
491 	}
492 
493 	for (entry = 0; entry < num_syncobj_handles; entry++) {
494 		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
495 		if (!syncobj[entry]) {
496 			r = -ENOENT;
497 			goto free_syncobj;
498 		}
499 	}
500 
501 	num_read_bo_handles = args->num_bo_read_handles;
502 	bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
503 				      sizeof(u32) * num_read_bo_handles);
504 	if (IS_ERR(bo_handles_read)) {
505 		r = PTR_ERR(bo_handles_read);
506 		goto free_syncobj;
507 	}
508 
509 	/* Array of pointers to the GEM read objects */
510 	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
511 	if (!gobj_read) {
512 		r = -ENOMEM;
513 		goto free_bo_handles_read;
514 	}
515 
516 	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
517 		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
518 		if (!gobj_read[rentry]) {
519 			r = -ENOENT;
520 			goto put_gobj_read;
521 		}
522 	}
523 
524 	num_write_bo_handles = args->num_bo_write_handles;
525 	bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
526 				       sizeof(u32) * num_write_bo_handles);
527 	if (IS_ERR(bo_handles_write)) {
528 		r = PTR_ERR(bo_handles_write);
529 		goto put_gobj_read;
530 	}
531 
532 	/* Array of pointers to the GEM write objects */
533 	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
534 	if (!gobj_write) {
535 		r = -ENOMEM;
536 		goto free_bo_handles_write;
537 	}
538 
539 	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
540 		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
541 		if (!gobj_write[wentry]) {
542 			r = -ENOENT;
543 			goto put_gobj_write;
544 		}
545 	}
546 
547 	/* Retrieve the user queue */
548 	queue = xa_load(&userq_mgr->userq_xa, args->queue_id);
549 	if (!queue) {
550 		r = -ENOENT;
551 		goto put_gobj_write;
552 	}
553 
554 	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
555 	if (r)
556 		goto put_gobj_write;
557 
558 	r = amdgpu_userq_fence_alloc(&userq_fence);
559 	if (r)
560 		goto put_gobj_write;
561 
562 	/* Getting here means the user queue is active; make sure the eviction fence is valid */
563 	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
564 
565 	/* Create a new fence */
566 	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
567 	if (r) {
568 		mutex_unlock(&userq_mgr->userq_mutex);
569 		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
570 		goto put_gobj_write;
571 	}
572 
573 	dma_fence_put(queue->last_fence);
574 	queue->last_fence = dma_fence_get(fence);
575 	mutex_unlock(&userq_mgr->userq_mutex);
576 
577 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
578 		      (num_read_bo_handles + num_write_bo_handles));
579 
580 	/* Lock all BOs with retry handling */
581 	drm_exec_until_all_locked(&exec) {
582 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
583 		drm_exec_retry_on_contention(&exec);
584 		if (r) {
585 			amdgpu_userq_fence_cleanup(fence);
586 			goto exec_fini;
587 		}
588 
589 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
590 		drm_exec_retry_on_contention(&exec);
591 		if (r) {
592 			amdgpu_userq_fence_cleanup(fence);
593 			goto exec_fini;
594 		}
595 	}
596 
597 	for (i = 0; i < num_read_bo_handles; i++) {
598 		if (!gobj_read || !gobj_read[i]->resv)
599 			continue;
600 
601 		dma_resv_add_fence(gobj_read[i]->resv, fence,
602 				   DMA_RESV_USAGE_READ);
603 	}
604 
605 	for (i = 0; i < num_write_bo_handles; i++) {
606 		if (!gobj_write || !gobj_write[i]->resv)
607 			continue;
608 
609 		dma_resv_add_fence(gobj_write[i]->resv, fence,
610 				   DMA_RESV_USAGE_WRITE);
611 	}
612 
613 	/* Attach the created fence to the syncobjs */
614 	for (i = 0; i < num_syncobj_handles; i++)
615 		drm_syncobj_replace_fence(syncobj[i], fence);
616 
617 	/* Drop the reference acquired in the fence creation function */
618 	dma_fence_put(fence);
619 
620 exec_fini:
621 	drm_exec_fini(&exec);
622 put_gobj_write:
623 	while (wentry-- > 0)
624 		drm_gem_object_put(gobj_write[wentry]);
625 	kfree(gobj_write);
626 free_bo_handles_write:
627 	kfree(bo_handles_write);
628 put_gobj_read:
629 	while (rentry-- > 0)
630 		drm_gem_object_put(gobj_read[rentry]);
631 	kfree(gobj_read);
632 free_bo_handles_read:
633 	kfree(bo_handles_read);
634 free_syncobj:
635 	while (entry-- > 0)
636 		if (syncobj[entry])
637 			drm_syncobj_put(syncobj[entry]);
638 	kfree(syncobj);
639 free_syncobj_handles:
640 	kfree(syncobj_handles);
641 
642 	return r;
643 }
644 
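/**
 * amdgpu_userq_wait_ioctl - userq wait IOCTL handler
 * @dev: drm device pointer
 * @data: ioctl argument (struct drm_amdgpu_userq_wait)
 * @filp: drm file pointer
 *
 * Collects the fences attached to the supplied syncobjs, timeline syncobjs
 * and read/write GEM BOs. If wait_info->num_fences is zero, only the number
 * of fences found is returned. Otherwise, userq fences are converted into
 * (seq64 GPU VA, value) pairs that userspace can wait on directly, while
 * fences from other drivers are waited on inline.
 *
 * Returns: 0 on success, negative error code on failure.
 */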
645 int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
646 			    struct drm_file *filp)
647 {
648 	u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
649 	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
650 	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
651 	struct drm_amdgpu_userq_wait *wait_info = data;
652 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
653 	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
654 	struct amdgpu_usermode_queue *waitq;
655 	struct drm_gem_object **gobj_write;
656 	struct drm_gem_object **gobj_read;
657 	struct dma_fence **fences = NULL;
658 	u16 num_points, num_fences = 0;
659 	int r, i, rentry, wentry, cnt;
660 	struct drm_exec exec;
661 
662 	if (!amdgpu_userq_enabled(dev))
663 		return -ENOTSUPP;
664 
665 	num_read_bo_handles = wait_info->num_bo_read_handles;
666 	bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
667 				      size_mul(sizeof(u32), num_read_bo_handles));
668 	if (IS_ERR(bo_handles_read))
669 		return PTR_ERR(bo_handles_read);
670 
671 	num_write_bo_handles = wait_info->num_bo_write_handles;
672 	bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
673 				       size_mul(sizeof(u32), num_write_bo_handles));
674 	if (IS_ERR(bo_handles_write)) {
675 		r = PTR_ERR(bo_handles_write);
676 		goto free_bo_handles_read;
677 	}
678 
679 	num_syncobj = wait_info->num_syncobj_handles;
680 	syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
681 				      size_mul(sizeof(u32), num_syncobj));
682 	if (IS_ERR(syncobj_handles)) {
683 		r = PTR_ERR(syncobj_handles);
684 		goto free_bo_handles_write;
685 	}
686 
687 	num_points = wait_info->num_syncobj_timeline_handles;
688 	timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
689 				       sizeof(u32) * num_points);
690 	if (IS_ERR(timeline_handles)) {
691 		r = PTR_ERR(timeline_handles);
692 		goto free_syncobj_handles;
693 	}
694 
695 	timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
696 				      sizeof(u32) * num_points);
697 	if (IS_ERR(timeline_points)) {
698 		r = PTR_ERR(timeline_points);
699 		goto free_timeline_handles;
700 	}
701 
702 	gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
703 	if (!gobj_read) {
704 		r = -ENOMEM;
705 		goto free_timeline_points;
706 	}
707 
708 	for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
709 		gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
710 		if (!gobj_read[rentry]) {
711 			r = -ENOENT;
712 			goto put_gobj_read;
713 		}
714 	}
715 
716 	gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
717 	if (!gobj_write) {
718 		r = -ENOMEM;
719 		goto put_gobj_read;
720 	}
721 
722 	for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
723 		gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
724 		if (!gobj_write[wentry]) {
725 			r = -ENOENT;
726 			goto put_gobj_write;
727 		}
728 	}
729 
730 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
731 		      (num_read_bo_handles + num_write_bo_handles));
732 
733 	/* Lock all BOs with retry handling */
734 	drm_exec_until_all_locked(&exec) {
735 		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
736 		drm_exec_retry_on_contention(&exec);
737 		if (r) {
738 			drm_exec_fini(&exec);
739 			goto put_gobj_write;
740 		}
741 
742 		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
743 		drm_exec_retry_on_contention(&exec);
744 		if (r) {
745 			drm_exec_fini(&exec);
746 			goto put_gobj_write;
747 		}
748 	}
749 
750 	if (!wait_info->num_fences) {
751 		if (num_points) {
752 			struct dma_fence_unwrap iter;
753 			struct dma_fence *fence;
754 			struct dma_fence *f;
755 
756 			for (i = 0; i < num_points; i++) {
757 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
758 							   timeline_points[i],
759 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
760 							   &fence);
761 				if (r)
762 					goto exec_fini;
763 
764 				dma_fence_unwrap_for_each(f, &iter, fence)
765 					num_fences++;
766 
767 				dma_fence_put(fence);
768 			}
769 		}
770 
771 		/* Count the syncobj fences */
772 		for (i = 0; i < num_syncobj; i++) {
773 			struct dma_fence *fence;
774 
775 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
776 						   0,
777 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
778 						   &fence);
779 			if (r)
780 				goto exec_fini;
781 
782 			num_fences++;
783 			dma_fence_put(fence);
784 		}
785 
786 		/* Count the GEM object fences */
787 		for (i = 0; i < num_read_bo_handles; i++) {
788 			struct dma_resv_iter resv_cursor;
789 			struct dma_fence *fence;
790 
791 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
792 						DMA_RESV_USAGE_READ, fence)
793 				num_fences++;
794 		}
795 
796 		for (i = 0; i < num_write_bo_handles; i++) {
797 			struct dma_resv_iter resv_cursor;
798 			struct dma_fence *fence;
799 
800 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
801 						DMA_RESV_USAGE_WRITE, fence)
802 				num_fences++;
803 		}
804 
805 		/*
806 		 * Passing num_fences = 0 means that userspace doesn't want to
807 		 * retrieve userq_fence_info. In that case we skip filling
808 		 * userq_fence_info and only return the actual number of fences
809 		 * in wait_info->num_fences.
810 		 */
811 		wait_info->num_fences = num_fences;
812 	} else {
813 		/* Array of fence info */
814 		fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
815 		if (!fence_info) {
816 			r = -ENOMEM;
817 			goto exec_fini;
818 		}
819 
820 		/* Array of fences */
821 		fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
822 		if (!fences) {
823 			r = -ENOMEM;
824 			goto free_fence_info;
825 		}
826 
827 		/* Collect the fences attached to the GEM read objects */
828 		for (i = 0; i < num_read_bo_handles; i++) {
829 			struct dma_resv_iter resv_cursor;
830 			struct dma_fence *fence;
831 
832 			dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
833 						DMA_RESV_USAGE_READ, fence) {
834 				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
835 					r = -EINVAL;
836 					goto free_fences;
837 				}
838 
839 				fences[num_fences++] = fence;
840 				dma_fence_get(fence);
841 			}
842 		}
843 
844 		/* Collect the fences attached to the GEM write objects */
845 		for (i = 0; i < num_write_bo_handles; i++) {
846 			struct dma_resv_iter resv_cursor;
847 			struct dma_fence *fence;
848 
849 			dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
850 						DMA_RESV_USAGE_WRITE, fence) {
851 				if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
852 					r = -EINVAL;
853 					goto free_fences;
854 				}
855 
856 				fences[num_fences++] = fence;
857 				dma_fence_get(fence);
858 			}
859 		}
860 
861 		if (num_points) {
862 			struct dma_fence_unwrap iter;
863 			struct dma_fence *fence;
864 			struct dma_fence *f;
865 
866 			for (i = 0; i < num_points; i++) {
867 				r = drm_syncobj_find_fence(filp, timeline_handles[i],
868 							   timeline_points[i],
869 							   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
870 							   &fence);
871 				if (r)
872 					goto free_fences;
873 
874 				dma_fence_unwrap_for_each(f, &iter, fence) {
875 					if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
876 						r = -EINVAL;
877 						goto free_fences;
878 					}
879 
880 					dma_fence_get(f);
881 					fences[num_fences++] = f;
882 				}
883 
884 				dma_fence_put(fence);
885 			}
886 		}
887 
888 		/* Retrieve the syncobj fences */
889 		for (i = 0; i < num_syncobj; i++) {
890 			struct dma_fence *fence;
891 
892 			r = drm_syncobj_find_fence(filp, syncobj_handles[i],
893 						   0,
894 						   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
895 						   &fence);
896 			if (r)
897 				goto free_fences;
898 
899 			if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
900 				r = -EINVAL;
901 				goto free_fences;
902 			}
903 
904 			fences[num_fences++] = fence;
905 		}
906 
907 		/*
908 		 * Keep only the latest fences to reduce the number of values
909 		 * given back to userspace.
910 		 */
911 		num_fences = dma_fence_dedup_array(fences, num_fences);
912 
913 		waitq = xa_load(&userq_mgr->userq_xa, wait_info->waitq_id);
914 		if (!waitq) {
915 			r = -EINVAL;
916 			goto free_fences;
917 		}
918 
919 		for (i = 0, cnt = 0; i < num_fences; i++) {
920 			struct amdgpu_userq_fence_driver *fence_drv;
921 			struct amdgpu_userq_fence *userq_fence;
922 			u32 index;
923 
924 			userq_fence = to_amdgpu_userq_fence(fences[i]);
925 			if (!userq_fence) {
926 				/*
927 				 * Fences from other drivers are not reported back
928 				 * to userspace; just wait for them inline for now.
929 				 */
930 				r = dma_fence_wait(fences[i], true);
931 				if (r) {
932 					dma_fence_put(fences[i]);
933 					goto free_fences;
934 				}
935 
936 				dma_fence_put(fences[i]);
937 				continue;
938 			}
939 
940 			fence_drv = userq_fence->fence_drv;
941 			/*
942 			 * We need to make sure the user queue releases its references
943 			 * to the fence drivers at some point before queue destruction.
944 			 * Otherwise, we would accumulate those references until we run
945 			 * out of space and crash.
946 			 */
947 			r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
948 				     xa_limit_32b, GFP_KERNEL);
949 			if (r)
950 				goto free_fences;
951 
952 			amdgpu_userq_fence_driver_get(fence_drv);
953 
954 			/* Store the fence driver's seq64 GPU VA and the expected fence value */
955 			fence_info[cnt].va = fence_drv->va;
956 			fence_info[cnt].value = fences[i]->seqno;
957 
958 			dma_fence_put(fences[i]);
959 			/* Increment the actual userq fence count */
960 			cnt++;
961 		}
962 
963 		wait_info->num_fences = cnt;
964 		/* Copy userq fence info to user space */
965 		if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
966 				 fence_info, wait_info->num_fences * sizeof(*fence_info))) {
967 			r = -EFAULT;
968 			goto free_fences;
969 		}
970 
971 		kfree(fences);
972 		kfree(fence_info);
973 	}
974 
975 	drm_exec_fini(&exec);
976 	for (i = 0; i < num_read_bo_handles; i++)
977 		drm_gem_object_put(gobj_read[i]);
978 	kfree(gobj_read);
979 
980 	for (i = 0; i < num_write_bo_handles; i++)
981 		drm_gem_object_put(gobj_write[i]);
982 	kfree(gobj_write);
983 
984 	kfree(timeline_points);
985 	kfree(timeline_handles);
986 	kfree(syncobj_handles);
987 	kfree(bo_handles_write);
988 	kfree(bo_handles_read);
989 
990 	return 0;
991 
992 free_fences:
993 	while (num_fences-- > 0)
994 		dma_fence_put(fences[num_fences]);
995 	kfree(fences);
996 free_fence_info:
997 	kfree(fence_info);
998 exec_fini:
999 	drm_exec_fini(&exec);
1000 put_gobj_write:
1001 	while (wentry-- > 0)
1002 		drm_gem_object_put(gobj_write[wentry]);
1003 	kfree(gobj_write);
1004 put_gobj_read:
1005 	while (rentry-- > 0)
1006 		drm_gem_object_put(gobj_read[rentry]);
1007 	kfree(gobj_read);
1008 free_timeline_points:
1009 	kfree(timeline_points);
1010 free_timeline_handles:
1011 	kfree(timeline_handles);
1012 free_syncobj_handles:
1013 	kfree(syncobj_handles);
1014 free_bo_handles_write:
1015 	kfree(bo_handles_write);
1016 free_bo_handles_read:
1017 	kfree(bo_handles_read);
1018 
1019 	return r;
1020 }
1021