// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>

#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_userq_fence.h"

static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;

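/* Upper bound on the number of BO handles a single signal/wait ioctl may name */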
#define AMDGPU_USERQ_MAX_HANDLES	(1U << 16)

int amdgpu_userq_fence_slab_init(void)
{
	amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
						    sizeof(struct amdgpu_userq_fence),
						    0,
						    SLAB_HWCACHE_ALIGN,
						    NULL);
	if (!amdgpu_userq_fence_slab)
		return -ENOMEM;

	return 0;
}

void amdgpu_userq_fence_slab_fini(void)
{
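	/* Wait for pending call_rcu() fence frees before destroying the slab */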
	rcu_barrier();
	kmem_cache_destroy(amdgpu_userq_fence_slab);
}

static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
	if (!f || f->ops != &amdgpu_userq_fence_ops)
		return NULL;

	return container_of(f, struct amdgpu_userq_fence, base);
}

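/*
 * The fence "RPTR" lives in a per-queue seq64 slot: the user queue (or the
 * kernel in the force-completion path) writes the last completed sequence
 * number there in little-endian format, and the kernel reads it back to
 * decide which fences have signalled.
 */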
static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
	return le64_to_cpu(*fence_drv->cpu_addr);
}

static void
amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
			 u64 seq)
{
	if (fence_drv->cpu_addr)
		*fence_drv->cpu_addr = cpu_to_le64(seq);
}

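/**
 * amdgpu_userq_fence_driver_alloc - allocate a per-queue fence driver
 *
 * @adev: amdgpu_device pointer
 * @fence_drv_req: on success, set to the newly allocated fence driver
 *
 * Allocate the fence driver backing a user mode queue: reserve a seq64 slot
 * for the queue's fence sequence value, initialise the pending fence list and
 * take the initial reference.
 *
 * Returns 0 on success or a negative error code on failure.
 */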
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
				    struct amdgpu_userq_fence_driver **fence_drv_req)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	int r;

	if (!fence_drv_req)
		return -EINVAL;
	*fence_drv_req = NULL;

	fence_drv = kzalloc_obj(*fence_drv);
	if (!fence_drv)
		return -ENOMEM;

	/* Acquire seq64 memory */
	r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
			       &fence_drv->cpu_addr);
	if (r)
		goto free_fence_drv;

	memset(fence_drv->cpu_addr, 0, sizeof(u64));

	kref_init(&fence_drv->refcount);
	INIT_LIST_HEAD(&fence_drv->fences);
	spin_lock_init(&fence_drv->fence_list_lock);

	fence_drv->adev = adev;
	fence_drv->context = dma_fence_context_alloc(1);
	get_task_comm(fence_drv->timeline_name, current);

	*fence_drv_req = fence_drv;

	return 0;

free_fence_drv:
	kfree(fence_drv);

	return r;
}

static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	unsigned long index;

	if (xa_empty(xa))
		return;

	xa_lock(xa);
	xa_for_each(xa, index, fence_drv) {
		__xa_erase(xa, index);
		amdgpu_userq_fence_driver_put(fence_drv);
	}

	xa_unlock(xa);
}

void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
	dma_fence_put(userq->last_fence);
	userq->last_fence = NULL;
	amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
	xa_destroy(&userq->fence_drv_xa);
	/* Drop the queue's ownership reference to fence_drv explicitly */
	amdgpu_userq_fence_driver_put(userq->fence_drv);
}

static void
amdgpu_userq_fence_put_fence_drv_array(struct amdgpu_userq_fence *userq_fence)
{
	unsigned long i;

	for (i = 0; i < userq_fence->fence_drv_array_count; i++)
		amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
	userq_fence->fence_drv_array_count = 0;
}

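/*
 * Scan the pending fence list of @fence_drv: read the current RPTR from the
 * seq64 slot, move every fence whose seqno is already covered by it onto a
 * local list while holding fence_list_lock, then signal those fences with the
 * lock dropped so that dropping their fence driver references cannot recurse
 * on the lock.
 */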
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
	struct amdgpu_userq_fence *userq_fence, *tmp;
	LIST_HEAD(to_be_signaled);
	struct dma_fence *fence;
	unsigned long flags;
	u64 rptr;

	if (!fence_drv)
		return;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	rptr = amdgpu_userq_fence_read(fence_drv);

	list_for_each_entry(userq_fence, &fence_drv->fences, link) {
		if (rptr < userq_fence->base.seqno)
			break;
	}

	list_cut_before(&to_be_signaled, &fence_drv->fences,
			&userq_fence->link);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	list_for_each_entry_safe(userq_fence, tmp, &to_be_signaled, link) {
		fence = &userq_fence->base;
		list_del_init(&userq_fence->link);
		dma_fence_signal(fence);
		/* Drop the fence_drv_array references outside fence_list_lock
		 * to avoid recursive locking.
		 */
		amdgpu_userq_fence_put_fence_drv_array(userq_fence);
		dma_fence_put(fence);
	}
}

void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
	struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
					 struct amdgpu_userq_fence_driver,
					 refcount);
	struct amdgpu_device *adev = fence_drv->adev;
	struct amdgpu_userq_fence *fence, *tmp;
	unsigned long flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
		f = &fence->base;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_set_error(f, -ECANCELED);
			dma_fence_signal(f);
		}

		list_del(&fence->link);
		dma_fence_put(f);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	/* Free seq64 memory */
	amdgpu_seq64_free(adev, fence_drv->va);
	kfree(fence_drv);
}

void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_get(&fence_drv->refcount);
}

void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}

static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
	*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
	return *userq_fence ? 0 : -ENOMEM;
}

static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
				     struct amdgpu_userq_fence *userq_fence,
				     u64 seq, struct dma_fence **f)
{
	struct amdgpu_userq_fence_driver *fence_drv;
	struct dma_fence *fence;
	unsigned long flags;
	bool signaled = false;

	fence_drv = userq->fence_drv;
	if (!fence_drv)
		return -EINVAL;

	spin_lock_init(&userq_fence->lock);
	INIT_LIST_HEAD(&userq_fence->link);
	fence = &userq_fence->base;
	userq_fence->fence_drv = fence_drv;

	dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
			 fence_drv->context, seq);

	amdgpu_userq_fence_driver_get(fence_drv);
	dma_fence_get(fence);

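	/*
	 * Take over the wait-side fence driver references stashed in the
	 * queue's fence_drv_xa (see amdgpu_userq_wait_ioctl); they are carried
	 * by this fence and dropped again once it signals.
	 */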
	if (!xa_empty(&userq->fence_drv_xa)) {
		struct amdgpu_userq_fence_driver *stored_fence_drv;
		unsigned long index, count = 0;
		int i = 0;

		xa_lock(&userq->fence_drv_xa);
		xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
			count++;

		userq_fence->fence_drv_array =
			kvmalloc_objs(struct amdgpu_userq_fence_driver *, count,
				      GFP_ATOMIC);

		if (userq_fence->fence_drv_array) {
			xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
				userq_fence->fence_drv_array[i] = stored_fence_drv;
				__xa_erase(&userq->fence_drv_xa, index);
				i++;
			}
		}

		userq_fence->fence_drv_array_count = i;
		xa_unlock(&userq->fence_drv_xa);
	} else {
		userq_fence->fence_drv_array = NULL;
		userq_fence->fence_drv_array_count = 0;
	}

	/* Check if hardware has already processed the job */
	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
	if (!dma_fence_is_signaled(fence)) {
		list_add_tail(&userq_fence->link, &fence_drv->fences);
	} else {
		signaled = true;
		dma_fence_put(fence);
	}
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

	if (signaled)
		amdgpu_userq_fence_put_fence_drv_array(userq_fence);

	*f = fence;

	return 0;
}

static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
	return "amdgpu_userq_fence";
}

static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);

	return fence->fence_drv->timeline_name;
}

static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
	struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	u64 rptr, wptr;

	rptr = amdgpu_userq_fence_read(fence_drv);
	wptr = fence->base.seqno;

	if (rptr >= wptr)
		return true;

	return false;
}

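/*
 * Fences are freed from an RCU callback (see amdgpu_userq_fence_release)
 * because dma_fence users may still dereference them under RCU protection;
 * the free path also drops the fence driver reference and the
 * fence_drv_array allocation.
 */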
static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
	struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;

	/* Release the fence driver reference */
	amdgpu_userq_fence_driver_put(fence_drv);

	kvfree(userq_fence->fence_drv_array);
	kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
}

static void amdgpu_userq_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_userq_fence_free);
}

static const struct dma_fence_ops amdgpu_userq_fence_ops = {
	.get_driver_name = amdgpu_userq_fence_get_driver_name,
	.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
	.signaled = amdgpu_userq_fence_signaled,
	.release = amdgpu_userq_fence_release,
};

/**
 * amdgpu_userq_fence_read_wptr - Read the userq wptr value
 *
 * @adev: amdgpu_device pointer
 * @queue: user mode queue structure pointer
 * @wptr: write pointer value
 *
 * Read the wptr value from the userq's MQD. The userq signal IOCTL
 * creates a dma_fence for the shared buffers that is considered
 * signalled once the RPTR value written to the seq64 memory is >= WPTR.
 *
 * Returns 0 on success with the wptr value stored in @wptr, or a
 * negative error code on failure.
 */
static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
					struct amdgpu_usermode_queue *queue,
					u64 *wptr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	u64 addr, *ptr;
	int r;

	r = amdgpu_bo_reserve(queue->vm->root.bo, false);
	if (r)
		return r;

	addr = queue->userq_prop->wptr_gpu_addr;
	addr &= AMDGPU_GMC_HOLE_MASK;

	mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
	if (!mapping) {
		amdgpu_bo_unreserve(queue->vm->root.bo);
		DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
		return -EINVAL;
	}

	bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
	amdgpu_bo_unreserve(queue->vm->root.bo);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		DRM_ERROR("Failed to reserve userqueue wptr bo\n");
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&ptr);
	if (r) {
		DRM_ERROR("Failed mapping the userqueue wptr bo\n");
		goto map_error;
	}

	*wptr = le64_to_cpu(*ptr);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;

map_error:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}

static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
	dma_fence_put(fence);
}

static void
amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
				    int error)
{
	struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
	unsigned long flags;
	struct dma_fence *f;

	spin_lock_irqsave(&fence_drv->fence_list_lock, flags);

	f = rcu_dereference_protected(&fence->base,
				      lockdep_is_held(&fence_drv->fence_list_lock));
	if (f && !dma_fence_is_signaled_locked(f))
		dma_fence_set_error(f, error);
	spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

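/*
 * Force completion of the queue's last fence: mark it with -ECANCELED, write
 * its seqno into the seq64 slot as if the queue had reached it, and run the
 * normal signalling path so that any waiters are released even though the
 * hardware never got there.
 */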
void
amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
{
	struct dma_fence *f = userq->last_fence;

	if (f) {
		struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
		struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
		u64 wptr = fence->base.seqno;

		amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
		amdgpu_userq_fence_write(fence_drv, wptr);
		amdgpu_userq_fence_driver_process(fence_drv);
	}
}

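/*
 * amdgpu_userq_signal_ioctl - attach a user queue fence to BOs and syncobjs
 *
 * Looks up the syncobjs and GEM objects named by userspace, reads the current
 * WPTR of the user queue, creates a fence that signals once the queue's seq64
 * RPTR reaches that WPTR, and attaches it to the BO reservations and syncobjs
 * so that other submissions can depend on it.
 *
 * Roughly, userspace is expected to call this as sketched below (illustrative
 * only; the ioctl wrapper name is assumed to match the uapi header):
 *
 *	struct drm_amdgpu_userq_signal args = {
 *		.queue_id = queue_id,
 *		.syncobj_handles = (uintptr_t)&syncobj_handle,
 *		.num_syncobj_handles = 1,
 *		.bo_write_handles = (uintptr_t)&bo_handle,
 *		.num_bo_write_handles = 1,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_SIGNAL, &args);
 */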
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_userq_signal *args = data;
	const unsigned int num_write_bo_handles = args->num_bo_write_handles;
	const unsigned int num_read_bo_handles = args->num_bo_read_handles;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_gem_object **gobj_write, **gobj_read;
	u32 *syncobj_handles, num_syncobj_handles;
	struct amdgpu_userq_fence *userq_fence;
	struct amdgpu_usermode_queue *queue = NULL;
	struct drm_syncobj **syncobj = NULL;
	struct dma_fence *fence;
	struct drm_exec exec;
	int r, i, entry;
	u64 wptr;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
		return -EINVAL;

	num_syncobj_handles = args->num_syncobj_handles;
	syncobj_handles = memdup_array_user(u64_to_user_ptr(args->syncobj_handles),
					    num_syncobj_handles, sizeof(u32));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	/* Array of pointers to the looked up syncobjs */
	syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
	if (!syncobj) {
		r = -ENOMEM;
		goto free_syncobj_handles;
	}

	for (entry = 0; entry < num_syncobj_handles; entry++) {
		syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
		if (!syncobj[entry]) {
			r = -ENOENT;
			goto free_syncobj;
		}
	}

	r = drm_gem_objects_lookup(filp,
				   u64_to_user_ptr(args->bo_read_handles),
				   num_read_bo_handles,
				   &gobj_read);
	if (r)
		goto free_syncobj;

	r = drm_gem_objects_lookup(filp,
				   u64_to_user_ptr(args->bo_write_handles),
				   num_write_bo_handles,
				   &gobj_write);
	if (r)
		goto put_gobj_read;

	/* Retrieve the user queue */
	queue = amdgpu_userq_get(userq_mgr, args->queue_id);
	if (!queue) {
		r = -ENOENT;
		goto put_gobj_write;
	}

	r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
	if (r)
		goto put_gobj_write;

	r = amdgpu_userq_fence_alloc(&userq_fence);
	if (r)
		goto put_gobj_write;

	/*
	 * Being here means the user queue is active; make sure the
	 * eviction fence is valid.
	 */
	amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

	/* Create a new fence */
	r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
	if (r) {
		mutex_unlock(&userq_mgr->userq_mutex);
		kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
		goto put_gobj_write;
	}

	dma_fence_put(queue->last_fence);
	queue->last_fence = dma_fence_get(fence);
	amdgpu_userq_start_hang_detect_work(queue);
	mutex_unlock(&userq_mgr->userq_mutex);

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      (num_read_bo_handles + num_write_bo_handles));

	/* Lock all BOs with retry handling */
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}

		r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r) {
			amdgpu_userq_fence_cleanup(fence);
			goto exec_fini;
		}
	}

	for (i = 0; i < num_read_bo_handles; i++) {
		if (!gobj_read || !gobj_read[i]->resv)
			continue;

		dma_resv_add_fence(gobj_read[i]->resv, fence,
				   DMA_RESV_USAGE_READ);
	}

	for (i = 0; i < num_write_bo_handles; i++) {
		if (!gobj_write || !gobj_write[i]->resv)
			continue;

		dma_resv_add_fence(gobj_write[i]->resv, fence,
				   DMA_RESV_USAGE_WRITE);
	}

	/* Attach the created fence to the syncobjs */
	for (i = 0; i < num_syncobj_handles; i++)
		drm_syncobj_replace_fence(syncobj[i], fence);

	/* Drop the reference acquired in the fence creation function */
	dma_fence_put(fence);

exec_fini:
	drm_exec_fini(&exec);
put_gobj_write:
	for (i = 0; i < num_write_bo_handles; i++)
		drm_gem_object_put(gobj_write[i]);
	kvfree(gobj_write);
put_gobj_read:
	for (i = 0; i < num_read_bo_handles; i++)
		drm_gem_object_put(gobj_read[i]);
	kvfree(gobj_read);
free_syncobj:
	while (entry-- > 0)
		if (syncobj[entry])
			drm_syncobj_put(syncobj[entry]);
	kfree(syncobj);
free_syncobj_handles:
	kfree(syncobj_handles);

	if (queue)
		amdgpu_userq_put(queue);

	return r;
}

/* Count the number of expected fences so userspace can alloc a buffer */
static int
amdgpu_userq_wait_count_fences(struct drm_file *filp,
			       struct drm_amdgpu_userq_wait *wait_info,
			       u32 *syncobj_handles, u32 *timeline_points,
			       u32 *timeline_handles,
			       struct drm_gem_object **gobj_write,
			       struct drm_gem_object **gobj_read)
{
	int num_read_bo_handles, num_write_bo_handles;
	struct dma_fence_unwrap iter;
	struct dma_fence *fence, *f;
	unsigned int num_fences = 0;
	struct drm_exec exec;
	int i, r;

	/*
	 * This needs to be outside of the lock provided by drm_exec for
	 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT to work correctly.
	 */

	/* Count timeline fences */
	for (i = 0; i < wait_info->num_syncobj_timeline_handles; i++) {
		r = drm_syncobj_find_fence(filp, timeline_handles[i],
					   timeline_points[i],
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			return r;

		dma_fence_unwrap_for_each(f, &iter, fence)
			num_fences++;

		dma_fence_put(fence);
	}

	/* Count boolean fences */
	for (i = 0; i < wait_info->num_syncobj_handles; i++) {
		r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			return r;

		num_fences++;
		dma_fence_put(fence);
	}

	/* Lock all the GEM objects */
	/* TODO: It is actually not necessary to lock them */
	num_read_bo_handles = wait_info->num_bo_read_handles;
	num_write_bo_handles = wait_info->num_bo_write_handles;
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      num_read_bo_handles + num_write_bo_handles);

	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read,
					   num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;

		r = drm_exec_prepare_array(&exec, gobj_write,
					   num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;
	}

	/* Count read fences */
	for (i = 0; i < num_read_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
					DMA_RESV_USAGE_READ, fence)
			num_fences++;
	}

	/* Count write fences */
	for (i = 0; i < num_write_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
					DMA_RESV_USAGE_WRITE, fence)
			num_fences++;
	}

	wait_info->num_fences = min(num_fences, USHRT_MAX);
	r = 0;

error_unlock:
	/* Unlock all the GEM objects */
	drm_exec_fini(&exec);
	return r;
}

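/*
 * Append @fence to the result array if userspace left room for it, otherwise
 * fall back to waiting on it synchronously in the kernel. References stored
 * in @fences are owned by the caller.
 */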
static int
amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info,
			    struct dma_fence **fences, unsigned int *num_fences,
			    struct dma_fence *fence)
{
	/* Fall back to waiting in place if userspace didn't allocate enough space */
	if (*num_fences >= wait_info->num_fences)
		return dma_fence_wait(fence, true);

	fences[(*num_fences)++] = dma_fence_get(fence);
	return 0;
}

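/*
 * Collect every fence the wait request refers to (timeline syncobjs, binary
 * syncobjs and the BO reservations), dedup them and, for fences produced by
 * user queues, hand back the seq64 GPU VA and sequence value so userspace can
 * wait on them directly; fences from other drivers are waited on in the
 * kernel instead.
 */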
static int
amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
				    struct drm_amdgpu_userq_wait *wait_info,
				    u32 *syncobj_handles, u32 *timeline_points,
				    u32 *timeline_handles,
				    struct drm_gem_object **gobj_write,
				    struct drm_gem_object **gobj_read)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
	struct drm_amdgpu_userq_fence_info *fence_info;
	int num_read_bo_handles, num_write_bo_handles;
	struct amdgpu_usermode_queue *waitq;
	struct dma_fence **fences, *fence, *f;
	struct dma_fence_unwrap iter;
	int num_points, num_syncobj;
	unsigned int num_fences = 0;
	struct drm_exec exec;
	int i, cnt, r;

	fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info),
				   GFP_KERNEL);
	if (!fence_info)
		return -ENOMEM;

	fences = kmalloc_array(wait_info->num_fences, sizeof(*fences),
			       GFP_KERNEL);
	if (!fences) {
		r = -ENOMEM;
		goto free_fence_info;
	}

	/* Retrieve timeline fences */
	num_points = wait_info->num_syncobj_timeline_handles;
	for (i = 0; i < num_points; i++) {
		r = drm_syncobj_find_fence(filp, timeline_handles[i],
					   timeline_points[i],
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			goto free_fences;

		dma_fence_unwrap_for_each(f, &iter, fence) {
			r = amdgpu_userq_wait_add_fence(wait_info, fences,
							&num_fences, f);
			if (r) {
				dma_fence_put(fence);
				goto free_fences;
			}
		}

		dma_fence_put(fence);
	}

	/* Retrieve boolean fences */
	num_syncobj = wait_info->num_syncobj_handles;
	for (i = 0; i < num_syncobj; i++) {
		struct dma_fence *fence;

		r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
					   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					   &fence);
		if (r)
			goto free_fences;

		r = amdgpu_userq_wait_add_fence(wait_info, fences,
						&num_fences, fence);
		dma_fence_put(fence);
		if (r)
			goto free_fences;
	}

	/* Lock all the GEM objects */
	num_read_bo_handles = wait_info->num_bo_read_handles;
	num_write_bo_handles = wait_info->num_bo_write_handles;
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
		      num_read_bo_handles + num_write_bo_handles);

	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_array(&exec, gobj_read,
					   num_read_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;

		r = drm_exec_prepare_array(&exec, gobj_write,
					   num_write_bo_handles, 1);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error_unlock;
	}

	/* Retrieve the GEM read objects' fences */
	for (i = 0; i < num_read_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
					DMA_RESV_USAGE_READ, fence) {
			r = amdgpu_userq_wait_add_fence(wait_info, fences,
							&num_fences, fence);
			if (r)
				goto error_unlock;
		}
	}

	/* Retrieve the GEM write objects' fences */
	for (i = 0; i < num_write_bo_handles; i++) {
		struct dma_resv_iter resv_cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
					DMA_RESV_USAGE_WRITE, fence) {
			r = amdgpu_userq_wait_add_fence(wait_info, fences,
							&num_fences, fence);
			if (r)
				goto error_unlock;
		}
	}

	drm_exec_fini(&exec);

	/*
	 * Keep only the latest fences to reduce the number of values
	 * given back to userspace.
	 */
	num_fences = dma_fence_dedup_array(fences, num_fences);

	waitq = amdgpu_userq_get(userq_mgr, wait_info->waitq_id);
	if (!waitq) {
		r = -EINVAL;
		goto free_fences;
	}

	for (i = 0, cnt = 0; i < num_fences; i++) {
		struct amdgpu_userq_fence_driver *fence_drv;
		struct amdgpu_userq_fence *userq_fence;
		u32 index;

		userq_fence = to_amdgpu_userq_fence(fences[i]);
		if (!userq_fence) {
			/*
			 * Just waiting on other driver fences should
			 * be good for now
			 */
			r = dma_fence_wait(fences[i], true);
			if (r)
				goto put_waitq;

			continue;
		}

		fence_drv = userq_fence->fence_drv;
		/*
		 * We need to make sure the user queue releases its references
		 * to the fence drivers at some point before queue destruction.
		 * Otherwise, we would accumulate those references until we run
		 * out of space and crash.
		 */
		r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
			     xa_limit_32b, GFP_KERNEL);
		if (r)
			goto put_waitq;

		amdgpu_userq_fence_driver_get(fence_drv);

		/* Store the fence driver's seq64 GPU VA and the fence's sequence value */
		fence_info[cnt].va = fence_drv->va;
		fence_info[cnt].value = fences[i]->seqno;

		/* Increment the actual userq fence count */
		cnt++;
	}
	wait_info->num_fences = cnt;

	/* Copy userq fence info to user space */
	if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
			 fence_info, cnt * sizeof(*fence_info)))
		r = -EFAULT;
	else
		r = 0;

put_waitq:
	amdgpu_userq_put(waitq);

free_fences:
	while (num_fences--)
		dma_fence_put(fences[num_fences]);
	kfree(fences);

free_fence_info:
	kfree(fence_info);
	return r;

error_unlock:
	drm_exec_fini(&exec);
	goto free_fences;
}

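/*
 * amdgpu_userq_wait_ioctl - report the fences a user queue has to wait for
 *
 * Two-pass interface: a first call with num_fences == 0 only returns the
 * number of fences that would be reported so userspace can size the
 * out_fences buffer; a second call with num_fences set fills that buffer with
 * (seq64 VA, value) pairs describing the user queue fences to wait on.
 */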
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	int num_points, num_syncobj, num_read_bo_handles, num_write_bo_handles;
	u32 *syncobj_handles, *timeline_points, *timeline_handles;
	struct drm_amdgpu_userq_wait *wait_info = data;
	struct drm_gem_object **gobj_write;
	struct drm_gem_object **gobj_read;
	void __user *ptr;
	int r;

	if (!amdgpu_userq_enabled(dev))
		return -ENOTSUPP;

	if (wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
	    wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
		return -EINVAL;

	num_syncobj = wait_info->num_syncobj_handles;
	ptr = u64_to_user_ptr(wait_info->syncobj_handles);
	syncobj_handles = memdup_array_user(ptr, num_syncobj, sizeof(u32));
	if (IS_ERR(syncobj_handles))
		return PTR_ERR(syncobj_handles);

	num_points = wait_info->num_syncobj_timeline_handles;
	ptr = u64_to_user_ptr(wait_info->syncobj_timeline_handles);
	timeline_handles = memdup_array_user(ptr, num_points, sizeof(u32));
	if (IS_ERR(timeline_handles)) {
		r = PTR_ERR(timeline_handles);
		goto free_syncobj_handles;
	}

	ptr = u64_to_user_ptr(wait_info->syncobj_timeline_points);
	timeline_points = memdup_array_user(ptr, num_points, sizeof(u32));
	if (IS_ERR(timeline_points)) {
		r = PTR_ERR(timeline_points);
		goto free_timeline_handles;
	}

	num_read_bo_handles = wait_info->num_bo_read_handles;
	ptr = u64_to_user_ptr(wait_info->bo_read_handles);
	r = drm_gem_objects_lookup(filp, ptr, num_read_bo_handles, &gobj_read);
	if (r)
		goto free_timeline_points;

	num_write_bo_handles = wait_info->num_bo_write_handles;
	ptr = u64_to_user_ptr(wait_info->bo_write_handles);
	r = drm_gem_objects_lookup(filp, ptr, num_write_bo_handles,
				   &gobj_write);
	if (r)
		goto put_gobj_read;

	/*
	 * Passing num_fences = 0 means that userspace doesn't want to
	 * retrieve userq_fence_info. If num_fences = 0 we skip filling
	 * userq_fence_info and return the actual number of fences in
	 * wait_info->num_fences.
	 */
	if (!wait_info->num_fences) {
		r = amdgpu_userq_wait_count_fences(filp, wait_info,
						   syncobj_handles,
						   timeline_points,
						   timeline_handles,
						   gobj_write,
						   gobj_read);
	} else {
		r = amdgpu_userq_wait_return_fence_info(filp, wait_info,
							syncobj_handles,
							timeline_points,
							timeline_handles,
							gobj_write,
							gobj_read);
	}

	while (num_write_bo_handles--)
		drm_gem_object_put(gobj_write[num_write_bo_handles]);
	kvfree(gobj_write);

put_gobj_read:
	while (num_read_bo_handles--)
		drm_gem_object_put(gobj_read[num_read_bo_handles]);
	kvfree(gobj_read);

free_timeline_points:
	kfree(timeline_points);
free_timeline_handles:
	kfree(timeline_handles);
free_syncobj_handles:
	kfree(syncobj_handles);
	return r;
}