// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>

#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>

#include "amdgpu.h"
#include "amdgpu_userq_fence.h"

#define AMDGPU_USERQ_MAX_HANDLES (1U << 16)

static const struct dma_fence_ops amdgpu_userq_fence_ops;

static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
        if (!f || f->ops != &amdgpu_userq_fence_ops)
                return NULL;

        return container_of(f, struct amdgpu_userq_fence, base);
}

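/* Read the last sequence number the hardware has written to the seq64 slot */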
static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
        return le64_to_cpu(*fence_drv->cpu_addr);
}

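/* Write a sequence number into the seq64 slot backing this fence driver */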
static void
amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
                         u64 seq)
{
        if (fence_drv->cpu_addr)
                *fence_drv->cpu_addr = cpu_to_le64(seq);
}

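/**
 * amdgpu_userq_fence_driver_alloc - Allocate and initialize a userq fence driver
 *
 * @adev: amdgpu_device pointer
 * @fence_drv_req: pointer used to return the newly allocated fence driver
 *
 * Allocate the fence driver, acquire a seq64 slot for the fence sequence
 * memory and initialize the reference count, fence list and dma_fence
 * context.
 *
 * Returns 0 on success, negative error code on failure.
 */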
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
                                    struct amdgpu_userq_fence_driver **fence_drv_req)
{
        struct amdgpu_userq_fence_driver *fence_drv;
        int r;

        if (!fence_drv_req)
                return -EINVAL;
        *fence_drv_req = NULL;

        fence_drv = kzalloc_obj(*fence_drv);
        if (!fence_drv)
                return -ENOMEM;

        /* Acquire seq64 memory */
        r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
                               &fence_drv->cpu_addr);
        if (r)
                goto free_fence_drv;

        memset(fence_drv->cpu_addr, 0, sizeof(u64));

        kref_init(&fence_drv->refcount);
        INIT_LIST_HEAD(&fence_drv->fences);
        spin_lock_init(&fence_drv->fence_list_lock);

        fence_drv->adev = adev;
        fence_drv->context = dma_fence_context_alloc(1);
        get_task_comm(fence_drv->timeline_name, current);

        *fence_drv_req = fence_drv;

        return 0;

free_fence_drv:
        kfree(fence_drv);

        return r;
}

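/* Erase all fence driver references stored in @xa and drop them */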
static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
{
        struct amdgpu_userq_fence_driver *fence_drv;
        unsigned long index;

        if (xa_empty(xa))
                return;

        xa_lock(xa);
        xa_for_each(xa, index, fence_drv) {
                __xa_erase(xa, index);
                amdgpu_userq_fence_driver_put(fence_drv);
        }

        xa_unlock(xa);
}

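/*
 * Tear down the fence state of a user queue: drop the last fence, release
 * all fence driver references collected in the queue's xarray and finally
 * the queue's own fence driver reference.
 */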
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
        dma_fence_put(userq->last_fence);
        userq->last_fence = NULL;
        amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
        xa_destroy(&userq->fence_drv_xa);
        /* Drop the queue's ownership reference to fence_drv explicitly */
        amdgpu_userq_fence_driver_put(userq->fence_drv);
}

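/* Drop all fence driver references carried by @userq_fence */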
static void
amdgpu_userq_fence_put_fence_drv_array(struct amdgpu_userq_fence *userq_fence)
{
        unsigned long i;

        for (i = 0; i < userq_fence->fence_drv_array_count; i++)
                amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
        userq_fence->fence_drv_array_count = 0;
}

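/*
 * Signal all fences whose sequence number has been reached by the hardware,
 * based on the RPTR value currently stored in the seq64 memory.
 */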
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
        struct amdgpu_userq_fence *userq_fence, *tmp;
        LIST_HEAD(to_be_signaled);
        struct dma_fence *fence;
        unsigned long flags;
        u64 rptr;

        if (!fence_drv)
                return;

        spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
        rptr = amdgpu_userq_fence_read(fence_drv);

        list_for_each_entry(userq_fence, &fence_drv->fences, link) {
                if (rptr < userq_fence->base.seqno)
                        break;
        }

        list_cut_before(&to_be_signaled, &fence_drv->fences,
                        &userq_fence->link);
        spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

        list_for_each_entry_safe(userq_fence, tmp, &to_be_signaled, link) {
                fence = &userq_fence->base;
                list_del_init(&userq_fence->link);
                dma_fence_signal(fence);
                /*
                 * Drop the fence_drv_array references outside of the
                 * fence_list_lock to avoid recursive locking.
                 */
                amdgpu_userq_fence_put_fence_drv_array(userq_fence);
                dma_fence_put(fence);
        }
}

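/*
 * Final kref release callback: cancel any fences that are still pending,
 * return the seq64 slot and free the fence driver.
 */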
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
        struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
                                        struct amdgpu_userq_fence_driver,
                                        refcount);
        struct amdgpu_device *adev = fence_drv->adev;
        struct amdgpu_userq_fence *fence, *tmp;
        unsigned long flags;
        struct dma_fence *f;

        spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
        list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
                f = &fence->base;

                if (!dma_fence_is_signaled(f)) {
                        dma_fence_set_error(f, -ECANCELED);
                        dma_fence_signal(f);
                }

                list_del(&fence->link);
                dma_fence_put(f);
        }
        spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

        /* Free seq64 memory */
        amdgpu_seq64_free(adev, fence_drv->va);
        kfree(fence_drv);
}

void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
        kref_get(&fence_drv->refcount);
}

void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
        kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}

static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
        *userq_fence = kmalloc(sizeof(**userq_fence), GFP_KERNEL);
        return *userq_fence ? 0 : -ENOMEM;
}

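/*
 * Initialize @userq_fence for @seq on the queue's fence driver, take over the
 * fence driver references collected in the queue's xarray and add the fence
 * to the pending list unless the hardware has already passed the sequence
 * number.
 */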
static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
                                     struct amdgpu_userq_fence *userq_fence,
                                     u64 seq, struct dma_fence **f)
{
        struct amdgpu_userq_fence_driver *fence_drv;
        struct dma_fence *fence;
        unsigned long flags;
        bool signaled = false;

        fence_drv = userq->fence_drv;
        if (!fence_drv)
                return -EINVAL;

        spin_lock_init(&userq_fence->lock);
        INIT_LIST_HEAD(&userq_fence->link);
        fence = &userq_fence->base;
        userq_fence->fence_drv = fence_drv;

        dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
                         fence_drv->context, seq);

        amdgpu_userq_fence_driver_get(fence_drv);
        dma_fence_get(fence);

        if (!xa_empty(&userq->fence_drv_xa)) {
                struct amdgpu_userq_fence_driver *stored_fence_drv;
                unsigned long index, count = 0;
                int i = 0;

                xa_lock(&userq->fence_drv_xa);
                xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
                        count++;

                userq_fence->fence_drv_array =
                        kvmalloc_objs(struct amdgpu_userq_fence_driver *, count,
                                      GFP_ATOMIC);

                if (userq_fence->fence_drv_array) {
                        xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
                                userq_fence->fence_drv_array[i] = stored_fence_drv;
                                __xa_erase(&userq->fence_drv_xa, index);
                                i++;
                        }
                }

                userq_fence->fence_drv_array_count = i;
                xa_unlock(&userq->fence_drv_xa);
        } else {
                userq_fence->fence_drv_array = NULL;
                userq_fence->fence_drv_array_count = 0;
        }

        /* Check if hardware has already processed the job */
        spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
        if (!dma_fence_is_signaled(fence)) {
                list_add_tail(&userq_fence->link, &fence_drv->fences);
        } else {
                signaled = true;
                dma_fence_put(fence);
        }
        spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);

        if (signaled)
                amdgpu_userq_fence_put_fence_drv_array(userq_fence);

        *f = fence;

        return 0;
}

static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
        return "amdgpu_userq_fence";
}

static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);

        return fence->fence_drv->timeline_name;
}

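/* A fence counts as signaled once the RPTR in the seq64 memory has reached its seqno */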
static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
        struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
        struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
        u64 rptr, wptr;

        rptr = amdgpu_userq_fence_read(fence_drv);
        wptr = fence->base.seqno;

        if (rptr >= wptr)
                return true;

        return false;
}

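/* RCU callback: drop the fence driver reference and free the fence */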
static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
        struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
        struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;

        /* Release the fence driver reference */
        amdgpu_userq_fence_driver_put(fence_drv);

        kvfree(userq_fence->fence_drv_array);
        kfree(userq_fence);
}

static void amdgpu_userq_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_userq_fence_free);
}

static const struct dma_fence_ops amdgpu_userq_fence_ops = {
        .get_driver_name = amdgpu_userq_fence_get_driver_name,
        .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
        .signaled = amdgpu_userq_fence_signaled,
        .release = amdgpu_userq_fence_release,
};

/**
 * amdgpu_userq_fence_read_wptr - Read the userq wptr value
 *
 * @adev: amdgpu_device pointer
 * @queue: user mode queue structure pointer
 * @wptr: returned write pointer value
 *
 * Read the wptr value from the userq's MQD. The userq signal IOCTL creates
 * a dma_fence for the shared buffers that is considered signaled once the
 * RPTR value written to the seq64 memory is >= this WPTR.
 *
 * Returns 0 on success and stores the wptr value in @wptr, negative error
 * code on failure.
 */
static int amdgpu_userq_fence_read_wptr(struct amdgpu_device *adev,
                                        struct amdgpu_usermode_queue *queue,
                                        u64 *wptr)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        u64 addr, *ptr;
        int r;

        r = amdgpu_bo_reserve(queue->vm->root.bo, false);
        if (r)
                return r;

        addr = queue->userq_prop->wptr_gpu_addr;
        addr &= AMDGPU_GMC_HOLE_MASK;

        mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
        if (!mapping) {
                amdgpu_bo_unreserve(queue->vm->root.bo);
                DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
                return -EINVAL;
        }

        bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
        amdgpu_bo_unreserve(queue->vm->root.bo);
        r = amdgpu_bo_reserve(bo, true);
        if (r) {
                amdgpu_bo_unref(&bo);
                DRM_ERROR("Failed to reserve userqueue wptr bo");
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&ptr);
        if (r) {
                DRM_ERROR("Failed mapping the userqueue wptr bo");
                goto map_error;
        }

        *wptr = le64_to_cpu(*ptr);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        return 0;

map_error:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        return r;
}

static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
        dma_fence_put(fence);
}

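/* Mark the fence with @error unless it is already signaled */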
static void
amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
                                    int error)
{
        struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
        unsigned long flags;
        struct dma_fence *f;

        spin_lock_irqsave(&fence_drv->fence_list_lock, flags);

        f = rcu_dereference_protected(&fence->base,
                                      lockdep_is_held(&fence_drv->fence_list_lock));
        if (f && !dma_fence_is_signaled_locked(f))
                dma_fence_set_error(f, error);
        spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}

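/*
 * Force completion of the queue's last fence: mark it with -ECANCELED, write
 * its sequence number into the seq64 memory and run the regular processing
 * path so it gets signaled.
 */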
void
amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
{
        struct dma_fence *f = userq->last_fence;

        if (f) {
                struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
                struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
                u64 wptr = fence->base.seqno;

                amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
                amdgpu_userq_fence_write(fence_drv, wptr);
                amdgpu_userq_fence_driver_process(fence_drv);
        }
}

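/**
 * amdgpu_userq_signal_ioctl - Signal IOCTL for user queues
 *
 * @dev: drm_device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_signal)
 * @filp: drm file pointer
 *
 * Create a fence for the current WPTR of the user queue and attach it to the
 * given syncobjs as well as to the reservation objects of the read and write
 * BO handles, so that others can synchronize with the user queue work.
 *
 * Returns 0 on success, negative error code on failure.
 */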
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_userq_signal *args = data;
        const unsigned int num_write_bo_handles = args->num_bo_write_handles;
        const unsigned int num_read_bo_handles = args->num_bo_read_handles;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
        struct drm_gem_object **gobj_write, **gobj_read;
        u32 *syncobj_handles, num_syncobj_handles;
        struct amdgpu_userq_fence *userq_fence;
        struct amdgpu_usermode_queue *queue = NULL;
        struct drm_syncobj **syncobj = NULL;
        struct dma_fence *fence;
        struct drm_exec exec;
        int r, i, entry;
        u64 wptr;

        if (!amdgpu_userq_enabled(dev))
                return -ENOTSUPP;

        if (args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
            args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
                return -EINVAL;

        num_syncobj_handles = args->num_syncobj_handles;
        syncobj_handles = memdup_array_user(u64_to_user_ptr(args->syncobj_handles),
                                            num_syncobj_handles, sizeof(u32));
        if (IS_ERR(syncobj_handles))
                return PTR_ERR(syncobj_handles);

        /* Array of pointers to the looked up syncobjs */
        syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
        if (!syncobj) {
                r = -ENOMEM;
                goto free_syncobj_handles;
        }

        for (entry = 0; entry < num_syncobj_handles; entry++) {
                syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
                if (!syncobj[entry]) {
                        r = -ENOENT;
                        goto free_syncobj;
                }
        }

        r = drm_gem_objects_lookup(filp,
                                   u64_to_user_ptr(args->bo_read_handles),
                                   num_read_bo_handles,
                                   &gobj_read);
        if (r)
                goto free_syncobj;

        r = drm_gem_objects_lookup(filp,
                                   u64_to_user_ptr(args->bo_write_handles),
                                   num_write_bo_handles,
                                   &gobj_write);
        if (r)
                goto put_gobj_read;

        /* Retrieve the user queue */
        queue = amdgpu_userq_get(userq_mgr, args->queue_id);
        if (!queue) {
                r = -ENOENT;
                goto put_gobj_write;
        }

        r = amdgpu_userq_fence_read_wptr(adev, queue, &wptr);
        if (r)
                goto put_gobj_write;

        r = amdgpu_userq_fence_alloc(&userq_fence);
        if (r)
                goto put_gobj_write;

        /* Getting here means the UQ is active, make sure the eviction fence is valid */
        amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);

        /* Create a new fence */
        r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
        if (r) {
                mutex_unlock(&userq_mgr->userq_mutex);
                kfree(userq_fence);
                goto put_gobj_write;
        }

        dma_fence_put(queue->last_fence);
        queue->last_fence = dma_fence_get(fence);
        amdgpu_userq_start_hang_detect_work(queue);
        mutex_unlock(&userq_mgr->userq_mutex);

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
                      (num_read_bo_handles + num_write_bo_handles));

        /* Lock all BOs with retry handling */
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
                if (r) {
                        amdgpu_userq_fence_cleanup(fence);
                        goto exec_fini;
                }

                r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
                if (r) {
                        amdgpu_userq_fence_cleanup(fence);
                        goto exec_fini;
                }
        }

        for (i = 0; i < num_read_bo_handles; i++) {
                if (!gobj_read || !gobj_read[i]->resv)
                        continue;

                dma_resv_add_fence(gobj_read[i]->resv, fence,
                                   DMA_RESV_USAGE_READ);
        }

        for (i = 0; i < num_write_bo_handles; i++) {
                if (!gobj_write || !gobj_write[i]->resv)
                        continue;

                dma_resv_add_fence(gobj_write[i]->resv, fence,
                                   DMA_RESV_USAGE_WRITE);
        }

        /* Add the created fence to the syncobjs */
        for (i = 0; i < num_syncobj_handles; i++)
                drm_syncobj_replace_fence(syncobj[i], fence);

        /* drop the reference acquired in fence creation function */
        dma_fence_put(fence);

exec_fini:
        drm_exec_fini(&exec);
put_gobj_write:
        for (i = 0; i < num_write_bo_handles; i++)
                drm_gem_object_put(gobj_write[i]);
        kvfree(gobj_write);
put_gobj_read:
        for (i = 0; i < num_read_bo_handles; i++)
                drm_gem_object_put(gobj_read[i]);
        kvfree(gobj_read);
free_syncobj:
        while (entry-- > 0)
                if (syncobj[entry])
                        drm_syncobj_put(syncobj[entry]);
        kfree(syncobj);
free_syncobj_handles:
        kfree(syncobj_handles);

        if (queue)
                amdgpu_userq_put(queue);

        return r;
}

/* Count the number of expected fences so userspace can alloc a buffer */
static int
amdgpu_userq_wait_count_fences(struct drm_file *filp,
                               struct drm_amdgpu_userq_wait *wait_info,
                               u32 *syncobj_handles, u32 *timeline_points,
                               u32 *timeline_handles,
                               struct drm_gem_object **gobj_write,
                               struct drm_gem_object **gobj_read)
{
        int num_read_bo_handles, num_write_bo_handles;
        struct dma_fence_unwrap iter;
        struct dma_fence *fence, *f;
        unsigned int num_fences = 0;
        struct drm_exec exec;
        int i, r;

        /*
         * This needs to be outside of the lock provided by drm_exec for
         * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT to work correctly.
         */

        /* Count timeline fences */
        for (i = 0; i < wait_info->num_syncobj_timeline_handles; i++) {
                r = drm_syncobj_find_fence(filp, timeline_handles[i],
                                           timeline_points[i],
                                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                           &fence);
                if (r)
                        return r;

                dma_fence_unwrap_for_each(f, &iter, fence)
                        num_fences++;

                dma_fence_put(fence);
        }

        /* Count boolean fences */
        for (i = 0; i < wait_info->num_syncobj_handles; i++) {
                r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
                                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                           &fence);
                if (r)
                        return r;

                num_fences++;
                dma_fence_put(fence);
        }

        /* Lock all the GEM objects */
        /* TODO: It is actually not necessary to lock them */
        num_read_bo_handles = wait_info->num_bo_read_handles;
        num_write_bo_handles = wait_info->num_bo_write_handles;
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
                      num_read_bo_handles + num_write_bo_handles);

        drm_exec_until_all_locked(&exec) {
                r = drm_exec_prepare_array(&exec, gobj_read,
                                           num_read_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto error_unlock;

                r = drm_exec_prepare_array(&exec, gobj_write,
                                           num_write_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto error_unlock;
        }

        /* Count read fences */
        for (i = 0; i < num_read_bo_handles; i++) {
                struct dma_resv_iter resv_cursor;
                struct dma_fence *fence;

                dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
                                        DMA_RESV_USAGE_READ, fence)
                        num_fences++;
        }

        /* Count write fences */
        for (i = 0; i < num_write_bo_handles; i++) {
                struct dma_resv_iter resv_cursor;
                struct dma_fence *fence;

                dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
                                        DMA_RESV_USAGE_WRITE, fence)
                        num_fences++;
        }

        wait_info->num_fences = min(num_fences, USHRT_MAX);
        r = 0;

error_unlock:
        /* Unlock all the GEM objects */
        drm_exec_fini(&exec);
        return r;
}

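/* Add @fence to the return array, or wait for it directly when the array is full */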
static int
amdgpu_userq_wait_add_fence(struct drm_amdgpu_userq_wait *wait_info,
                            struct dma_fence **fences, unsigned int *num_fences,
                            struct dma_fence *fence)
{
        /* As a fallback, wait on the fence if userspace didn't allocate enough space */
        if (*num_fences >= wait_info->num_fences)
                return dma_fence_wait(fence, true);

        fences[(*num_fences)++] = dma_fence_get(fence);
        return 0;
}

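/*
 * Collect all fences the wait depends on (syncobjs, timeline points and BO
 * reservations), deduplicate them and return the seq64 GPU address and
 * sequence value of each userq fence to userspace; fences from other drivers
 * are waited on directly.
 */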
static int
amdgpu_userq_wait_return_fence_info(struct drm_file *filp,
                                    struct drm_amdgpu_userq_wait *wait_info,
                                    u32 *syncobj_handles, u32 *timeline_points,
                                    u32 *timeline_handles,
                                    struct drm_gem_object **gobj_write,
                                    struct drm_gem_object **gobj_read)
{
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
        struct drm_amdgpu_userq_fence_info *fence_info;
        int num_read_bo_handles, num_write_bo_handles;
        struct amdgpu_usermode_queue *waitq;
        struct dma_fence **fences, *fence, *f;
        struct dma_fence_unwrap iter;
        int num_points, num_syncobj;
        unsigned int num_fences = 0;
        struct drm_exec exec;
        int i, cnt, r;

        fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info),
                                   GFP_KERNEL);
        if (!fence_info)
                return -ENOMEM;

        fences = kmalloc_array(wait_info->num_fences, sizeof(*fences),
                               GFP_KERNEL);
        if (!fences) {
                r = -ENOMEM;
                goto free_fence_info;
        }

        /* Retrieve timeline fences */
        num_points = wait_info->num_syncobj_timeline_handles;
        for (i = 0; i < num_points; i++) {
                r = drm_syncobj_find_fence(filp, timeline_handles[i],
                                           timeline_points[i],
                                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                           &fence);
                if (r)
                        goto free_fences;

                dma_fence_unwrap_for_each(f, &iter, fence) {
                        r = amdgpu_userq_wait_add_fence(wait_info, fences,
                                                        &num_fences, f);
                        if (r) {
                                dma_fence_put(fence);
                                goto free_fences;
                        }
                }

                dma_fence_put(fence);
        }

        /* Retrieve boolean fences */
        num_syncobj = wait_info->num_syncobj_handles;
        for (i = 0; i < num_syncobj; i++) {
                struct dma_fence *fence;

                r = drm_syncobj_find_fence(filp, syncobj_handles[i], 0,
                                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                           &fence);
                if (r)
                        goto free_fences;

                r = amdgpu_userq_wait_add_fence(wait_info, fences,
                                                &num_fences, fence);
                dma_fence_put(fence);
                if (r)
                        goto free_fences;
        }

        /* Lock all the GEM objects */
        num_read_bo_handles = wait_info->num_bo_read_handles;
        num_write_bo_handles = wait_info->num_bo_write_handles;
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
                      num_read_bo_handles + num_write_bo_handles);

        drm_exec_until_all_locked(&exec) {
                r = drm_exec_prepare_array(&exec, gobj_read,
                                           num_read_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto error_unlock;

                r = drm_exec_prepare_array(&exec, gobj_write,
                                           num_write_bo_handles, 1);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto error_unlock;
        }

        /* Retrieve GEM read objects fence */
        for (i = 0; i < num_read_bo_handles; i++) {
                struct dma_resv_iter resv_cursor;
                struct dma_fence *fence;

                dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
                                        DMA_RESV_USAGE_READ, fence) {
                        r = amdgpu_userq_wait_add_fence(wait_info, fences,
                                                        &num_fences, fence);
                        if (r)
                                goto error_unlock;
                }
        }

        /* Retrieve GEM write objects fence */
        for (i = 0; i < num_write_bo_handles; i++) {
                struct dma_resv_iter resv_cursor;
                struct dma_fence *fence;

                dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
                                        DMA_RESV_USAGE_WRITE, fence) {
                        r = amdgpu_userq_wait_add_fence(wait_info, fences,
                                                        &num_fences, fence);
                        if (r)
                                goto error_unlock;
                }
        }

        drm_exec_fini(&exec);

        /*
         * Keep only the latest fences to reduce the number of values
         * given back to userspace.
         */
        num_fences = dma_fence_dedup_array(fences, num_fences);

        waitq = amdgpu_userq_get(userq_mgr, wait_info->waitq_id);
        if (!waitq) {
                r = -EINVAL;
                goto free_fences;
        }

        for (i = 0, cnt = 0; i < num_fences; i++) {
                struct amdgpu_userq_fence_driver *fence_drv;
                struct amdgpu_userq_fence *userq_fence;
                u32 index;

                userq_fence = to_amdgpu_userq_fence(fences[i]);
                if (!userq_fence) {
                        /*
                         * Just waiting on other driver fences should
                         * be good for now
                         */
                        r = dma_fence_wait(fences[i], true);
                        if (r)
                                goto put_waitq;

                        continue;
                }

                fence_drv = userq_fence->fence_drv;
                /*
                 * We need to make sure the user queue releases its references
                 * to the fence drivers at some point before queue destruction.
                 * Otherwise, we would gather those references until we run out
                 * of space and crash.
                 */
                r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
                             xa_limit_32b, GFP_KERNEL);
                if (r)
                        goto put_waitq;

                amdgpu_userq_fence_driver_get(fence_drv);

                /* Store the GPU VA of the seq64 slot and the fence value */
                fence_info[cnt].va = fence_drv->va;
                fence_info[cnt].value = fences[i]->seqno;

                /* Increment the actual userq fence count */
                cnt++;
        }
        wait_info->num_fences = cnt;

        /* Copy userq fence info to user space */
        if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
                         fence_info, cnt * sizeof(*fence_info)))
                r = -EFAULT;
        else
                r = 0;

put_waitq:
        amdgpu_userq_put(waitq);

free_fences:
        while (num_fences--)
                dma_fence_put(fences[num_fences]);
        kfree(fences);

free_fence_info:
        kfree(fence_info);
        return r;

error_unlock:
        drm_exec_fini(&exec);
        goto free_fences;
}

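/**
 * amdgpu_userq_wait_ioctl - Wait IOCTL for user queues
 *
 * @dev: drm_device pointer
 * @data: ioctl arguments (struct drm_amdgpu_userq_wait)
 * @filp: drm file pointer
 *
 * Gather the fences a user queue submission has to wait for from the given
 * syncobj handles, timeline points and BO handles. With num_fences == 0 only
 * the number of fences is returned, otherwise the fence information is
 * copied to userspace.
 *
 * Returns 0 on success, negative error code on failure.
 */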
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        int num_points, num_syncobj, num_read_bo_handles, num_write_bo_handles;
        u32 *syncobj_handles, *timeline_points, *timeline_handles;
        struct drm_amdgpu_userq_wait *wait_info = data;
        struct drm_gem_object **gobj_write;
        struct drm_gem_object **gobj_read;
        void __user *ptr;
        int r;

        if (!amdgpu_userq_enabled(dev))
                return -ENOTSUPP;

        if (wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
            wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
                return -EINVAL;

        num_syncobj = wait_info->num_syncobj_handles;
        ptr = u64_to_user_ptr(wait_info->syncobj_handles);
        syncobj_handles = memdup_array_user(ptr, num_syncobj, sizeof(u32));
        if (IS_ERR(syncobj_handles))
                return PTR_ERR(syncobj_handles);

        num_points = wait_info->num_syncobj_timeline_handles;
        ptr = u64_to_user_ptr(wait_info->syncobj_timeline_handles);
        timeline_handles = memdup_array_user(ptr, num_points, sizeof(u32));
        if (IS_ERR(timeline_handles)) {
                r = PTR_ERR(timeline_handles);
                goto free_syncobj_handles;
        }

        ptr = u64_to_user_ptr(wait_info->syncobj_timeline_points);
        timeline_points = memdup_array_user(ptr, num_points, sizeof(u32));
        if (IS_ERR(timeline_points)) {
                r = PTR_ERR(timeline_points);
                goto free_timeline_handles;
        }

        num_read_bo_handles = wait_info->num_bo_read_handles;
        ptr = u64_to_user_ptr(wait_info->bo_read_handles);
        r = drm_gem_objects_lookup(filp, ptr, num_read_bo_handles, &gobj_read);
        if (r)
                goto free_timeline_points;

        num_write_bo_handles = wait_info->num_bo_write_handles;
        ptr = u64_to_user_ptr(wait_info->bo_write_handles);
        r = drm_gem_objects_lookup(filp, ptr, num_write_bo_handles,
                                   &gobj_write);
        if (r)
                goto put_gobj_read;

        /*
         * Passing num_fences == 0 means that userspace doesn't want to
         * retrieve userq_fence_info. In that case we skip filling
         * userq_fence_info and only return the actual number of fences in
         * wait_info->num_fences.
         */
        if (!wait_info->num_fences) {
                r = amdgpu_userq_wait_count_fences(filp, wait_info,
                                                   syncobj_handles,
                                                   timeline_points,
                                                   timeline_handles,
                                                   gobj_write,
                                                   gobj_read);
        } else {
                r = amdgpu_userq_wait_return_fence_info(filp, wait_info,
                                                        syncobj_handles,
                                                        timeline_points,
                                                        timeline_handles,
                                                        gobj_write,
                                                        gobj_read);
        }

        while (num_write_bo_handles--)
                drm_gem_object_put(gobj_write[num_write_bo_handles]);
        kvfree(gobj_write);

put_gobj_read:
        while (num_read_bo_handles--)
                drm_gem_object_put(gobj_read[num_read_bo_handles]);
        kvfree(gobj_read);

free_timeline_points:
        kfree(timeline_points);
free_timeline_handles:
        kfree(timeline_handles);
free_syncobj_handles:
        kfree(syncobj_handles);
        return r;
}