// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_syncobj.h"

/* For userspace errors, use DRM_UT_DRIVER so that userspace can enable
 * error messages for debugging without spamming dmesg by default.
 */
#define SUBMIT_ERROR(err, submit, fmt, ...) \
	UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)

/*
 * Cmdstream submission:
 */

static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds, u64 drm_client_id)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

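	/* sz is computed in u64, so on a 32-bit kernel this catches sizes
	 * that would overflow size_t:
	 */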
	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

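	/* The allocation size is influenced by userspace (nr_bos/nr_cmds),
	 * so use __GFP_NOWARN to avoid userspace-triggerable allocation-
	 * failure warnings:
	 */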
	submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	submit->hw_fence = msm_fence_alloc();
	if (IS_ERR(submit->hw_fence)) {
		ret = PTR_ERR(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
				 drm_client_id);
	if (ret) {
		kfree(submit->hw_fence);
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->vm = msm_context_vm(dev, queue->ctx);
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->pid = get_pid(task_pid(current));
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	/* Get a unique identifier for the submission for logging purposes */
	submit->ident = atomic_inc_return(&ident) - 1;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}

void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	/*
	 * In error paths, we could unref the submit without calling
	 * drm_sched_entity_push_job(), so msm_job_free() will never
	 * get called.  Since drm_sched_job_cleanup() will NULL out
	 * s_fence, we can use that to detect this case.
	 */
	if (submit->base.s_fence)
		drm_sched_job_cleanup(&submit->base);

	if (submit->fence_id) {
		spin_lock(&submit->queue->idr_lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		spin_unlock(&submit->queue->idr_lock);
	}

	dma_fence_put(submit->user_fence);

	/*
	 * If the submit is freed before msm_job_run(), then hw_fence is
	 * just some pre-allocated memory, not a reference counted fence.
	 * Once the job runs and the hw_fence is initialized, it will
	 * have a refcount of at least one, since the submit holds a ref
	 * to the hw_fence.
	 */
	if (kref_read(&submit->hw_fence->refcount) == 0) {
		kfree(submit->hw_fence);
	} else {
		dma_fence_put(submit->hw_fence);
	}

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * an error path before flags are initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
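			/* no objects have been referenced yet, so there is
			 * nothing for the error path to unwind:
			 */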
			i = 0;
			goto out;
		}

		/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
		    !(submit_bo.flags & MANDATORY_FLAGS)) {
			ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
	}

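	/* copy_from_user() can fault and sleep, so the handles are copied
	 * in the loop above, before taking the table_lock spinlock for the
	 * bulk object lookup below:
	 */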
	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n",
					   submit->bos[i].handle, i);
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = obj;
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	struct msm_context *ctx = file->driver_priv;
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
		}

		if (submit_cmd.size % 4) {
			ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
					   submit_cmd.size);
			goto out;
		}

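		/* In VM_BIND mode, userspace manages the VM directly, so the
		 * kernel does not patch relocs or translate submit offsets;
		 * the cmdstream iova is taken as-is from userspace:
		 */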
		if (msm_context_is_vmbind(ctx)) {
			if (submit_cmd.nr_relocs) {
				ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero");
				goto out;
			}

			if (submit_cmd.submit_idx || submit_cmd.submit_offset) {
				ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero");
				goto out;
			}

			submit->cmd[i].iova = submit_cmd.iova;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}

static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
	unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
	struct drm_exec *exec = &submit->exec;
	int ret = 0;

	drm_exec_init(&submit->exec, flags, submit->nr_bos);

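	/* In VM_BIND mode, lock the VM's common resv and every object
	 * attached to the VM via the gpuvm helpers, rather than walking
	 * the submit's BO list:
	 */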
	drm_exec_until_all_locked (&submit->exec) {
		ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;

		ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;
	}

	return ret;
}

/* This is where we make sure all the bo's are reserved and pinned: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
	int ret = 0;

	if (msm_context_is_vmbind(submit->queue->ctx))
		return submit_lock_objects_vmbind(submit);

	drm_exec_init(&submit->exec, flags, submit->nr_bos);

	drm_exec_until_all_locked (&submit->exec) {
		ret = drm_exec_lock_obj(&submit->exec,
					drm_gpuvm_resv_obj(submit->vm));
		drm_exec_retry_on_contention(&submit->exec);
		if (ret)
			break;
		for (unsigned i = 0; i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
			drm_exec_retry_on_contention(&submit->exec);
			if (ret)
				break;
		}
	}

	return ret;
}

static int submit_fence_sync(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* Userspace can ask for implicit sync to be disabled on
		 * specific buffers.  This is useful for internal usermode
		 * driver managed buffers, suballocation, etc.
		 */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj,
							      write);
		if (ret)
			break;
	}

	return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = submit->dev->dev_private;
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct drm_gpuva *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->vm);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo);
		submit->bos[i].iova = vma->va.addr;
	}

	/*
	 * A second loop while holding the LRU lock (a) avoids acquiring/
	 * dropping the LRU lock for each individual bo, and (b) avoids
	 * holding the LRU lock while calling msm_gem_pin_vma_locked()
	 * (which could trigger get_pages(), which could trigger reclaim,
	 * and if we held the LRU lock that could deadlock with the
	 * shrinker).
	 */
	mutex_lock(&priv->lru.lock);
	for (i = 0; i < submit->nr_bos; i++) {
		msm_gem_pin_obj_locked(submit->bos[i].obj);
	}
	mutex_unlock(&priv->lru.lock);

	submit->bos_pinned = true;

	return ret;
}

static void submit_unpin_objects(struct msm_gem_submit *submit)
{
	if (!submit->bos_pinned)
		return;

	for (int i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_locked(obj);
	}

	submit->bos_pinned = false;
}

static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	struct msm_gem_vm *vm = to_msm_vm(submit->vm);
	struct dma_fence *last_fence;

	if (msm_context_is_vmbind(submit->queue->ctx)) {
		drm_gpuvm_resv_add_fence(submit->vm, &submit->exec,
					 submit->user_fence,
					 DMA_RESV_USAGE_BOOKKEEP,
					 DMA_RESV_USAGE_BOOKKEEP);
		return;
	}

	for (unsigned i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}

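	/* Merge this submit's fence into the VM-wide last_fence, keeping a
	 * single fence that covers all activity on the VM so far:
	 */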
	last_fence = vm->last_fence;
	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
	dma_fence_put(last_fence);
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct drm_gem_object **obj, uint64_t *iova)
{
	if (idx >= submit->nr_bos) {
		return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
				    idx, submit->nr_bos);
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;

	return 0;
}

/* process the relocs and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (offset % 4)
		return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(obj);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;

		if (submit_reloc.submit_offset % 4) {
			ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
					   submit_reloc.submit_offset);
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

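		/* relocs must be given in ascending submit_offset order, and
		 * must not point past the end of the cmdstream buffer:
		 */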
		if ((off >= (obj->size / 4)) ||
		    (off < last_offset)) {
			ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
		if (ret)
			goto out;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(obj);

	return ret;
}

/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	if (error)
		submit_unpin_objects(submit);

	if (submit->exec.objects)
		drm_exec_fini(&submit->exec);

	/* if job wasn't enqueued to scheduler, early retirement: */
	if (error)
		msm_submit_retire(submit);
}

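/* Drop the references taken in submit_lookup_objects() and
 * submit_pin_objects().  Called when the submit is retired, or from
 * submit_cleanup() on error if the job was never enqueued.
 */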
void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo;

		msm_gem_lock(obj);
		drm_gpuvm_bo_put(vm_bo);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}
}

int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_context *ctx = file->driver_priv;
	struct msm_gem_submit *submit = NULL;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_syncobj_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	struct sync_file *sync_file = NULL;
	unsigned cmds_to_parse;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (to_msm_vm(ctx->vm)->unusable)
		return UERR(EPIPE, dev, "context is unusable");

	/* for now, we just have the 3D pipe.. eventually this would need
	 * to be more clever and dispatch to the appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return UERR(EINVAL, dev, "invalid pipe");

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return UERR(EINVAL, dev, "invalid flags");

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) {
		ret = UERR(EINVAL, dev, "Invalid queue type");
		goto out_post_unlock;
	}

	ring = gpu->rb[queue->ring_nr];

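	/* Reserve an fd now, but defer installing the sync_file until the
	 * submit can no longer fail:
	 */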
	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_post_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
			       file->client_id);
	if (IS_ERR(submit)) {
		ret = PTR_ERR(submit);
		goto out_post_unlock;
	}

	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
			     args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = UERR(EINVAL, dev, "invalid in-fence");
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_syncobj_parse_deps(dev, &submit->base,
							   file, args->in_syncobjs,
							   args->nr_in_syncobjs,
							   args->syncobj_stride);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_syncobj_parse_post_deps(dev, file,
							args->out_syncobjs,
							args->nr_out_syncobjs,
							args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
		ret = submit_fence_sync(submit);
		if (ret)
			goto out;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	cmds_to_parse = msm_context_is_vmbind(ctx) ? 0 : args->nr_cmds;

	for (i = 0; i < cmds_to_parse; i++) {
		struct drm_gem_object *obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
		    (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
			ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
				   submit->cmd[i].size * 4);
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (likely(!submit->cmd[i].nr_relocs))
			continue;

		if (!gpu->allow_relocs) {
			ret = UERR(EINVAL, dev, "relocs not allowed\n");
			goto out;
		}

		ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
				   submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = args->nr_cmds;

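	/* Preallocate idr memory while it is still safe to sleep; the
	 * actual allocation below runs under the idr_lock spinlock with
	 * GFP_NOWAIT:
	 */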
	idr_preload(GFP_KERNEL);

	spin_lock(&queue->idr_lock);

	/*
	 * If using a userspace provided seqno fence, validate that the id
	 * is available before arming the sched job.  Since access to
	 * fence_idr is serialized on the queue lock, the slot should still
	 * be available after the job is armed.
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
	    (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
		spin_unlock(&queue->idr_lock);
		idr_preload_end();
		ret = UERR(EINVAL, dev, "invalid in-fence-sn");
		goto out;
	}

	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available.
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_NOWAIT);
		/*
		 * We've already validated that the fence_id slot is valid,
		 * so if idr_alloc_u32 failed, it is a kernel bug.
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence.
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_NOWAIT);
	}

	spin_unlock(&queue->idr_lock);
	idr_preload_end();

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}

	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->user_fence);
		if (!sync_file)
			ret = -ENOMEM;
	}

	if (ret)
		goto out;

	submit_attach_object_fences(submit);

	if (msm_context_is_vmbind(ctx)) {
		/*
		 * If we are not using VM_BIND, submit_pin_objects() will
		 * validate just the BOs attached to the submit.  In that
		 * case we don't need to validate the _entire_ vm, because
		 * userspace tracked what BOs are associated with the submit.
		 */
		ret = drm_gpuvm_validate(submit->vm, &submit->exec);
		if (ret)
			goto out;
	}

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	msm_rd_dump_submit(priv->rd, submit, NULL);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, submit->user_fence);

out:
	submit_cleanup(submit, !!ret);
out_unlock:
	mutex_unlock(&queue->lock);
out_post_unlock:
	if (ret) {
		if (out_fence_fd >= 0)
			put_unused_fd(out_fence_fd);
		if (sync_file)
			fput(sync_file->file);
	} else if (sync_file) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

	if (!IS_ERR_OR_NULL(submit)) {
		msm_gem_submit_put(submit);
	} else {
		/*
		 * If the submit hasn't yet taken ownership of the queue
		 * then we need to drop the reference ourself:
		 */
		msm_submitqueue_put(queue);
	}
	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}