xref: /linux/drivers/gpu/drm/nouveau/nouveau_exec.c (revision 31499b0192cea06bbfe2782f288ac5cfe3dc9167)
// SPDX-License-Identifier: MIT

#include <drm/drm_exec.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_chan.h"
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"

/**
 * DOC: Overview
 *
 * Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
 * DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
 *
 * In order to use the UAPI, a user client must first initialize the VA space
 * using the DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space
 * should be managed by the kernel and which by the UMD.
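 *
 * A minimal sketch of such an initialization from userspace; the struct and
 * ioctl names are taken from the UAPI header, while the concrete split of
 * the VA space is a made-up example::
 *
 *	struct drm_nouveau_vm_init init = {
 *		.kernel_managed_addr = 0x0,
 *		.kernel_managed_size = 1ULL << 32,  // example: low 4 GiB
 *	};
 *
 *	// everything above the kernel managed region is left to the UMD
 *	drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);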
 *
 * The DRM_NOUVEAU_VM_BIND ioctl provides clients with an interface to manage
 * the userspace-manageable portion of the VA space. It provides operations to
 * map and unmap memory. Mappings may be flagged as sparse. Sparse mappings
 * are not backed by a GEM object and the kernel will ignore GEM handles
 * provided alongside a sparse mapping.
 *
 * Userspace may request memory backed mappings either within or outside of the
 * bounds (but not crossing those bounds) of a previously mapped sparse
 * mapping. Subsequently requested memory backed mappings within a sparse
 * mapping will take precedence over the corresponding range of the sparse
 * mapping. If such memory backed mappings are unmapped the kernel will make
 * sure that the corresponding sparse mapping takes their place again.
 * Requests to unmap a sparse mapping that still contains memory backed
 * mappings will result in those memory backed mappings being unmapped first.
 *
 * Unmap requests are not bound to the range of existing mappings and can even
 * overlap the bounds of sparse mappings. For such a request the kernel will
 * make sure to unmap all memory backed mappings within the given range,
 * splitting up memory backed mappings which are only partially contained
 * within the given range. Unmap requests with the sparse flag set, however,
 * must match the range of a previously mapped sparse mapping exactly.
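 *
 * A single unmap op covering an arbitrary range might look as follows; the
 * struct and op names are assumed from the UAPI header, the range itself is
 * made up::
 *
 *	struct drm_nouveau_vm_bind_op op = {
 *		.op = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
 *		.addr = 0x200000000ULL,  // start of the range (example)
 *		.range = 0x20000,        // may span multiple mappings
 *	};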
 *
 * While the kernel generally permits arbitrary sequences and ranges of memory
 * backed mappings to be mapped and unmapped, either within a single VM_BIND
 * ioctl call or across multiple calls, there are some restrictions for sparse
 * mappings.
 *
 * The kernel does not permit clients to:
 *   - unmap non-existent sparse mappings
 *   - unmap a sparse mapping and map a new sparse mapping overlapping the range
 *     of the previously unmapped sparse mapping within the same VM_BIND ioctl
 *   - unmap a sparse mapping and map new memory backed mappings overlapping the
 *     range of the previously unmapped sparse mapping within the same VM_BIND
 *     ioctl
 *
 * When using the VM_BIND ioctl to request the kernel to map memory to a given
 * virtual address in the GPU's VA space there is no guarantee that the actual
 * mappings are created in the GPU's MMU. If the given memory is swapped out
 * at the time the bind operation is executed the kernel will stash the mapping
 * details into its internal allocator and create the actual MMU mappings once
 * the memory is swapped back in. While this is transparent to userspace, it is
 * guaranteed that all the backing memory is swapped back in and all the memory
 * mappings, as requested by userspace previously, are actually mapped once the
 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
 *
 * A VM_BIND job can be executed either synchronously or asynchronously. If
 * executed asynchronously, userspace may provide a list of syncobjs this job
 * will wait for and/or a list of syncobjs the kernel will signal once the
 * VM_BIND job finished execution. If executed synchronously the ioctl will
 * block until the bind job is finished and the kernel does not permit any
 * syncobjs to be submitted along with it.
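 *
 * A hedged sketch of an asynchronous VM_BIND submission mapping one GEM
 * object; the struct, op and flag names are assumed from the UAPI header,
 * and the GPU VA, range and handles are made-up examples::
 *
 *	struct drm_nouveau_vm_bind_op op = {
 *		.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
 *		.handle = bo_handle,         // GEM handle backing the mapping
 *		.addr = 0x200000000ULL,      // target GPU VA (example)
 *		.bo_offset = 0,
 *		.range = 0x10000,            // mapping size (example)
 *	};
 *	struct drm_nouveau_sync sig = {
 *		.flags = DRM_NOUVEAU_SYNC_SYNCOBJ,
 *		.handle = syncobj_handle,    // signaled once the bind finished
 *	};
 *	struct drm_nouveau_vm_bind bind = {
 *		.flags = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
 *		.op_count = 1,
 *		.op_ptr = (__u64)(uintptr_t)&op,
 *		.sig_count = 1,
 *		.sig_ptr = (__u64)(uintptr_t)&sig,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);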
 *
 * To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
 * jobs are always executed asynchronously, and, like VM_BIND jobs, provide
 * the option to synchronize them with syncobjs.
 *
 * Besides that, EXEC jobs can be scheduled for a specified channel to execute on.
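 *
 * A minimal sketch of an EXEC submission for a single push buffer; the
 * drm_nouveau_exec and drm_nouveau_exec_push fields used below match the
 * ones this file consumes, while the channel id, GPU VA and length are
 * made-up examples::
 *
 *	struct drm_nouveau_exec_push push = {
 *		.va = pushbuf_va,       // GPU VA of the push buffer (example)
 *		.va_len = pushbuf_len,  // length in bytes (example)
 *	};
 *	struct drm_nouveau_exec exec = {
 *		.channel = chid,        // channel to execute on (example)
 *		.push_count = 1,
 *		.push_ptr = (__u64)(uintptr_t)&push,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);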
 *
 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs have
 * an up-to-date view of the VA space. However, the actual mappings might still
 * be pending. Hence, EXEC jobs require the fences of the corresponding VM_BIND
 * jobs they depend on to be attached to them.
 */

static int
nouveau_exec_job_submit(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
	struct nouveau_cli *cli = job->cli;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct drm_exec *exec = &job->exec;
	struct drm_gem_object *obj;
	unsigned long index;
	int ret;

	/* Create a new fence, but do not emit yet. */
	ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
	if (ret)
		return ret;

	/* Lock and prepare the GEM objects of all VA space mappings. */
	nouveau_uvmm_lock(uvmm);
	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
			    DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(exec) {
		struct drm_gpuva *va;

		drm_gpuva_for_each_va(va, &uvmm->umgr) {
			if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
				continue;

			ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
			drm_exec_retry_on_contention(exec);
			if (ret)
				goto err_uvmm_unlock;
		}
	}
	nouveau_uvmm_unlock(uvmm);

	/* Make sure the backing memory of every locked BO is resident. */
	drm_exec_for_each_locked_object(exec, index, obj) {
		struct nouveau_bo *nvbo = nouveau_gem_object(obj);

		ret = nouveau_bo_validate(nvbo, true, false);
		if (ret)
			goto err_exec_fini;
	}

	return 0;

err_uvmm_unlock:
	nouveau_uvmm_unlock(uvmm);
err_exec_fini:
	drm_exec_fini(exec);
	return ret;
}

static void
nouveau_exec_job_armed_submit(struct nouveau_job *job)
{
	struct drm_exec *exec = &job->exec;
	struct drm_gem_object *obj;
	unsigned long index;

	/* Attach the job's done fence to all locked objects, then unlock. */
	drm_exec_for_each_locked_object(exec, index, obj)
		dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);

	drm_exec_fini(exec);
}

static struct dma_fence *
nouveau_exec_job_run(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
	struct nouveau_channel *chan = exec_job->chan;
	struct nouveau_fence *fence = exec_job->fence;
	int i, ret;

	ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
	if (ret) {
		NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
		return ERR_PTR(ret);
	}

	for (i = 0; i < exec_job->push.count; i++) {
		struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
		bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;

		nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
	}

	ret = nouveau_fence_emit(fence);
	if (ret) {
		nouveau_fence_unref(&exec_job->fence);
		NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		return ERR_PTR(ret);
	}

	/* The fence was emitted successfully, set the job's fence pointer to
	 * NULL in order to avoid freeing it up when the job is cleaned up.
	 */
	exec_job->fence = NULL;

	return &fence->base;
}

static void
nouveau_exec_job_free(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);

	nouveau_job_free(job);

	kfree(exec_job->fence);
	kfree(exec_job->push.s);
	kfree(exec_job);
}

static enum drm_gpu_sched_stat
nouveau_exec_job_timeout(struct nouveau_job *job)
{
	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
	struct nouveau_channel *chan = exec_job->chan;

	if (unlikely(!atomic_read(&chan->killed)))
		nouveau_channel_kill(chan);

	NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
		  chan->chid);

	nouveau_sched_entity_fini(job->entity);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static struct nouveau_job_ops nouveau_exec_job_ops = {
	.submit = nouveau_exec_job_submit,
	.armed_submit = nouveau_exec_job_armed_submit,
	.run = nouveau_exec_job_run,
	.free = nouveau_exec_job_free,
	.timeout = nouveau_exec_job_timeout,
};

int
nouveau_exec_job_init(struct nouveau_exec_job **pjob,
		      struct nouveau_exec_job_args *__args)
{
	struct nouveau_exec_job *job;
	struct nouveau_job_args args = {};
	int i, ret;

	/* Reject push buffers exceeding the maximum IB entry length. */
	for (i = 0; i < __args->push.count; i++) {
		struct drm_nouveau_exec_push *p = &__args->push.s[i];

		if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
			NV_PRINTK(err, nouveau_cli(__args->file_priv),
				  "pushbuf size exceeds limit: 0x%x max 0x%x\n",
				  p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
			return -EINVAL;
		}
	}

	job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	job->push.count = __args->push.count;
	if (__args->push.count) {
		job->push.s = kmemdup(__args->push.s,
				      sizeof(*__args->push.s) *
				      __args->push.count,
				      GFP_KERNEL);
		if (!job->push.s) {
			ret = -ENOMEM;
			goto err_free_job;
		}
	}

	job->chan = __args->chan;

	args.sched_entity = __args->sched_entity;
	args.file_priv = __args->file_priv;

	args.in_sync.count = __args->in_sync.count;
	args.in_sync.s = __args->in_sync.s;

	args.out_sync.count = __args->out_sync.count;
	args.out_sync.s = __args->out_sync.s;

	args.ops = &nouveau_exec_job_ops;
	args.resv_usage = DMA_RESV_USAGE_WRITE;

	ret = nouveau_job_init(&job->base, &args);
	if (ret)
		goto err_free_pushs;

	return 0;

err_free_pushs:
	kfree(job->push.s);
err_free_job:
	kfree(job);
	*pjob = NULL;

	return ret;
}

static int
nouveau_exec(struct nouveau_exec_job_args *args)
{
	struct nouveau_exec_job *job;
	int ret;

	ret = nouveau_exec_job_init(&job, args);
	if (ret)
		return ret;

	ret = nouveau_job_submit(&job->base);
	if (ret)
		goto err_job_fini;

	return 0;

err_job_fini:
	nouveau_job_fini(&job->base);
	return ret;
}

static int
nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
		   struct drm_nouveau_exec *req)
{
	struct drm_nouveau_sync **s;
	u32 inc = req->wait_count;
	u64 ins = req->wait_ptr;
	u32 outc = req->sig_count;
	u64 outs = req->sig_ptr;
	u32 pushc = req->push_count;
	u64 pushs = req->push_ptr;
	int ret;

	if (pushc) {
		args->push.count = pushc;
		args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
		if (IS_ERR(args->push.s))
			return PTR_ERR(args->push.s);
	}

	if (inc) {
		s = &args->in_sync.s;

		args->in_sync.count = inc;
		*s = u_memcpya(ins, inc, sizeof(**s));
		if (IS_ERR(*s)) {
			ret = PTR_ERR(*s);
			goto err_free_pushs;
		}
	}

	if (outc) {
		s = &args->out_sync.s;

		args->out_sync.count = outc;
		*s = u_memcpya(outs, outc, sizeof(**s));
		if (IS_ERR(*s)) {
			ret = PTR_ERR(*s);
			goto err_free_ins;
		}
	}

	return 0;

	/* Unwind in reverse order and free only the arrays that were copied
	 * successfully; an ERR_PTR must never be passed to u_free().
	 */
err_free_ins:
	u_free(args->in_sync.s);
err_free_pushs:
	u_free(args->push.s);
	return ret;
}

static void
nouveau_exec_ufree(struct nouveau_exec_job_args *args)
{
	u_free(args->push.s);
	u_free(args->in_sync.s);
	u_free(args->out_sync.s);
}

int
nouveau_exec_ioctl_exec(struct drm_device *dev,
			void *data,
			struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *chan16;
	struct nouveau_channel *chan = NULL;
	struct nouveau_exec_job_args args = {};
	struct drm_nouveau_exec *req = data;
	int ret = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* abi16 locks already */
	if (unlikely(!nouveau_cli_uvmm(cli)))
		return nouveau_abi16_put(abi16, -ENOSYS);

	list_for_each_entry(chan16, &abi16->channels, head) {
		if (chan16->chan->chid == req->channel) {
			chan = chan16->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	if (!chan->dma.ib_max)
		return nouveau_abi16_put(abi16, -ENOSYS);

	if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->push_count, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	ret = nouveau_exec_ucopy(&args, req);
	if (ret)
		goto out;

	args.sched_entity = &chan16->sched_entity;
	args.file_priv = file_priv;
	args.chan = chan;

	ret = nouveau_exec(&args);

out_free_args:
	nouveau_exec_ufree(&args);
out:
	return nouveau_abi16_put(abi16, ret);
}