// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2014-2018 Broadcom
 * Copyright (C) 2023 Raspberry Pi
 */

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

/* Takes the reservation lock on all the BOs being referenced, so that
 * we can attach fences and update the reservations after pushing the job
 * to the queue.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on render->unref_list). They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
		if (ret)
			goto fail;

		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
	return ret;
}

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_job_free().
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		drm_warn(dev, "Rendering requires BOs\n");
		return -EINVAL;
	}

	return drm_gem_objects_lookup(file_priv,
				      (void __user *)(uintptr_t)bo_handles,
				      job->bo_count, &job->bo);
}

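/* kref release callback: drops the references taken on the job's BOs in
 * v3d_lookup_bos(), the irq/done fences, and the perfmon (if any), then
 * frees the job itself.
 */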
static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	int i;

	if (job->bo) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bo[i]);
		kvfree(job->bo);
	}

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	if (job->perfmon)
		v3d_perfmon_put(job->perfmon);

	kfree(job);
}

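/* Render jobs additionally own the BOs on their unref_list (the tile
 * alloc/state BOs and overflow memory mentioned above), which are put
 * here before the common cleanup in v3d_job_free().
 */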
static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

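/* Releases the ioctl's reference on a job on the error paths: detaches
 * it from the scheduler and drops the submission reference. Safe to
 * call with a NULL job.
 */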
void v3d_job_cleanup(struct v3d_job *job)
{
	if (!job)
		return;

	drm_sched_job_cleanup(&job->base);
	v3d_job_put(job);
}

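/* Drops a reference on the job; once the refcount hits zero the job is
 * freed through job->free (v3d_job_free() or v3d_render_job_free()).
 * Safe to call with a NULL job.
 */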
void v3d_job_put(struct v3d_job *job)
{
	if (!job)
		return;

	kref_put(&job->refcount, job->free);
}

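/* Allocates a zeroed job container, paired with v3d_job_deallocate()
 * below, which frees the container and clears the caller's pointer when
 * v3d_job_init() fails before the job's refcount is initialized. The
 * usual pattern in the submit paths below is:
 *
 *	ret = v3d_job_allocate(v3d, (void *)&job, sizeof(*job));
 *	if (ret)
 *		return ret;
 *
 *	ret = v3d_job_init(v3d, file_priv, &job->base, ...);
 *	if (ret)
 *		v3d_job_deallocate((void *)&job);
 */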
static int
v3d_job_allocate(struct v3d_dev *v3d, void **container, size_t size)
{
	*container = kcalloc(1, size, GFP_KERNEL);
	if (!*container) {
		drm_err(&v3d->drm, "Cannot allocate memory for V3D job.\n");
		return -ENOMEM;
	}

	return 0;
}

static void
v3d_job_deallocate(void **container)
{
	kfree(*container);
	*container = NULL;
}

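/* Initializes the scheduler job and resolves its wait dependencies:
 * either the in_syncs array from the multisync extension (for the queue
 * this job runs on) or the single in_sync handle from the ioctl args.
 * On success the job holds its initial reference; on failure the
 * scheduler job is already cleaned up and the caller only needs to
 * deallocate the container.
 */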
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int ret, i;

	job->v3d = v3d;
	job->free = free;
	job->file_priv = v3d_priv;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 1, v3d_priv, file_priv->client_id);
	if (ret)
		return ret;

	if (has_multisync) {
		if (se->in_sync_count && se->wait_stage == queue) {
			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);

			for (i = 0; i < se->in_sync_count; i++) {
				struct drm_v3d_sem in;

				if (copy_from_user(&in, handle++, sizeof(in))) {
					ret = -EFAULT;
					drm_dbg(&v3d->drm, "Failed to copy wait dep handle.\n");
					goto fail_deps;
				}
				ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);

				// TODO: Investigate why this was filtered out for the IOCTL.
				if (ret && ret != -ENOENT)
					goto fail_deps;
			}
		}
	} else {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);

		// TODO: Investigate why this was filtered out for the IOCTL.
		if (ret && ret != -ENOENT)
			goto fail_deps;
	}

	kref_init(&job->refcount);

	return 0;

fail_deps:
	drm_sched_job_cleanup(&job->base);
	return ret;
}

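/* Arms the scheduler job, grabs its done fence, takes the reference
 * that the scheduler's completion path will put, and queues the job on
 * its entity. All callers in this file hold v3d->sched_lock around the
 * push.
 */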
static void
v3d_push_job(struct v3d_job *job)
{
	drm_sched_job_arm(&job->base);

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);
}

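/* Adds the job's done fence to each BO's reservation, unlocks the
 * reservations, and signals the post-submission syncobjs: either the
 * single out_sync handle or, with the multisync extension, every
 * syncobj in out_syncs.
 */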
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct v3d_submit_ext *se,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
				   DMA_RESV_USAGE_WRITE);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job, if only a single
	 * signal semaphore is supported.
	 */
	if (!has_multisync) {
		sync_out = drm_syncobj_find(file_priv, out_sync);
		if (sync_out) {
			drm_syncobj_replace_fence(sync_out, done_fence);
			drm_syncobj_put(sync_out);
		}
		return;
	}

	/* If the multiple semaphores extension is supported */
	if (se->out_sync_count) {
		for (i = 0; i < se->out_sync_count; i++) {
			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
						  done_fence);
			drm_syncobj_put(se->out_syncs[i].syncobj);
		}
		kvfree(se->out_syncs);
	}
}

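/* Shared between the direct and indirect CSD submission paths:
 * allocates and initializes the CSD job plus its trailing cache-clean
 * job, looks up the BOs, and takes their reservation locks.
 */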
static int
v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
			   struct v3d_dev *v3d,
			   struct drm_v3d_submit_csd *args,
			   struct v3d_csd_job **job,
			   struct v3d_job **clean_job,
			   struct v3d_submit_ext *se,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int ret;

	ret = v3d_job_allocate(v3d, (void *)job, sizeof(**job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &(*job)->base,
			   v3d_job_free, args->in_sync, se, V3D_CSD);
	if (ret) {
		v3d_job_deallocate((void *)job);
		return ret;
	}

	ret = v3d_job_allocate(v3d, (void *)clean_job, sizeof(**clean_job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, *clean_job,
			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
	if (ret) {
		v3d_job_deallocate((void *)clean_job);
		return ret;
	}

	(*job)->args = *args;

	ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		return ret;

	return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
}

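/* Drops the syncobj references taken by v3d_get_multisync_post_deps();
 * used on the ioctl error paths, where the out_syncs were not consumed
 * by v3d_attach_fences_and_unlock_reservation().
 */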
static void
v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
{
	unsigned int i;

	if (!(se && se->out_sync_count))
		return;

	for (i = 0; i < se->out_sync_count; i++)
		drm_syncobj_put(se->out_syncs[i].syncobj);
	kvfree(se->out_syncs);
}

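/* Looks up and takes a reference on each syncobj listed in the
 * multisync extension's out_syncs array, to be signaled when the job
 * completes.
 */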
static int
v3d_get_multisync_post_deps(struct drm_file *file_priv,
			    struct v3d_submit_ext *se,
			    u32 count, u64 handles)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_sem __user *post_deps;
	int i, ret;

	if (!count)
		return 0;

	se->out_syncs = (struct v3d_submit_outsync *)
			kvmalloc_objs(struct v3d_submit_outsync, count);
	if (!se->out_syncs)
		return -ENOMEM;

	post_deps = u64_to_user_ptr(handles);

	for (i = 0; i < count; i++) {
		struct drm_v3d_sem out;

		if (copy_from_user(&out, post_deps++, sizeof(out))) {
			ret = -EFAULT;
			drm_dbg(&v3d->drm, "Failed to copy post dep handles\n");
			goto fail;
		}

		se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
							    out.handle);
		if (!se->out_syncs[i].syncobj) {
			ret = -EINVAL;
			goto fail;
		}
	}
	se->out_sync_count = count;

	return 0;

fail:
	for (i--; i >= 0; i--)
		drm_syncobj_put(se->out_syncs[i].syncobj);
	kvfree(se->out_syncs);

	return ret;
}

/* Get data for multiple binary semaphore synchronization: parses the
 * syncobjs to be signaled when the job completes (out_syncs) and
 * records the ones to wait on (in_syncs).
 */
static int
v3d_get_multisync_submit_deps(struct drm_file *file_priv,
			      struct drm_v3d_extension __user *ext,
			      struct v3d_submit_ext *se)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_multi_sync multisync;
	int ret;

	if (se->in_sync_count || se->out_sync_count) {
		drm_dbg(&v3d->drm, "Two multisync extensions were added to the same job.");
		return -EINVAL;
	}

	if (copy_from_user(&multisync, ext, sizeof(multisync)))
		return -EFAULT;

	if (multisync.pad)
		return -EINVAL;

	ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
					  multisync.out_syncs);
	if (ret)
		return ret;

	se->in_sync_count = multisync.in_sync_count;
	se->in_syncs = multisync.in_syncs;
	se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
	se->wait_stage = multisync.wait_stage;

	return 0;
}

/* Returns false if the CPU job has an invalid configuration. */
static bool
v3d_validate_cpu_job(struct drm_file *file_priv, struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;

	if (!job) {
		drm_dbg(&v3d->drm, "CPU job extension was attached to a GPU job.\n");
		return false;
	}

	if (job->job_type) {
		drm_dbg(&v3d->drm, "Two CPU job extensions were added to the same CPU job.\n");
		return false;
	}

	return true;
}

/* Get data for the indirect CSD job submission. */
static int
v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
				struct drm_v3d_extension __user *ext,
				struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_indirect_csd indirect_csd;
	struct v3d_indirect_csd_info *info = &job->indirect_csd;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
		return -EFAULT;

	if (!v3d_has_csd(v3d)) {
		drm_warn(&v3d->drm, "Attempting CSD submit on non-CSD hardware.\n");
		return -EINVAL;
	}

	job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
	info->offset = indirect_csd.offset;
	info->wg_size = indirect_csd.wg_size;
	memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
	       sizeof(indirect_csd.wg_uniform_offsets));

	info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);

	return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
					  &info->job, &info->clean_job,
					  NULL, &info->acquire_ctx);
}

/* Get data for the query timestamp job submission. */
static int
v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
				   struct drm_v3d_extension __user *ext,
				   struct v3d_cpu_job *job)
{
	u32 __user *offsets, *syncs;
	struct drm_v3d_timestamp_query timestamp;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
		return -EFAULT;

	if (timestamp.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
					    timestamp.count);
	if (!query_info->queries)
		return -ENOMEM;

	offsets = u64_to_user_ptr(timestamp.offsets);
	syncs = u64_to_user_ptr(timestamp.syncs);

	for (i = 0; i < timestamp.count; i++) {
		u32 offset, sync;

		if (get_user(offset, offsets++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].offset = offset;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = timestamp.count;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

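/* Get data for the reset timestamp queries job submission. */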
static int
v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
				   struct drm_v3d_extension __user *ext,
				   struct v3d_cpu_job *job)
{
	u32 __user *syncs;
	struct drm_v3d_reset_timestamp_query reset;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&reset, ext, sizeof(reset)))
		return -EFAULT;

	job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
					    reset.count);
	if (!query_info->queries)
		return -ENOMEM;

	syncs = u64_to_user_ptr(reset.syncs);

	for (i = 0; i < reset.count; i++) {
		u32 sync;

		query_info->queries[i].offset = reset.offset + 8 * i;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = reset.count;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

/* Get data for the copy timestamp query results job submission. */
static int
v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
				      struct drm_v3d_extension __user *ext,
				      struct v3d_cpu_job *job)
{
	u32 __user *offsets, *syncs;
	struct drm_v3d_copy_timestamp_query copy;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&copy, ext, sizeof(copy)))
		return -EFAULT;

	if (copy.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_objs(struct v3d_timestamp_query,
					    copy.count);
	if (!query_info->queries)
		return -ENOMEM;

	offsets = u64_to_user_ptr(copy.offsets);
	syncs = u64_to_user_ptr(copy.syncs);

	for (i = 0; i < copy.count; i++) {
		u32 offset, sync;

		if (get_user(offset, offsets++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].offset = offset;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = copy.count;

	job->copy.do_64bit = copy.do_64bit;
	job->copy.do_partial = copy.do_partial;
	job->copy.availability_bit = copy.availability_bit;
	job->copy.offset = copy.offset;
	job->copy.stride = copy.stride;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

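/* Copies the performance query metadata in from userspace: for each
 * query, the syncobj to signal and the nperfmons kperfmon ids it
 * covers. On failure, the references already taken are dropped through
 * v3d_performance_query_info_free().
 */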
static int
v3d_copy_query_info(struct v3d_performance_query_info *query_info,
		    unsigned int count,
		    unsigned int nperfmons,
		    u32 __user *syncs,
		    u64 __user *kperfmon_ids,
		    struct drm_file *file_priv)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < count; i++) {
		struct v3d_performance_query *query = &query_info->queries[i];
		u32 __user *ids_pointer;
		u32 sync, id;
		u64 ids;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		if (get_user(ids, kperfmon_ids++)) {
			err = -EFAULT;
			goto error;
		}

		query->kperfmon_ids =
			kvmalloc_array(nperfmons,
				       sizeof(struct v3d_performance_query *),
				       GFP_KERNEL);
		if (!query->kperfmon_ids) {
			err = -ENOMEM;
			goto error;
		}

		ids_pointer = u64_to_user_ptr(ids);

		for (j = 0; j < nperfmons; j++) {
			if (get_user(id, ids_pointer++)) {
				kvfree(query->kperfmon_ids);
				err = -EFAULT;
				goto error;
			}

			query->kperfmon_ids[j] = id;
		}

		query->syncobj = drm_syncobj_find(file_priv, sync);
		if (!query->syncobj) {
			kvfree(query->kperfmon_ids);
			err = -ENOENT;
			goto error;
		}
	}

	return 0;

error:
	v3d_performance_query_info_free(query_info, i);
	return err;
}

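/* Get data for the reset performance queries job submission. */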
static int
v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
				     struct drm_v3d_extension __user *ext,
				     struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *query_info = &job->performance_query;
	struct drm_v3d_reset_performance_query reset;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&reset, ext, sizeof(reset)))
		return -EFAULT;

	job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;

	query_info->queries =
		kvmalloc_objs(struct v3d_performance_query, reset.count);
	if (!query_info->queries)
		return -ENOMEM;

	err = v3d_copy_query_info(query_info,
				  reset.count,
				  reset.nperfmons,
				  u64_to_user_ptr(reset.syncs),
				  u64_to_user_ptr(reset.kperfmon_ids),
				  file_priv);
	if (err)
		return err;

	query_info->count = reset.count;
	query_info->nperfmons = reset.nperfmons;

	return 0;
}

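/* Get data for the copy performance query results job submission. */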
static int
v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
					  struct drm_v3d_extension __user *ext,
					  struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *query_info = &job->performance_query;
	struct drm_v3d_copy_performance_query copy;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&copy, ext, sizeof(copy)))
		return -EFAULT;

	if (copy.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;

	query_info->queries =
		kvmalloc_objs(struct v3d_performance_query, copy.count);
	if (!query_info->queries)
		return -ENOMEM;

	err = v3d_copy_query_info(query_info,
				  copy.count,
				  copy.nperfmons,
				  u64_to_user_ptr(copy.syncs),
				  u64_to_user_ptr(copy.kperfmon_ids),
				  file_priv);
	if (err)
		return err;

	query_info->count = copy.count;
	query_info->nperfmons = copy.nperfmons;
	query_info->ncounters = copy.ncounters;

	job->copy.do_64bit = copy.do_64bit;
	job->copy.do_partial = copy.do_partial;
	job->copy.availability_bit = copy.availability_bit;
	job->copy.offset = copy.offset;
	job->copy.stride = copy.stride;

	return 0;
}

/* When userspace attaches ioctl extensions, v3d_get_extensions() parses
 * the data of each extension according to its id (name).
 */
static int
v3d_get_extensions(struct drm_file *file_priv,
		   u64 ext_handles,
		   struct v3d_submit_ext *se,
		   struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_extension __user *user_ext;
	int ret;

	user_ext = u64_to_user_ptr(ext_handles);
	while (user_ext) {
		struct drm_v3d_extension ext;

		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
			drm_dbg(&v3d->drm, "Failed to copy submit extension\n");
			return -EFAULT;
		}

		switch (ext.id) {
		case DRM_V3D_EXT_ID_MULTI_SYNC:
			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
			break;
		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
			break;
		default:
			drm_dbg(&v3d->drm, "Unknown V3D extension ID: %d\n", ext.id);
			return -EINVAL;
		}

		if (ret)
			return ret;

		user_ext = u64_to_user_ptr(ext.next);
	}

	return 0;
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render = NULL;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad)
		return -EINVAL;

	if (args->flags &&
	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
			    DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate(v3d, (void *)&render, sizeof(*render));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
	if (ret) {
		v3d_job_deallocate((void *)&render);
		goto fail;
	}

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	if (args->bcl_start != args->bcl_end) {
		ret = v3d_job_allocate(v3d, (void *)&bin, sizeof(*bin));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
		if (ret) {
			v3d_job_deallocate((void *)&bin);
			goto fail;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		ret = v3d_job_allocate(v3d, (void *)&clean_job, sizeof(*clean_job));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, clean_job,
				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
		if (ret) {
			v3d_job_deallocate((void *)&clean_job);
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		if (v3d->global_perfmon) {
			ret = -EAGAIN;
			goto fail_perfmon;
		}

		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		v3d_push_job(&bin->base);

		ret = drm_sched_job_add_dependency(&render->base.base,
						   dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	v3d_push_job(&render->base);

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_sched_job_add_dependency(&clean_job->base,
						   render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		v3d_push_job(clean_job);
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 last_job->done_fence);

	v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	v3d_job_cleanup((void *)bin);
	v3d_job_cleanup((void *)render);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_tfu_job *job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate(v3d, (void *)&job, sizeof(*job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, &se, V3D_TFU);
	if (ret) {
		v3d_job_deallocate((void *)&job);
		goto fail;
	}

	job->base.bo = kzalloc_objs(*job->base.bo, ARRAY_SIZE(args->bo_handles));
	if (!job->base.bo) {
		ret = -ENOMEM;
		goto fail;
	}

	job->args = *args;

	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
		if (!bo) {
			drm_dbg(dev, "Failed to look up GEM BO %d: %d\n",
				job->base.bo_count,
				args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			goto fail;
		}
		job->base.bo[job->base.bo_count] = bo;
	}

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 &se,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail:
	v3d_job_cleanup((void *)job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_csd_job *job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (args->pad)
		return -EINVAL;

	if (!v3d_has_csd(v3d)) {
		drm_warn(dev, "Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
					 &job, &clean_job, &se,
					 &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		if (v3d->global_perfmon) {
			ret = -EAGAIN;
			goto fail_perfmon;
		}

		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);

	ret = drm_sched_job_add_dependency(&clean_job->base,
					   dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	v3d_push_job(clean_job);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_cleanup((void *)job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

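/* Number of BOs userspace must pass for each CPU job type, enforced by
 * v3d_submit_cpu_ioctl() below.
 */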
static const unsigned int cpu_job_bo_handle_count[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
};

/**
 * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace specifies the CPU job type and data required to perform its
 * operations through the drm_v3d_extension struct.
 */
int
v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_cpu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_submit_ext *out_se = NULL;
	struct v3d_cpu_job *cpu_job = NULL;
	struct v3d_csd_job *csd_job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "Invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	ret = v3d_job_allocate(v3d, (void *)&cpu_job, sizeof(*cpu_job));
	if (ret)
		return ret;

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			goto fail;
		}
	}

	/* Every CPU job must have a CPU job user extension */
	if (!cpu_job->job_type) {
		drm_dbg(dev, "CPU job must have a CPU job user extension.\n");
		ret = -EINVAL;
		goto fail;
	}

	if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
		drm_dbg(dev, "This CPU job was not submitted with the proper number of BOs.\n");
		ret = -EINVAL;
		goto fail;
	}

	trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);

	ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
			   v3d_job_free, 0, &se, V3D_CPU);
	if (ret) {
		v3d_job_deallocate((void *)&cpu_job);
		goto fail;
	}

	clean_job = cpu_job->indirect_csd.clean_job;
	csd_job = cpu_job->indirect_csd.job;

	if (args->bo_handle_count) {
		ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
				     args->bo_handles, args->bo_handle_count);
		if (ret)
			goto fail;

		ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
		if (ret)
			goto fail;
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&cpu_job->base);

	switch (cpu_job->job_type) {
	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
		ret = drm_sched_job_add_dependency(&csd_job->base.base,
						   dma_fence_get(cpu_job->base.done_fence));
		if (ret)
			goto fail_unreserve;

		v3d_push_job(&csd_job->base);

		ret = drm_sched_job_add_dependency(&clean_job->base,
						   dma_fence_get(csd_job->base.done_fence));
		if (ret)
			goto fail_unreserve;

		v3d_push_job(clean_job);

		break;
	default:
		break;
	}
	mutex_unlock(&v3d->sched_lock);

	out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &cpu_job->base,
						 &acquire_ctx, 0,
						 out_se, cpu_job->base.done_fence);

	switch (cpu_job->job_type) {
	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
		v3d_attach_fences_and_unlock_reservation(file_priv,
							 clean_job,
							 &cpu_job->indirect_csd.acquire_ctx,
							 0, &se, clean_job->done_fence);
		break;
	default:
		break;
	}

	v3d_job_put(&cpu_job->base);
	v3d_job_put(&csd_job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);

	drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
				    &acquire_ctx);

	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &cpu_job->indirect_csd.acquire_ctx);

fail:
	v3d_job_cleanup((void *)cpu_job);
	v3d_job_cleanup((void *)csd_job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);
	kvfree(cpu_job->timestamp_query.queries);
	kvfree(cpu_job->performance_query.queries);

	return ret;
}