xref: /linux/drivers/gpu/drm/v3d/v3d_submit.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2014-2018 Broadcom
 * Copyright (C) 2023 Raspberry Pi
 */

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

/* Takes the reservation lock on all the BOs being referenced, so that
 * we can attach fences and update the reservations after pushing the job
 * to the queue.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on render->unref_list). They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
		if (ret)
			goto fail;

		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
	return ret;
}
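
/* Illustrative sketch (not part of the driver): the submit paths below pair
 * this with v3d_attach_fences_and_unlock_reservation() once the job has
 * been pushed, e.g.:
 *
 *	ret = v3d_lock_bo_reservations(job, &acquire_ctx);
 *	if (ret)
 *		return ret;
 *	// ... push the job under v3d->sched_lock ...
 *	v3d_attach_fences_and_unlock_reservation(file_priv, job,
 *						 &acquire_ctx, out_sync,
 *						 se, job->done_fence);
 */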

/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_job_free().
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		drm_warn(dev, "Rendering requires BOs\n");
		return -EINVAL;
	}

	return drm_gem_objects_lookup(file_priv,
				      (void __user *)(uintptr_t)bo_handles,
				      job->bo_count, &job->bo);
}

static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	int i;

	if (job->bo) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bo[i]);
		kvfree(job->bo);
	}

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	if (job->perfmon)
		v3d_perfmon_put(job->perfmon);

	kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put(&bo->base.base);
	}

	v3d_job_free(ref);
}

void v3d_job_cleanup(struct v3d_job *job)
{
	if (!job)
		return;

	drm_sched_job_cleanup(&job->base);
	v3d_job_put(job);
}

void v3d_job_put(struct v3d_job *job)
{
	if (!job)
		return;

	kref_put(&job->refcount, job->free);
}

static int
v3d_job_allocate(struct v3d_dev *v3d, void **container, size_t size)
{
	*container = kcalloc(1, size, GFP_KERNEL);
	if (!*container) {
		drm_err(&v3d->drm, "Cannot allocate memory for V3D job.\n");
		return -ENOMEM;
	}

	return 0;
}

static void
v3d_job_deallocate(void **container)
{
	kfree(*container);
	*container = NULL;
}

static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int ret, i;

	job->v3d = v3d;
	job->free = free;
	job->file_priv = v3d_priv;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 1, v3d_priv, file_priv->client_id);
	if (ret)
		return ret;

	if (has_multisync) {
		if (se->in_sync_count && se->wait_stage == queue) {
			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);

			for (i = 0; i < se->in_sync_count; i++) {
				struct drm_v3d_sem in;

				if (copy_from_user(&in, handle++, sizeof(in))) {
					ret = -EFAULT;
					drm_dbg(&v3d->drm, "Failed to copy wait dep handle.\n");
					goto fail_deps;
				}
				ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);

				// TODO: Investigate why this was filtered out for the IOCTL.
				if (ret && ret != -ENOENT)
					goto fail_deps;
			}
		}
	} else {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);

		// TODO: Investigate why this was filtered out for the IOCTL.
		if (ret && ret != -ENOENT)
			goto fail_deps;
	}

	kref_init(&job->refcount);

	return 0;

fail_deps:
	drm_sched_job_cleanup(&job->base);
	return ret;
}
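
/* Illustrative sketch (not part of the driver): every submit path below
 * follows the same allocate/init pattern, deallocating the container when
 * init fails so the shared fail path sees a NULL pointer:
 *
 *	ret = v3d_job_allocate(v3d, (void *)&job, sizeof(*job));
 *	if (ret)
 *		return ret;
 *
 *	ret = v3d_job_init(v3d, file_priv, &job->base,
 *			   v3d_job_free, args->in_sync, &se, V3D_TFU);
 *	if (ret) {
 *		v3d_job_deallocate((void *)&job);
 *		goto fail;
 *	}
 */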

static void
v3d_push_job(struct v3d_job *job)
{
	drm_sched_job_arm(&job->base);

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);
}
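
/* Lifetime note: after v3d_push_job(), the job holds two references: the
 * submission path's initial kref (dropped with v3d_job_put() at the end of
 * the ioctl) and the one taken above for the scheduler (dropped on job
 * completion). job->done_fence is the scheduler's "finished" fence and is
 * what gets attached to reservations and syncobjs below.
 */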

static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct v3d_submit_ext *se,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
				   DMA_RESV_USAGE_WRITE);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job
	 * (the single signal semaphore case).
	 */
	if (!has_multisync) {
		sync_out = drm_syncobj_find(file_priv, out_sync);
		if (sync_out) {
			drm_syncobj_replace_fence(sync_out, done_fence);
			drm_syncobj_put(sync_out);
		}
		return;
	}

	/* If the multiple semaphores extension is used */
	if (se->out_sync_count) {
		for (i = 0; i < se->out_sync_count; i++) {
			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
						  done_fence);
			drm_syncobj_put(se->out_syncs[i].syncobj);
		}
		kvfree(se->out_syncs);
	}
}

static int
v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
			   struct v3d_dev *v3d,
			   struct drm_v3d_submit_csd *args,
			   struct v3d_csd_job **job,
			   struct v3d_job **clean_job,
			   struct v3d_submit_ext *se,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int ret;

	ret = v3d_job_allocate(v3d, (void *)job, sizeof(**job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &(*job)->base,
			   v3d_job_free, args->in_sync, se, V3D_CSD);
	if (ret) {
		v3d_job_deallocate((void *)job);
		return ret;
	}

	ret = v3d_job_allocate(v3d, (void *)clean_job, sizeof(**clean_job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, *clean_job,
			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
	if (ret) {
		v3d_job_deallocate((void *)clean_job);
		return ret;
	}

	(*job)->args = *args;

	ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		return ret;

	return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
}

static void
v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
{
	unsigned int i;

	if (!(se && se->out_sync_count))
		return;

	for (i = 0; i < se->out_sync_count; i++)
		drm_syncobj_put(se->out_syncs[i].syncobj);
	kvfree(se->out_syncs);
}

static int
v3d_get_multisync_post_deps(struct drm_file *file_priv,
			    struct v3d_submit_ext *se,
			    u32 count, u64 handles)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_sem __user *post_deps;
	int i, ret;

	if (!count)
		return 0;

	se->out_syncs = (struct v3d_submit_outsync *)
			kvmalloc_array(count,
				       sizeof(struct v3d_submit_outsync),
				       GFP_KERNEL);
	if (!se->out_syncs)
		return -ENOMEM;

	post_deps = u64_to_user_ptr(handles);

	for (i = 0; i < count; i++) {
		struct drm_v3d_sem out;

		if (copy_from_user(&out, post_deps++, sizeof(out))) {
			ret = -EFAULT;
			drm_dbg(&v3d->drm, "Failed to copy post dep handles\n");
			goto fail;
		}

		se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
							    out.handle);
		if (!se->out_syncs[i].syncobj) {
			ret = -EINVAL;
			goto fail;
		}
	}
	se->out_sync_count = count;

	return 0;

fail:
	for (i--; i >= 0; i--)
		drm_syncobj_put(se->out_syncs[i].syncobj);
	kvfree(se->out_syncs);

	return ret;
}

/* Get data for multiple binary semaphore synchronization. Parses the
 * syncobjs to be signaled when the job completes (out_syncs) and records
 * the wait semaphores (in_syncs) for v3d_job_init() to resolve.
 */
static int
v3d_get_multisync_submit_deps(struct drm_file *file_priv,
			      struct drm_v3d_extension __user *ext,
			      struct v3d_submit_ext *se)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_multi_sync multisync;
	int ret;

	if (se->in_sync_count || se->out_sync_count) {
		drm_dbg(&v3d->drm, "Two multisync extensions were added to the same job.");
		return -EINVAL;
	}

	if (copy_from_user(&multisync, ext, sizeof(multisync)))
		return -EFAULT;

	if (multisync.pad)
		return -EINVAL;

	ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
					  multisync.out_syncs);
	if (ret)
		return ret;

	se->in_sync_count = multisync.in_sync_count;
	se->in_syncs = multisync.in_syncs;
	se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
	se->wait_stage = multisync.wait_stage;

	return 0;
}
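
/* Illustrative sketch (not compiled; userspace side). Assuming the uapi
 * fields referenced above, a multisync extension is filled out roughly as:
 *
 *	struct drm_v3d_multi_sync ms = {
 *		.base.id = DRM_V3D_EXT_ID_MULTI_SYNC,
 *		.in_syncs = (uintptr_t)in_sems,   // struct drm_v3d_sem[n_in]
 *		.in_sync_count = n_in,
 *		.out_syncs = (uintptr_t)out_sems, // struct drm_v3d_sem[n_out]
 *		.out_sync_count = n_out,
 *		.wait_stage = V3D_BIN,            // queue that waits on in_syncs
 *	};
 *
 * where each drm_v3d_sem holds a syncobj handle.
 */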

/* Returns false if the CPU job has an invalid configuration. */
static bool
v3d_validate_cpu_job(struct drm_file *file_priv, struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;

	if (!job) {
		drm_dbg(&v3d->drm, "CPU job extension was attached to a GPU job.\n");
		return false;
	}

	if (job->job_type) {
		drm_dbg(&v3d->drm, "Two CPU job extensions were added to the same CPU job.\n");
		return false;
	}

	return true;
}

/* Get data for the indirect CSD job submission. */
static int
v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
				struct drm_v3d_extension __user *ext,
				struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_indirect_csd indirect_csd;
	struct v3d_indirect_csd_info *info = &job->indirect_csd;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
		return -EFAULT;

	if (!v3d_has_csd(v3d)) {
		drm_warn(&v3d->drm, "Attempting CSD submit on non-CSD hardware.\n");
		return -EINVAL;
	}

	job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
	info->offset = indirect_csd.offset;
	info->wg_size = indirect_csd.wg_size;
	memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
	       sizeof(indirect_csd.wg_uniform_offsets));

	info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);

	return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
					  &info->job, &info->clean_job,
					  NULL, &info->acquire_ctx);
}
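
/* Note on ordering (hedged summary of the scheduler side, which lives in
 * v3d_sched.c): the CPU job runs first, reads the dispatch size from the
 * indirect BO at info->offset, and is expected to patch the CSD job's
 * configuration (and the uniforms listed in wg_uniform_offsets) before the
 * dependent CSD and cache-clean jobs run.
 */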

/* Get data for the query timestamp job submission. */
static int
v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
				   struct drm_v3d_extension __user *ext,
				   struct v3d_cpu_job *job)
{
	u32 __user *offsets, *syncs;
	struct drm_v3d_timestamp_query timestamp;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
		return -EFAULT;

	if (timestamp.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_array(timestamp.count,
					     sizeof(struct v3d_timestamp_query),
					     GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	offsets = u64_to_user_ptr(timestamp.offsets);
	syncs = u64_to_user_ptr(timestamp.syncs);

	for (i = 0; i < timestamp.count; i++) {
		u32 offset, sync;

		if (get_user(offset, offsets++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].offset = offset;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = timestamp.count;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

static int
v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
				   struct drm_v3d_extension __user *ext,
				   struct v3d_cpu_job *job)
{
	u32 __user *syncs;
	struct drm_v3d_reset_timestamp_query reset;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&reset, ext, sizeof(reset)))
		return -EFAULT;

	job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_array(reset.count,
					     sizeof(struct v3d_timestamp_query),
					     GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	syncs = u64_to_user_ptr(reset.syncs);

	for (i = 0; i < reset.count; i++) {
		u32 sync;

		/* Timestamps are stored in consecutive 8-byte slots,
		 * so query i lives at reset.offset + 8 * i.
		 */
		query_info->queries[i].offset = reset.offset + 8 * i;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = reset.count;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

/* Get data for the copy timestamp query results job submission. */
static int
v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
				      struct drm_v3d_extension __user *ext,
				      struct v3d_cpu_job *job)
{
	u32 __user *offsets, *syncs;
	struct drm_v3d_copy_timestamp_query copy;
	struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
	unsigned int i;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&copy, ext, sizeof(copy)))
		return -EFAULT;

	if (copy.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;

	query_info->queries = kvmalloc_array(copy.count,
					     sizeof(struct v3d_timestamp_query),
					     GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	offsets = u64_to_user_ptr(copy.offsets);
	syncs = u64_to_user_ptr(copy.syncs);

	for (i = 0; i < copy.count; i++) {
		u32 offset, sync;

		if (get_user(offset, offsets++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].offset = offset;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
								  sync);
		if (!query_info->queries[i].syncobj) {
			err = -ENOENT;
			goto error;
		}
	}
	query_info->count = copy.count;

	job->copy.do_64bit = copy.do_64bit;
	job->copy.do_partial = copy.do_partial;
	job->copy.availability_bit = copy.availability_bit;
	job->copy.offset = copy.offset;
	job->copy.stride = copy.stride;

	return 0;

error:
	v3d_timestamp_query_info_free(&job->timestamp_query, i);
	return err;
}

static int
v3d_copy_query_info(struct v3d_performance_query_info *query_info,
		    unsigned int count,
		    unsigned int nperfmons,
		    u32 __user *syncs,
		    u64 __user *kperfmon_ids,
		    struct drm_file *file_priv)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < count; i++) {
		struct v3d_performance_query *query = &query_info->queries[i];
		u32 __user *ids_pointer;
		u32 sync, id;
		u64 ids;

		if (get_user(sync, syncs++)) {
			err = -EFAULT;
			goto error;
		}

		if (get_user(ids, kperfmon_ids++)) {
			err = -EFAULT;
			goto error;
		}

		query->kperfmon_ids =
			kvmalloc_array(nperfmons,
				       sizeof(*query->kperfmon_ids),
				       GFP_KERNEL);
		if (!query->kperfmon_ids) {
			err = -ENOMEM;
			goto error;
		}

		ids_pointer = u64_to_user_ptr(ids);

		for (j = 0; j < nperfmons; j++) {
			if (get_user(id, ids_pointer++)) {
				kvfree(query->kperfmon_ids);
				err = -EFAULT;
				goto error;
			}

			query->kperfmon_ids[j] = id;
		}

		query->syncobj = drm_syncobj_find(file_priv, sync);
		if (!query->syncobj) {
			kvfree(query->kperfmon_ids);
			err = -ENOENT;
			goto error;
		}
	}

	return 0;

error:
	v3d_performance_query_info_free(query_info, i);
	return err;
}
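
/* Illustrative sketch (not compiled): the userspace layout consumed above
 * is two parallel arrays of @count entries, where each kperfmon_ids entry
 * is itself a user pointer to @nperfmons perfmon IDs:
 *
 *	u32 syncs[count];        // one syncobj handle per query
 *	u64 kperfmon_ids[count]; // each points to u32 ids[nperfmons]
 */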

static int
v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
				     struct drm_v3d_extension __user *ext,
				     struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *query_info = &job->performance_query;
	struct drm_v3d_reset_performance_query reset;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&reset, ext, sizeof(reset)))
		return -EFAULT;

	job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;

	query_info->queries =
		kvmalloc_array(reset.count,
			       sizeof(struct v3d_performance_query),
			       GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	err = v3d_copy_query_info(query_info,
				  reset.count,
				  reset.nperfmons,
				  u64_to_user_ptr(reset.syncs),
				  u64_to_user_ptr(reset.kperfmon_ids),
				  file_priv);
	if (err)
		return err;

	query_info->count = reset.count;
	query_info->nperfmons = reset.nperfmons;

	return 0;
}

static int
v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
					  struct drm_v3d_extension __user *ext,
					  struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *query_info = &job->performance_query;
	struct drm_v3d_copy_performance_query copy;
	int err;

	if (!v3d_validate_cpu_job(file_priv, job))
		return -EINVAL;

	if (copy_from_user(&copy, ext, sizeof(copy)))
		return -EFAULT;

	if (copy.pad)
		return -EINVAL;

	job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;

	query_info->queries =
		kvmalloc_array(copy.count,
			       sizeof(struct v3d_performance_query),
			       GFP_KERNEL);
	if (!query_info->queries)
		return -ENOMEM;

	err = v3d_copy_query_info(query_info,
				  copy.count,
				  copy.nperfmons,
				  u64_to_user_ptr(copy.syncs),
				  u64_to_user_ptr(copy.kperfmon_ids),
				  file_priv);
	if (err)
		return err;

	query_info->count = copy.count;
	query_info->nperfmons = copy.nperfmons;
	query_info->ncounters = copy.ncounters;

	job->copy.do_64bit = copy.do_64bit;
	job->copy.do_partial = copy.do_partial;
	job->copy.availability_bit = copy.availability_bit;
	job->copy.offset = copy.offset;
	job->copy.stride = copy.stride;

	return 0;
}

/* When userspace attaches extensions to an ioctl, v3d_get_extensions()
 * parses the extension data according to the extension id (name).
 */
static int
v3d_get_extensions(struct drm_file *file_priv,
		   u64 ext_handles,
		   struct v3d_submit_ext *se,
		   struct v3d_cpu_job *job)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct v3d_dev *v3d = v3d_priv->v3d;
	struct drm_v3d_extension __user *user_ext;
	int ret;

	user_ext = u64_to_user_ptr(ext_handles);
	while (user_ext) {
		struct drm_v3d_extension ext;

		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
			drm_dbg(&v3d->drm, "Failed to copy submit extension\n");
			return -EFAULT;
		}

		switch (ext.id) {
		case DRM_V3D_EXT_ID_MULTI_SYNC:
			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
			break;
		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
			break;
		default:
			drm_dbg(&v3d->drm, "Unknown V3D extension ID: %d\n", ext.id);
			return -EINVAL;
		}

		if (ret)
			return ret;

		user_ext = u64_to_user_ptr(ext.next);
	}

	return 0;
}
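
/* Illustrative sketch (not compiled; userspace side): extensions are chained
 * through drm_v3d_extension.next (0 terminates the list), so a CPU job
 * extension and a multisync extension could be combined as:
 *
 *	struct drm_v3d_timestamp_query tq = {
 *		.base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
 *		// .offsets, .syncs, .count filled in by the caller
 *	};
 *	struct drm_v3d_multi_sync ms = {
 *		.base.id = DRM_V3D_EXT_ID_MULTI_SYNC,
 *		.base.next = (uintptr_t)&tq,
 *	};
 *	args.flags |= DRM_V3D_SUBMIT_EXTENSION;
 *	args.extensions = (uintptr_t)&ms;
 */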

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU. Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render = NULL;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad)
		return -EINVAL;

	if (args->flags &&
	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
			    DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate(v3d, (void *)&render, sizeof(*render));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
	if (ret) {
		v3d_job_deallocate((void *)&render);
		goto fail;
	}

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	if (args->bcl_start != args->bcl_end) {
		ret = v3d_job_allocate(v3d, (void *)&bin, sizeof(*bin));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
		if (ret) {
			v3d_job_deallocate((void *)&bin);
			goto fail;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		ret = v3d_job_allocate(v3d, (void *)&clean_job, sizeof(*clean_job));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, clean_job,
				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
		if (ret) {
			v3d_job_deallocate((void *)&clean_job);
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		if (v3d->global_perfmon) {
			ret = -EAGAIN;
			goto fail_perfmon;
		}

		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		v3d_push_job(&bin->base);

		ret = drm_sched_job_add_dependency(&render->base.base,
						   dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	v3d_push_job(&render->base);

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_sched_job_add_dependency(&clean_job->base,
						   render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		v3d_push_job(clean_job);
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 last_job->done_fence);

	v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	v3d_job_cleanup((void *)bin);
	v3d_job_cleanup((void *)render);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}
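
/* Illustrative sketch (not compiled; userspace side): submitting a frame
 * reduces to filling the args and issuing the ioctl, e.g. via libdrm:
 *
 *	struct drm_v3d_submit_cl args = {
 *		.bcl_start = bcl_start, .bcl_end = bcl_end,
 *		.rcl_start = rcl_start, .rcl_end = rcl_end,
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = n_handles,
 *		.in_sync_bcl = in_sync, .out_sync = out_sync,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &args);
 *
 * The qma/qms/qts tile-memory fields must also be set when a binner
 * command list is provided.
 */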

/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_tfu_job *job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate(v3d, (void *)&job, sizeof(*job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, &se, V3D_TFU);
	if (ret) {
		v3d_job_deallocate((void *)&job);
		goto fail;
	}

	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		ret = -ENOMEM;
		goto fail;
	}

	job->args = *args;

	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
		if (!bo) {
			drm_dbg(dev, "Failed to look up GEM BO %d: %d\n",
				job->base.bo_count,
				args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			goto fail;
		}
		job->base.bo[job->base.bo_count] = bo;
	}

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 &se,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail:
	v3d_job_cleanup((void *)job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_csd_job *job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (args->pad)
		return -EINVAL;

	if (!v3d_has_csd(v3d)) {
		drm_warn(dev, "Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
					 &job, &clean_job, &se,
					 &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		if (v3d->global_perfmon) {
			ret = -EAGAIN;
			goto fail_perfmon;
		}

		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);

	ret = drm_sched_job_add_dependency(&clean_job->base,
					   dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	v3d_push_job(clean_job);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_cleanup((void *)job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}

static const unsigned int cpu_job_bo_handle_count[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
};

/**
 * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace specifies the CPU job type and data required to perform its
 * operations through the drm_v3d_extension struct.
 */
int
v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_cpu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_submit_ext *out_se = NULL;
	struct v3d_cpu_job *cpu_job = NULL;
	struct v3d_csd_job *csd_job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		drm_dbg(dev, "invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	ret = v3d_job_allocate(v3d, (void *)&cpu_job, sizeof(*cpu_job));
	if (ret)
		return ret;

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
		if (ret) {
			drm_dbg(dev, "Failed to get extensions.\n");
			goto fail;
		}
	}

	/* Every CPU job must have a CPU job user extension */
	if (!cpu_job->job_type) {
		drm_dbg(dev, "CPU job must have a CPU job user extension.\n");
		ret = -EINVAL;
		goto fail;
	}

	if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
		drm_dbg(dev, "This CPU job was not submitted with the proper number of BOs.\n");
		ret = -EINVAL;
		goto fail;
	}

	trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);

	ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
			   v3d_job_free, 0, &se, V3D_CPU);
	if (ret) {
		v3d_job_deallocate((void *)&cpu_job);
		goto fail;
	}

	clean_job = cpu_job->indirect_csd.clean_job;
	csd_job = cpu_job->indirect_csd.job;

	if (args->bo_handle_count) {
		ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
				     args->bo_handles, args->bo_handle_count);
		if (ret)
			goto fail;

		ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
		if (ret)
			goto fail;
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&cpu_job->base);

	switch (cpu_job->job_type) {
	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
		ret = drm_sched_job_add_dependency(&csd_job->base.base,
						   dma_fence_get(cpu_job->base.done_fence));
		if (ret)
			goto fail_unreserve;

		v3d_push_job(&csd_job->base);

		ret = drm_sched_job_add_dependency(&clean_job->base,
						   dma_fence_get(csd_job->base.done_fence));
		if (ret)
			goto fail_unreserve;

		v3d_push_job(clean_job);

		break;
	default:
		break;
	}
	mutex_unlock(&v3d->sched_lock);

	out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &cpu_job->base,
						 &acquire_ctx, 0,
						 out_se, cpu_job->base.done_fence);

	switch (cpu_job->job_type) {
	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
		v3d_attach_fences_and_unlock_reservation(file_priv,
							 clean_job,
							 &cpu_job->indirect_csd.acquire_ctx,
							 0, &se, clean_job->done_fence);
		break;
	default:
		break;
	}

	v3d_job_put(&cpu_job->base);
	v3d_job_put(&csd_job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);

	drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
				    &acquire_ctx);

	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &cpu_job->indirect_csd.acquire_ctx);

fail:
	/* v3d_job_free() doesn't release the query arrays, so free them
	 * before dropping the last job reference. cpu_job is NULL here
	 * if v3d_job_init() failed and deallocated it.
	 */
	if (cpu_job) {
		kvfree(cpu_job->timestamp_query.queries);
		kvfree(cpu_job->performance_query.queries);
	}

	v3d_job_cleanup((void *)cpu_job);
	v3d_job_cleanup((void *)csd_job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}