// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_job.h"
#include "pvr_mmu.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_client.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_sync.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <linux/types.h>
#include <uapi/drm/pvr_drm.h>

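/* Called when the last reference on a job is dropped: release every resource
 * the job still holds.
 */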
static void pvr_job_release(struct kref *kref)
{
	struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);

	xa_erase(&job->pvr_dev->job_ids, job->id);

	pvr_hwrt_data_put(job->hwrt);
	pvr_context_put(job->ctx);

	WARN_ON(job->paired_job);

	pvr_queue_job_cleanup(job);
	pvr_job_release_pm_ref(job);

	kfree(job->cmd);
	kfree(job);
}

/**
 * pvr_job_put() - Release reference on job
 * @job: Target job.
 */
void
pvr_job_put(struct pvr_job *job)
{
	if (job)
		kref_put(&job->ref_count, pvr_job_release);
}

/**
 * pvr_job_process_stream() - Build job FW structure from stream
 * @pvr_dev: Device pointer.
 * @cmd_defs: Stream definition.
 * @stream: Pointer to command stream.
 * @stream_size: Size of command stream, in bytes.
 * @job: Pointer to job.
 *
 * Caller is responsible for freeing the output structure.
 *
 * Returns:
 *  * 0 on success,
 *  * -%ENOMEM on out of memory, or
 *  * -%EINVAL on malformed stream.
 */
static int
pvr_job_process_stream(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
		       void *stream, u32 stream_size, struct pvr_job *job)
{
	int err;

	job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL);
	if (!job->cmd)
		return -ENOMEM;

	job->cmd_len = cmd_defs->dest_size;

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd);
	if (err) {
		kfree(job->cmd);
		/* Clear the pointer so pvr_job_release() doesn't free it again. */
		job->cmd = NULL;
	}

	return err;
}

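/* Copy the userspace command stream into kernel memory and process it into the
 * firmware command structure stored in job->cmd.
 */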
static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job,
			   const struct pvr_stream_cmd_defs *stream_def,
			   u64 stream_userptr, u32 stream_len)
{
	void *stream;
	int err;

	stream = memdup_user(u64_to_user_ptr(stream_userptr), stream_len);
	if (IS_ERR(stream))
		return PTR_ERR(stream);

	err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job);

	kfree(stream);
	return err;
}

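/* Translate DRM_PVR_SUBMIT_JOB_* UAPI flags into the ROGUE_*_FLAGS_* bits
 * consumed by the firmware. Unsupported bits are rejected beforehand by the
 * per-job-type FLAGS_MASK checks.
 */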
static u32
convert_geom_flags(u32 in_flags)
{
	u32 out_flags = 0;

	if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST)
		out_flags |= ROGUE_GEOM_FLAGS_FIRSTKICK;
	if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST)
		out_flags |= ROGUE_GEOM_FLAGS_LASTKICK;
	if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
		out_flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE;

	return out_flags;
}

static u32
convert_frag_flags(u32 in_flags)
{
	u32 out_flags = 0;

	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE)
		out_flags |= ROGUE_FRAG_FLAGS_SINGLE_CORE;
	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER)
		out_flags |= ROGUE_FRAG_FLAGS_DEPTHBUFFER;
	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER)
		out_flags |= ROGUE_FRAG_FLAGS_STENCILBUFFER;
	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP)
		out_flags |= ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP;
	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER)
		out_flags |= ROGUE_FRAG_FLAGS_SCRATCHBUFFER;
	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS)
		out_flags |= ROGUE_FRAG_FLAGS_GET_VIS_RESULTS;
	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
		out_flags |= ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE;

	return out_flags;
}

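/* Per-job-type firmware command initialisers: validate the UAPI flags and
 * context type, then build the firmware command from the user command stream.
 */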
static int
pvr_geom_job_fw_cmd_init(struct pvr_job *job,
			 struct drm_pvr_job *args)
{
	struct rogue_fwif_cmd_geom *cmd;
	int err;

	if (args->flags & ~DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK)
		return -EINVAL;

	if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
		return -EINVAL;

	if (!job->hwrt)
		return -EINVAL;

	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM;
	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream,
			      args->cmd_stream, args->cmd_stream_len);
	if (err)
		return err;

	cmd = job->cmd;
	cmd->cmd_shared.cmn.frame_num = 0;
	cmd->flags = convert_geom_flags(args->flags);
	pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
	return 0;
}

static int
pvr_frag_job_fw_cmd_init(struct pvr_job *job,
			 struct drm_pvr_job *args)
{
	struct rogue_fwif_cmd_frag *cmd;
	int err;

	if (args->flags & ~DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK)
		return -EINVAL;

	if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
		return -EINVAL;

	if (!job->hwrt)
		return -EINVAL;

	job->fw_ccb_cmd_type = (args->flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER) ?
			       ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR :
			       ROGUE_FWIF_CCB_CMD_TYPE_FRAG;
	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream,
			      args->cmd_stream, args->cmd_stream_len);
	if (err)
		return err;

	cmd = job->cmd;
	cmd->cmd_shared.cmn.frame_num = 0;
	cmd->flags = convert_frag_flags(args->flags);
	pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
	return 0;
}

static u32
convert_compute_flags(u32 in_flags)
{
	u32 out_flags = 0;

	if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP)
		out_flags |= ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
	if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
		out_flags |= ROGUE_COMPUTE_FLAG_SINGLE_CORE;

	return out_flags;
}

static int
pvr_compute_job_fw_cmd_init(struct pvr_job *job,
			    struct drm_pvr_job *args)
{
	struct rogue_fwif_cmd_compute *cmd;
	int err;

	if (args->flags & ~DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK)
		return -EINVAL;

	if (job->ctx->type != DRM_PVR_CTX_TYPE_COMPUTE)
		return -EINVAL;

	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM;
	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream,
			      args->cmd_stream, args->cmd_stream_len);
	if (err)
		return err;

	cmd = job->cmd;
	cmd->common.frame_num = 0;
	cmd->flags = convert_compute_flags(args->flags);
	return 0;
}

static u32
convert_transfer_flags(u32 in_flags)
{
	u32 out_flags = 0;

	if (in_flags & DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE)
		out_flags |= ROGUE_TRANSFER_FLAGS_SINGLE_CORE;

	return out_flags;
}

static int
pvr_transfer_job_fw_cmd_init(struct pvr_job *job,
			     struct drm_pvr_job *args)
{
	struct rogue_fwif_cmd_transfer *cmd;
	int err;

	if (args->flags & ~DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK)
		return -EINVAL;

	if (job->ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG)
		return -EINVAL;

	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D;
	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream,
			      args->cmd_stream, args->cmd_stream_len);
	if (err)
		return err;

	cmd = job->cmd;
	cmd->common.frame_num = 0;
	cmd->flags = convert_transfer_flags(args->flags);
	return 0;
}

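/* Dispatch to the firmware command initialiser matching the UAPI job type. */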
static int
pvr_job_fw_cmd_init(struct pvr_job *job,
		    struct drm_pvr_job *args)
{
	switch (args->type) {
	case DRM_PVR_JOB_TYPE_GEOMETRY:
		return pvr_geom_job_fw_cmd_init(job, args);

	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return pvr_frag_job_fw_cmd_init(job, args);

	case DRM_PVR_JOB_TYPE_COMPUTE:
		return pvr_compute_job_fw_cmd_init(job, args);

	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
		return pvr_transfer_job_fw_cmd_init(job, args);

	default:
		return -EINVAL;
	}
}

/**
 * struct pvr_job_data - Helper container for pairing jobs with the
 * sync_ops supplied for them by the user.
 */
struct pvr_job_data {
	/** @job: Pointer to the job. */
	struct pvr_job *job;

	/** @sync_ops: Pointer to the sync_ops associated with @job. */
	struct drm_pvr_sync_op *sync_ops;

	/** @sync_op_count: Number of members of @sync_ops. */
	u32 sync_op_count;
};

/**
 * prepare_job_syncs() - Prepare all sync objects for a single job.
 * @pvr_file: PowerVR file.
 * @job_data: Precreated job and sync_ops array.
 * @signal_array: xarray to receive signal sync objects.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_sync_signal_array_collect_ops(),
 *    pvr_sync_add_deps_to_job(), drm_sched_job_add_resv_dependencies() or
 *    pvr_sync_signal_array_update_fences().
 */
static int
prepare_job_syncs(struct pvr_file *pvr_file,
		  struct pvr_job_data *job_data,
		  struct xarray *signal_array)
{
	struct dma_fence *done_fence;
	int err = pvr_sync_signal_array_collect_ops(signal_array,
						    from_pvr_file(pvr_file),
						    job_data->sync_op_count,
						    job_data->sync_ops);

	if (err)
		return err;

	err = pvr_sync_add_deps_to_job(pvr_file, &job_data->job->base,
				       job_data->sync_op_count,
				       job_data->sync_ops, signal_array);
	if (err)
		return err;

	if (job_data->job->hwrt) {
		/* The geometry job writes the HWRT region headers, which are
		 * then read by the fragment job.
		 */
		struct drm_gem_object *obj =
			gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem);
		enum dma_resv_usage usage =
			dma_resv_usage_rw(job_data->job->type ==
					  DRM_PVR_JOB_TYPE_GEOMETRY);

		dma_resv_lock(obj->resv, NULL);
		err = drm_sched_job_add_resv_dependencies(&job_data->job->base,
							  obj->resv, usage);
		dma_resv_unlock(obj->resv);
		if (err)
			return err;
	}

	/* We need to arm the job to get the job done fence. */
	done_fence = pvr_queue_job_arm(job_data->job);

	err = pvr_sync_signal_array_update_fences(signal_array,
						  job_data->sync_op_count,
						  job_data->sync_ops,
						  done_fence);
	return err;
}

/**
 * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs.
 * @pvr_file: PowerVR file.
 * @job_data: Array of precreated jobs and their sync_ops.
 * @job_count: Number of jobs. Updated on error to the number of jobs whose
 * syncs were successfully prepared.
 * @signal_array: xarray to receive signal sync objects.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by prepare_job_syncs().
 */
static int
prepare_job_syncs_for_each(struct pvr_file *pvr_file,
			   struct pvr_job_data *job_data,
			   u32 *job_count,
			   struct xarray *signal_array)
{
	for (u32 i = 0; i < *job_count; i++) {
		int err = prepare_job_syncs(pvr_file, &job_data[i],
					    signal_array);

		if (err) {
			*job_count = i;
			return err;
		}
	}

	return 0;
}

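/* Create a job from the userspace args: allocate it, assign a device-wide job
 * ID, look up its context and HWRT data, then build the firmware command and
 * initialise its queue state.
 */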
static struct pvr_job *
create_job(struct pvr_device *pvr_dev,
	   struct pvr_file *pvr_file,
	   struct drm_pvr_job *args)
{
	struct pvr_job *job = NULL;
	int err;

	if (!args->cmd_stream || !args->cmd_stream_len)
		return ERR_PTR(-EINVAL);

	if (args->type != DRM_PVR_JOB_TYPE_GEOMETRY &&
	    args->type != DRM_PVR_JOB_TYPE_FRAGMENT &&
	    (args->hwrt.set_handle || args->hwrt.data_index))
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	kref_init(&job->ref_count);
	job->type = args->type;
	job->pvr_dev = pvr_dev;

	err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put_job;

	job->ctx = pvr_context_lookup(pvr_file, args->context_handle);
	if (!job->ctx) {
		err = -EINVAL;
		goto err_put_job;
	}

	if (args->hwrt.set_handle) {
		job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle,
						 args->hwrt.data_index);
		if (!job->hwrt) {
			err = -EINVAL;
			goto err_put_job;
		}
	}

	err = pvr_job_fw_cmd_init(job, args);
	if (err)
		goto err_put_job;

	err = pvr_queue_job_init(job);
	if (err)
		goto err_put_job;

	return job;

err_put_job:
	pvr_job_put(job);
	return ERR_PTR(err);
}

/**
 * pvr_job_data_fini() - Clean up all allocations used to set up job submission.
 * @job_data: Job data array.
 * @job_count: Number of members of @job_data.
 */
static void
pvr_job_data_fini(struct pvr_job_data *job_data, u32 job_count)
{
	for (u32 i = 0; i < job_count; i++) {
		pvr_job_put(job_data[i].job);
		kvfree(job_data[i].sync_ops);
	}
}

/**
 * pvr_job_data_init() - Init an array of created jobs, associating them with
 * the appropriate sync_ops args, which will be copied in.
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @job_args: Job args array copied from user.
 * @job_count: Number of members of @job_args.
 * @job_data_out: Job data array.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by create_job() or PVR_UOBJ_GET_ARRAY().
 */
static int pvr_job_data_init(struct pvr_device *pvr_dev,
			     struct pvr_file *pvr_file,
			     struct drm_pvr_job *job_args,
			     u32 *job_count,
			     struct pvr_job_data *job_data_out)
{
	int err = 0, i = 0;

	for (; i < *job_count; i++) {
		job_data_out[i].job =
			create_job(pvr_dev, pvr_file, &job_args[i]);
		err = PTR_ERR_OR_ZERO(job_data_out[i].job);

		if (err) {
			*job_count = i;
			job_data_out[i].job = NULL;
			goto err_cleanup;
		}

		err = PVR_UOBJ_GET_ARRAY(job_data_out[i].sync_ops,
					 &job_args[i].sync_ops);
		if (err) {
			*job_count = i;

			/* Ensure the job created above is also cleaned up. */
			i++;
			goto err_cleanup;
		}

		job_data_out[i].sync_op_count = job_args[i].sync_ops.count;
	}

	return 0;

err_cleanup:
	pvr_job_data_fini(job_data_out, i);

	return err;
}

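/* Hand every armed job over to its queue for execution. */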
static void
push_jobs(struct pvr_job_data *job_data, u32 job_count)
{
	for (u32 i = 0; i < job_count; i++)
		pvr_queue_job_push(job_data[i].job);
}

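/* Lock the FW object's GEM object and reserve one fence slot on its
 * reservation, so the job's finished fence can be added later.
 */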
static int
prepare_fw_obj_resv(struct drm_exec *exec, struct pvr_fw_object *fw_obj)
{
	return drm_exec_prepare_obj(exec, gem_from_pvr_gem(fw_obj->gem), 1);
}

static int
jobs_lock_all_objs(struct drm_exec *exec, struct pvr_job_data *job_data,
		   u32 job_count)
{
	for (u32 i = 0; i < job_count; i++) {
		struct pvr_job *job = job_data[i].job;

		/* Grab a lock on the context to guard against concurrent
		 * submission to the same queue.
		 */
		int err = drm_exec_lock_obj(exec,
					    gem_from_pvr_gem(job->ctx->fw_obj->gem));

		if (err)
			return err;

		if (job->hwrt) {
			err = prepare_fw_obj_resv(exec,
						  job->hwrt->fw_obj);
			if (err)
				return err;
		}
	}

	return 0;
}

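/* Lock all objects touched by the jobs under drm_exec, retrying until
 * everything is locked without contention.
 */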
static int
prepare_job_resvs_for_each(struct drm_exec *exec, struct pvr_job_data *job_data,
			   u32 job_count)
{
	drm_exec_until_all_locked(exec) {
		int err = jobs_lock_all_objs(exec, job_data, job_count);

		drm_exec_retry_on_contention(exec);
		if (err)
			return err;
	}

	return 0;
}

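/* Add the job's finished fence to the HWRT reservation: geometry jobs write
 * the region headers, fragment jobs only read them.
 */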
static void
update_job_resvs(struct pvr_job *job)
{
	if (job->hwrt) {
		enum dma_resv_usage usage = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
					    DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
		struct drm_gem_object *obj = gem_from_pvr_gem(job->hwrt->fw_obj->gem);

		dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage);
	}
}

static void
update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count)
{
	for (u32 i = 0; i < job_count; i++)
		update_job_resvs(job_data[i].job);
}

static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
{
	struct pvr_job *geom_job = a, *frag_job = b;
	struct dma_fence *fence;
	unsigned long index;

	/* Geometry and fragment jobs can be combined if they are queued to the
	 * same context and target the same HWRT.
	 */
	if (a->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
	    b->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
	    a->ctx != b->ctx ||
	    a->hwrt != b->hwrt)
		return false;

	xa_for_each(&frag_job->base.dependencies, index, fence) {
		/* We combine when we see an explicit geom -> frag dep. */
		if (&geom_job->base.s_fence->scheduled == fence)
			return true;
	}

	return false;
}

static struct dma_fence *
get_last_queued_job_scheduled_fence(struct pvr_queue *queue,
				    struct pvr_job_data *job_data,
				    u32 cur_job_pos)
{
	/* We iterate over the current job array in reverse order to grab the
	 * last to-be-queued job targeting the same queue.
	 */
	for (u32 i = cur_job_pos; i > 0; i--) {
		struct pvr_job *job = job_data[i - 1].job;

		if (job->ctx == queue->ctx && job->type == queue->type)
			return dma_fence_get(&job->base.s_fence->scheduled);
	}

	/* If we didn't find any, we just return the last queued job scheduled
	 * fence attached to the queue.
	 */
	return dma_fence_get(queue->last_queued_job_scheduled_fence);
}

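/* Pair each geometry job with the fragment job that follows it and explicitly
 * depends on it. A paired fragment job is submitted by the geometry queue
 * rather than on its own.
 */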
static int
pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
{
	for (u32 i = 0; i < *job_count - 1; i++) {
		struct pvr_job *geom_job = job_data[i].job;
		struct pvr_job *frag_job = job_data[i + 1].job;
		struct pvr_queue *frag_queue;
		struct dma_fence *f;

		if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job))
			continue;

		/* The fragment job will be submitted by the geometry queue. We
		 * need to make sure it comes after all the other fragment jobs
		 * queued before it.
		 */
		frag_queue = pvr_context_get_queue_for_job(frag_job->ctx,
							   frag_job->type);
		f = get_last_queued_job_scheduled_fence(frag_queue, job_data,
							i);
		if (f) {
			int err = drm_sched_job_add_dependency(&geom_job->base,
							       f);
			if (err) {
				*job_count = i;
				return err;
			}
		}

		/* The KCCB slot will be reserved by the geometry job, so we can
		 * drop the KCCB fence on the fragment job.
		 */
		pvr_kccb_fence_put(frag_job->kccb_fence);
		frag_job->kccb_fence = NULL;

		geom_job->paired_job = frag_job;
		frag_job->paired_job = geom_job;

		/* Skip the fragment job we just paired to the geometry job. */
		i++;
	}

	return 0;
}

/**
 * pvr_submit_jobs() - Submit jobs to the GPU
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @args: Ioctl args.
 *
 * Submission is asynchronous: jobs are pushed to their queues and executed by
 * the GPU at some later point. Completion is reported through the sync
 * operations supplied with each job.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EFAULT if arguments cannot be copied from user space,
 *  * -%EINVAL on invalid arguments, or
 *  * Any other error.
 */
int
pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
		struct drm_pvr_ioctl_submit_jobs_args *args)
{
	struct pvr_job_data *job_data = NULL;
	struct drm_pvr_job *job_args;
	struct xarray signal_array;
	u32 jobs_alloced = 0;
	struct drm_exec exec;
	int err;

	if (!args->jobs.count)
		return -EINVAL;

	err = PVR_UOBJ_GET_ARRAY(job_args, &args->jobs);
	if (err)
		return err;

	job_data = kvmalloc_array(args->jobs.count, sizeof(*job_data),
				  GFP_KERNEL | __GFP_ZERO);
	if (!job_data) {
		err = -ENOMEM;
		goto out_free;
	}

	err = pvr_job_data_init(pvr_dev, pvr_file, job_args, &args->jobs.count,
				job_data);
	if (err)
		goto out_free;

	jobs_alloced = args->jobs.count;

	/*
	 * Flush MMU if needed - this has been deferred until now to avoid
	 * overuse of this expensive operation.
	 */
	err = pvr_mmu_flush_exec(pvr_dev, false);
	if (err)
		goto out_job_data_cleanup;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0);

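	/* The signal array collects the syncobjs to signal; their fences are
	 * only pushed once every job has been queued.
	 */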
	xa_init_flags(&signal_array, XA_FLAGS_ALLOC);

	err = prepare_job_syncs_for_each(pvr_file, job_data, &args->jobs.count,
					 &signal_array);
	if (err)
		goto out_exec_fini;

	err = prepare_job_resvs_for_each(&exec, job_data, args->jobs.count);
	if (err)
		goto out_exec_fini;

	err = pvr_jobs_link_geom_frag(job_data, &args->jobs.count);
	if (err)
		goto out_exec_fini;

	/* Anything after that point must succeed because we start exposing job
	 * finished fences to the outside world.
	 */
	update_job_resvs_for_each(job_data, args->jobs.count);
	push_jobs(job_data, args->jobs.count);
	pvr_sync_signal_array_push_fences(&signal_array);
	err = 0;

out_exec_fini:
	drm_exec_fini(&exec);
	pvr_sync_signal_array_cleanup(&signal_array);

out_job_data_cleanup:
	pvr_job_data_fini(job_data, jobs_alloced);

out_free:
	kvfree(job_data);
	kvfree(job_args);

	return err;
}