xref: /linux/drivers/accel/ivpu/ivpu_job.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <drm/drm_file.h>

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"
#include "vpu_boot_api.h"

#define CMD_BUF_IDX	     0
#define JOB_MAX_BUFFER_COUNT 65535

static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
	ivpu_hw_db_set(vdev, cmdq->db_id);
}

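/*
 * Allocate the per-queue preemption buffers that the FW uses for mid-job
 * preemption (hence the IVPU_TEST_MODE_MIP_DISABLE escape hatch). Only
 * needed with the HW scheduler; with the OS scheduler no buffers are
 * created. The primary buffer is placed in the user address range, the
 * secondary one in the DMA range.
 */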
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
					  struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
	u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);

	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
	    ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
		return 0;

	cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
						   primary_size, DRM_IVPU_BO_WC);
	if (!cmdq->primary_preempt_buf) {
		ivpu_err(vdev, "Failed to create primary preemption buffer\n");
		return -ENOMEM;
	}

	cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
						     secondary_size, DRM_IVPU_BO_WC);
	if (!cmdq->secondary_preempt_buf) {
		ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
		goto err_free_primary;
	}

	return 0;

err_free_primary:
	ivpu_bo_free(cmdq->primary_preempt_buf);
	cmdq->primary_preempt_buf = NULL;
	return -ENOMEM;
}

static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
					 struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
		return;

	if (cmdq->primary_preempt_buf)
		ivpu_bo_free(cmdq->primary_preempt_buf);
	if (cmdq->secondary_preempt_buf)
		ivpu_bo_free(cmdq->secondary_preempt_buf);
}

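/*
 * Allocate a command queue together with its backing resources: a global
 * doorbell id (vdev->db_xa), a per-context queue id (file_priv->cmdq_xa),
 * a 4 KiB mappable buffer holding the job queue itself and, in HW
 * scheduling mode, the preemption buffers. A preemption buffer allocation
 * failure is not fatal; it only limits preemption.
 */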
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq;
	int ret;

	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return NULL;

	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
			      GFP_KERNEL);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
		goto err_free_cmdq;
	}

	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
			      &file_priv->cmdq_id_next, GFP_KERNEL);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
		goto err_erase_db_xa;
	}

	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!cmdq->mem)
		goto err_erase_cmdq_xa;

	ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
	if (ret)
		ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");

	return cmdq;

err_erase_cmdq_xa:
	xa_erase(&file_priv->cmdq_xa, cmdq->id);
err_erase_db_xa:
	xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
	kfree(cmdq);
	return NULL;
}

static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	if (!cmdq)
		return;

	ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
	ivpu_bo_free(cmdq->mem);
	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
	kfree(cmdq);
}

static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
			      u8 priority)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
				       task_pid_nr(current), engine,
				       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	if (ret)
		return ret;

	ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
							priority);
	if (ret)
		return ret;

	return 0;
}

static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
		ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
					       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	else
		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));

	if (!ret)
		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
			 cmdq->db_id, cmdq->id, file_priv->ctx.id);

	return ret;
}

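/*
 * Lazily bring the queue up on the device side: initialize the job queue
 * header in the write-combined buffer, create the HWS command queue (HW
 * scheduling mode only) and register the doorbell with the FW. A no-op
 * when the doorbell is already registered; ivpu_cmdq_reset() clears
 * db_registered so the queue is re-registered after a recovery.
 */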
static int
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct vpu_job_queue_header *jobq_header;
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (cmdq->db_registered)
		return 0;

	cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
				  sizeof(struct vpu_job_queue_entry));

	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
	jobq_header = &cmdq->jobq->header;
	jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
	jobq_header->head = 0;
	jobq_header->tail = 0;
	if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
		ivpu_dbg(vdev, JOB, "Turbo mode enabled\n");
		jobq_header->flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
	}

	wmb(); /* Flush WC buffer for jobq->header */

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
		if (ret)
			return ret;
	}

	ret = ivpu_register_db(file_priv, cmdq);
	if (ret)
		return ret;

	cmdq->db_registered = true;

	return 0;
}

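/*
 * Device-side teardown counterpart of ivpu_cmdq_init(): destroy the HWS
 * command queue (HW scheduling mode only) and unregister the doorbell.
 * JSM errors are not propagated; teardown continues regardless.
 */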
static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	struct ivpu_device *vdev = file_priv->vdev;
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (!cmdq->db_registered)
		return 0;

	cmdq->db_registered = false;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
		if (!ret)
			ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
	}

	ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
	if (!ret)
		ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);

	return 0;
}

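/*
 * Return the context's command queue for the given priority, allocating
 * and initializing one on first use. Note that xa_for_each() leaves cmdq
 * NULL when no queue with a matching priority exists.
 */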
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
{
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;
	int ret;

	lockdep_assert_held(&file_priv->lock);

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
		if (cmdq->priority == priority)
			break;

	if (!cmdq) {
		cmdq = ivpu_cmdq_alloc(file_priv);
		if (!cmdq)
			return NULL;
		cmdq->priority = priority;
	}

	ret = ivpu_cmdq_init(file_priv, cmdq, priority);
	if (ret)
		return NULL;

	return cmdq;
}

void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;

	lockdep_assert_held(&file_priv->lock);

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
		xa_erase(&file_priv->cmdq_xa, cmdq_id);
		ivpu_cmdq_fini(file_priv, cmdq);
		ivpu_cmdq_free(file_priv, cmdq);
	}
}

/*
 * Mark the doorbells of all command queues in a context as unregistered.
 * This must be called when the VPU hardware is restarted and the FW loses
 * job queue state. The doorbell is registered again the next time the
 * job queue is used.
 */
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;

	mutex_lock(&file_priv->lock);

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
		cmdq->db_registered = false;

	mutex_unlock(&file_priv->lock);
}

void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		ivpu_cmdq_reset(file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
{
	struct ivpu_cmdq *cmdq;
	unsigned long cmdq_id;

	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
		ivpu_cmdq_fini(file_priv, cmdq);
}

void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	lockdep_assert_held(&file_priv->lock);

	ivpu_cmdq_fini_all(file_priv);

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
}

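/*
 * Append a job to the queue's ring buffer. One slot is intentionally left
 * unused so that a full queue can be told apart from an empty one: e.g.
 * with entry_count == 4, head == 0 and tail == 3, next_entry wraps to
 * (3 + 1) % 4 == 0 == head and the push fails with -EBUSY even though the
 * slot at tail holds no job. The wmb() pair orders the entry write before
 * the tail update and flushes the write-combined mapping so the FW
 * observes a consistent queue.
 */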
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	struct vpu_job_queue_header *header = &cmdq->jobq->header;
	struct vpu_job_queue_entry *entry;
	u32 tail = READ_ONCE(header->tail);
	u32 next_entry = (tail + 1) % cmdq->entry_count;

	/* Check if there is space left in job queue */
	if (next_entry == header->head) {
		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
			 job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
		return -EBUSY;
	}

	entry = &cmdq->jobq->slot[tail].job;
	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
	entry->job_id = job->job_id;
	entry->flags = 0;
	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		if (cmdq->primary_preempt_buf) {
			entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
			entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
		}

		if (cmdq->secondary_preempt_buf) {
			entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
			entry->secondary_preempt_buf_size =
				ivpu_bo_size(cmdq->secondary_preempt_buf);
		}
	}

	wmb(); /* Ensure that tail is updated after filling entry */
	header->tail = next_entry;
	wmb(); /* Flush WC buffer for jobq header */

	return 0;
}

struct ivpu_fence {
	struct dma_fence base;
	spinlock_t lock; /* protects base */
	struct ivpu_device *vdev;
};

static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
{
	return container_of(fence, struct ivpu_fence, base);
}

static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
{
	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);

	return dev_name(ivpu_fence->vdev->drm.dev);
}

static const struct dma_fence_ops ivpu_fence_ops = {
	.get_driver_name = ivpu_fence_get_driver_name,
	.get_timeline_name = ivpu_fence_get_timeline_name,
};

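/*
 * Note: ivpu_fence_ops provides no .release callback, so dma_fence_release()
 * falls back to dma_fence_free() on the fence itself; that frees the whole
 * allocation because 'base' is the first member of struct ivpu_fence.
 */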
static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
{
	struct ivpu_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->vdev = vdev;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);

	return &fence->base;
}

static void ivpu_job_destroy(struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	u32 i;

	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d\n",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx);

	for (i = 0; i < job->bo_count; i++)
		if (job->bos[i])
			drm_gem_object_put(&job->bos[i]->base.base);

	dma_fence_put(job->done_fence);
	ivpu_file_priv_put(&job->file_priv);
	kfree(job);
}

static struct ivpu_job *
ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_job *job;

	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
	if (!job)
		return NULL;

	job->vdev = vdev;
	job->engine_idx = engine_idx;
	job->bo_count = bo_count;
	job->done_fence = ivpu_fence_create(vdev);
	if (!job->done_fence) {
		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
		goto err_free_job;
	}

	job->file_priv = ivpu_file_priv_get(file_priv);

	trace_job("create", job);
	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d\n", file_priv->ctx.id, job->engine_idx);
	return job;

err_free_job:
	kfree(job);
	return NULL;
}

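/*
 * Erase a job from the submitted-jobs xarray. When the last in-flight job
 * is removed, fold the interval since busy_start_ts into vdev->busy_time,
 * which accumulates the device's total busy time.
 */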
static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
{
	struct ivpu_job *job;

	xa_lock(&vdev->submitted_jobs_xa);
	job = __xa_erase(&vdev->submitted_jobs_xa, job_id);

	if (xa_empty(&vdev->submitted_jobs_xa) && job) {
		vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
					    vdev->busy_time);
	}

	xa_unlock(&vdev->submitted_jobs_xa);

	return job;
}

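/*
 * Complete a job: store the job status in the command buffer BO (where
 * user space can read it), signal the done fence and release the job along
 * with its runtime PM reference. Jobs from contexts that triggered MMU
 * faults are forced to ABORTED status.
 */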
static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
	struct ivpu_job *job;

	job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
	if (!job)
		return -ENOENT;

	if (job->file_priv->has_mmu_faults)
		job_status = DRM_IVPU_JOB_STATUS_ABORTED;

	job->bos[CMD_BUF_IDX]->job_status = job_status;
	dma_fence_signal(job->done_fence);

	trace_job("done", job);
	ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);

	ivpu_job_destroy(job);
	ivpu_stop_job_timeout_detection(vdev);

	ivpu_rpm_put(vdev);
	return 0;
}

void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
	struct ivpu_job *job;
	unsigned long id;

	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}

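/*
 * Queue a job on the device. The job id is allocated while holding the
 * submitted_jobs_xa lock so that the "first job" check, the busy-time
 * start timestamp and the doorbell ring stay consistent. In NULL_HW test
 * mode the queue head is advanced immediately and the job is completed
 * synchronously instead of ringing the doorbell.
 */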
static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
{
	struct ivpu_file_priv *file_priv = job->file_priv;
	struct ivpu_device *vdev = job->vdev;
	struct ivpu_cmdq *cmdq;
	bool is_first_job;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = ivpu_cmdq_acquire(file_priv, priority);
	if (!cmdq) {
		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
				      file_priv->ctx.id, job->engine_idx, priority);
		ret = -EINVAL;
		goto err_unlock_file_priv;
	}

	xa_lock(&vdev->submitted_jobs_xa);
	is_first_job = xa_empty(&vdev->submitted_jobs_xa);
	ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
				&file_priv->job_id_next, GFP_KERNEL);
	if (ret < 0) {
		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
			 file_priv->ctx.id);
		ret = -EBUSY;
		goto err_unlock_submitted_jobs_xa;
	}

	ret = ivpu_cmdq_push_job(cmdq, job);
	if (ret)
		goto err_erase_xa;

	ivpu_start_job_timeout_detection(vdev);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
		cmdq->jobq->header.head = cmdq->jobq->header.tail;
		wmb(); /* Flush WC buffer for jobq header */
	} else {
		ivpu_cmdq_ring_db(vdev, cmdq);
		if (is_first_job)
			vdev->busy_start_ts = ktime_get();
	}

	trace_job("submit", job);
	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
		 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);

	xa_unlock(&vdev->submitted_jobs_xa);

	mutex_unlock(&file_priv->lock);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);

	return 0;

err_erase_xa:
	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock_submitted_jobs_xa:
	xa_unlock(&vdev->submitted_jobs_xa);
err_unlock_file_priv:
	mutex_unlock(&file_priv->lock);
	ivpu_rpm_put(vdev);
	return ret;
}

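/*
 * Look up and pin all buffer handles, validate the command buffer (it must
 * be idle and commands_offset must fall inside it), then attach the job's
 * done fence to every BO reservation: as a WRITE fence on the command
 * buffer and as a BOOKKEEP fence on the remaining buffers. References taken
 * here are dropped by ivpu_job_destroy() on all paths, including partial
 * failure.
 */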
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
				u32 buf_count, u32 commands_offset)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ww_acquire_ctx acquire_ctx;
	enum dma_resv_usage usage;
	struct ivpu_bo *bo;
	int ret;
	u32 i;

	for (i = 0; i < buf_count; i++) {
		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);

		if (!obj)
			return -ENOENT;

		job->bos[i] = to_ivpu_bo(obj);

		ret = ivpu_bo_pin(job->bos[i]);
		if (ret)
			return ret;
	}

	bo = job->bos[CMD_BUF_IDX];
	if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
		ivpu_warn(vdev, "Buffer is already in use\n");
		return -EBUSY;
	}

	if (commands_offset >= ivpu_bo_size(bo)) {
		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
		return -EINVAL;
	}

	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
					&acquire_ctx);
	if (ret) {
		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
		return ret;
	}

	for (i = 0; i < buf_count; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
		if (ret) {
			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
			goto unlock_reservations;
		}
	}

	for (i = 0; i < buf_count; i++) {
		usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
		dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
	}

unlock_reservations:
	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);

	wmb(); /* Flush write combining buffers */

	return ret;
}

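/*
 * Translate a UAPI job priority to the value handed to the HW scheduler:
 * DRM_IVPU_JOB_PRIORITY_DEFAULT selects DRM_IVPU_JOB_PRIORITY_NORMAL, and
 * every explicit level is shifted down by one since the UAPI range
 * reserves 0 for DEFAULT.
 */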
static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
{
	if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
		return DRM_IVPU_JOB_PRIORITY_NORMAL;

	return priority - 1;
}

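/*
 * DRM_IOCTL_IVPU_SUBMIT handler: validate the submit parameters, copy the
 * buffer handle array from user space, build the job, prepare its BOs and
 * submit it under the PM reset lock. On submission failure the done fence
 * is signaled before the job is destroyed so that waiters are not left
 * hanging.
 */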
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_submit *params = data;
	struct ivpu_job *job;
	u32 *buf_handles;
	int idx, ret;
	u8 priority;

	if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
		return -EINVAL;

	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
		return -EINVAL;

	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
		return -EINVAL;

	if (!IS_ALIGNED(params->commands_offset, 8))
		return -EINVAL;

	if (!file_priv->ctx.id)
		return -EINVAL;

	if (file_priv->has_mmu_faults)
		return -EBADFD;

	buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
	if (!buf_handles)
		return -ENOMEM;

	ret = copy_from_user(buf_handles,
			     (void __user *)params->buffers_ptr,
			     params->buffer_count * sizeof(u32));
	if (ret) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	if (!drm_dev_enter(&vdev->drm, &idx)) {
		ret = -ENODEV;
		goto err_free_handles;
	}

	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
		 file_priv->ctx.id, params->buffer_count);

	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
	if (!job) {
		ivpu_err(vdev, "Failed to create job\n");
		ret = -ENOMEM;
		goto err_exit_dev;
	}

	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
					      params->commands_offset);
	if (ret) {
		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
		goto err_destroy_job;
	}

	priority = ivpu_job_to_hws_priority(file_priv, params->priority);

	down_read(&vdev->pm->reset_lock);
	ret = ivpu_job_submit(job, priority);
	up_read(&vdev->pm->reset_lock);
	if (ret)
		goto err_signal_fence;

	drm_dev_exit(idx);
	kfree(buf_handles);
	return ret;

err_signal_fence:
	dma_fence_signal(job->done_fence);
err_destroy_job:
	ivpu_job_destroy(job);
err_exit_dev:
	drm_dev_exit(idx);
err_free_handles:
	kfree(buf_handles);
	return ret;
}

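/*
 * IPC consumer callback for VPU_IPC_CHAN_JOB_RET: completes the job named
 * in the JOB_DONE payload and re-arms timeout detection while jobs remain
 * in flight.
 */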
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		       struct vpu_jsm_msg *jsm_msg)
{
	struct vpu_ipc_msg_payload_job_done *payload;
	int ret;

	if (!jsm_msg) {
		ivpu_err(vdev, "IPC message has no JSM payload\n");
		return;
	}

	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
		return;
	}

	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
		ivpu_start_job_timeout_detection(vdev);
}

void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
}

void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}