ivpu_job.c (8a27ad81f7d3a4bc30e00e334a369b69c5f8da90; previous revision 60a2f25de7b8b785baee2932db932ae9a5b8c86d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <drm/drm_file.h>

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "vpu_boot_api.h"

#define CMD_BUF_IDX 0
#define JOB_ID_JOB_MASK GENMASK(7, 0)
#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535

static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
        ivpu_hw_db_set(vdev, cmdq->db_id);
}

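/*
 * Allocate primary and secondary preemption buffers for a command queue.
 * They are carved out of the ends of the user and shave address ranges and
 * are needed only when the HW scheduler is in use.
 */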
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
                                          struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
        u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
        struct ivpu_addr_range range;

        if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
                return 0;

        range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
        range.end = vdev->hw->ranges.user.end;
        cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, primary_size,
                                                   DRM_IVPU_BO_WC);
        if (!cmdq->primary_preempt_buf) {
                ivpu_err(vdev, "Failed to create primary preemption buffer\n");
                return -ENOMEM;
        }

        range.start = vdev->hw->ranges.shave.end - (secondary_size * IVPU_NUM_CMDQS_PER_CTX);
        range.end = vdev->hw->ranges.shave.end;
        cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, secondary_size,
                                                     DRM_IVPU_BO_WC);
        if (!cmdq->secondary_preempt_buf) {
                ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
                goto err_free_primary;
        }

        return 0;

err_free_primary:
        ivpu_bo_free(cmdq->primary_preempt_buf);
        return -ENOMEM;
}

static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
                                         struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
                return;

        drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
        drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
        ivpu_bo_free(cmdq->primary_preempt_buf);
        ivpu_bo_free(cmdq->secondary_preempt_buf);
}

static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
        struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_cmdq *cmdq;
        int ret;

        cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
        if (!cmdq)
                return NULL;

        ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
        if (ret) {
                ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
                goto err_free_cmdq;
        }

        cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
        if (!cmdq->mem)
                goto err_erase_xa;

        ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
        if (ret)
                goto err_free_cmdq_mem;

        return cmdq;

err_free_cmdq_mem:
        ivpu_bo_free(cmdq->mem);
err_erase_xa:
        xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
        kfree(cmdq);
        return NULL;
}

static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        if (!cmdq)
                return;

        ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
        ivpu_bo_free(cmdq->mem);
        xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
        kfree(cmdq);
}

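/*
 * Ask the FW to create the command queue and set its scheduling properties.
 * Used only when the HW scheduler is enabled.
 */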
static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
                              u8 priority)
{
        struct ivpu_device *vdev = file_priv->vdev;
        int ret;

        ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
                                       task_pid_nr(current), engine,
                                       cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
        if (ret)
                return ret;

        ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
                                                        priority);
        if (ret)
                return ret;

        return 0;
}

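/* Register the doorbell with either the HW or the OS scheduler, depending on the mode. */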
static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        struct ivpu_device *vdev = file_priv->vdev;
        int ret;

        if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
                ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
                                               cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
        else
                ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
                                           cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));

        if (!ret)
                ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);

        return ret;
}

static int
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
{
        struct ivpu_device *vdev = file_priv->vdev;
        struct vpu_job_queue_header *jobq_header;
        int ret;

        lockdep_assert_held(&file_priv->lock);

        if (cmdq->db_registered)
                return 0;

        cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
                                  sizeof(struct vpu_job_queue_entry));

        cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
        jobq_header = &cmdq->jobq->header;
        jobq_header->engine_idx = engine;
        jobq_header->head = 0;
        jobq_header->tail = 0;
        wmb(); /* Flush WC buffer for jobq->header */

        if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
                ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
                if (ret)
                        return ret;
        }

        ret = ivpu_register_db(file_priv, cmdq);
        if (ret)
                return ret;

        cmdq->db_registered = true;

        return 0;
}

static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
        struct ivpu_device *vdev = file_priv->vdev;
        int ret;

        lockdep_assert_held(&file_priv->lock);

        if (!cmdq->db_registered)
                return 0;

        cmdq->db_registered = false;

        if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
                ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
                if (!ret)
                        ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
        }

        ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
        if (!ret)
                ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);

        return 0;
}

static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
                                           u8 priority)
{
        int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
        struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
        int ret;

        lockdep_assert_held(&file_priv->lock);

        if (!cmdq) {
                cmdq = ivpu_cmdq_alloc(file_priv);
                if (!cmdq)
                        return NULL;
                file_priv->cmdq[cmdq_idx] = cmdq;
        }

        ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
        if (ret)
                return NULL;

        return cmdq;
}

static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
{
        int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
        struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];

        lockdep_assert_held(&file_priv->lock);

        if (cmdq) {
                file_priv->cmdq[cmdq_idx] = NULL;
                ivpu_cmdq_fini(file_priv, cmdq);
                ivpu_cmdq_free(file_priv, cmdq);
        }
}

void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
        u16 engine;
        u8 priority;

        lockdep_assert_held(&file_priv->lock);

        for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
                for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
                        ivpu_cmdq_release_locked(file_priv, engine, priority);
}

/*
 * Mark the doorbell as unregistered.
 * This function needs to be called when the VPU hardware is restarted
 * and FW loses job queue state. The next time a job queue is used, it
 * will be registered again.
 */
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
        u16 engine;
        u8 priority;

        mutex_lock(&file_priv->lock);

        for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
                for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
                        int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
                        struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];

                        if (cmdq)
                                cmdq->db_registered = false;
                }
        }

        mutex_unlock(&file_priv->lock);
}

void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
        struct ivpu_file_priv *file_priv;
        unsigned long ctx_id;

        mutex_lock(&vdev->context_list_lock);

        xa_for_each(&vdev->context_xa, ctx_id, file_priv)
                ivpu_cmdq_reset(file_priv);

        mutex_unlock(&vdev->context_list_lock);
}

static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
        struct ivpu_device *vdev = job->vdev;
        struct vpu_job_queue_header *header = &cmdq->jobq->header;
        struct vpu_job_queue_entry *entry;
        u32 tail = READ_ONCE(header->tail);

--- 7 unchanged lines hidden ---

        }

        entry = &cmdq->jobq->job[tail];
        entry->batch_buf_addr = job->cmd_buf_vpu_addr;
        entry->job_id = job->job_id;
        entry->flags = 0;
        if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
                entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
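
        /*
         * With the HW scheduler, point the job entry at this queue's preemption
         * buffers (skipped when preemption is disabled in test mode).
         */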
        if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
            (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
                entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
                entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
                entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
                entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
        }

        wmb(); /* Ensure that tail is updated after filling entry */
        header->tail = next_entry;
        wmb(); /* Flush WC buffer for jobq header */

        return 0;
}

struct ivpu_fence {

--- 80 unchanged lines hidden ---

        ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
        return job;

err_free_job:
        kfree(job);
        return NULL;
}

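/*
 * Remove a job from the submitted-jobs XArray. When the last in-flight job is
 * removed, fold the time elapsed since the first submission into busy_time.
 */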
static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
{
        struct ivpu_job *job;

        xa_lock(&vdev->submitted_jobs_xa);
        job = __xa_erase(&vdev->submitted_jobs_xa, job_id);

        if (xa_empty(&vdev->submitted_jobs_xa) && job) {
                vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
                                            vdev->busy_time);
        }

        xa_unlock(&vdev->submitted_jobs_xa);

        return job;
}

static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
        struct ivpu_job *job;

        job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
        if (!job)
                return -ENOENT;

        if (job->file_priv->has_mmu_faults)
                job_status = DRM_IVPU_JOB_STATUS_ABORTED;

        job->bos[CMD_BUF_IDX]->job_status = job_status;
        dma_fence_signal(job->done_fence);

--- 12 unchanged lines hidden ---

{
        struct ivpu_job *job;
        unsigned long id;

        xa_for_each(&vdev->submitted_jobs_xa, id, job)
                ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}

static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
{
        struct ivpu_file_priv *file_priv = job->file_priv;
        struct ivpu_device *vdev = job->vdev;
        struct xa_limit job_id_range;
        struct ivpu_cmdq *cmdq;
        bool is_first_job;
        int ret;

        ret = ivpu_rpm_get(vdev);
        if (ret < 0)
                return ret;

        mutex_lock(&file_priv->lock);

        cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
        if (!cmdq) {
                ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
                                      file_priv->ctx.id, job->engine_idx, priority);
                ret = -EINVAL;
                goto err_unlock_file_priv;
        }

        job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
        job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;

        xa_lock(&vdev->submitted_jobs_xa);
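        /* If the device was idle, this job starts the busy-time measurement */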
        is_first_job = xa_empty(&vdev->submitted_jobs_xa);
        ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
        if (ret) {
                ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
                         file_priv->ctx.id);
                ret = -EBUSY;
                goto err_unlock_submitted_jobs_xa;
        }

        ret = ivpu_cmdq_push_job(cmdq, job);
        if (ret)
                goto err_erase_xa;

        ivpu_start_job_timeout_detection(vdev);

        if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
                cmdq->jobq->header.head = cmdq->jobq->header.tail;
                wmb(); /* Flush WC buffer for jobq header */
        } else {
                ivpu_cmdq_ring_db(vdev, cmdq);
                if (is_first_job)
                        vdev->busy_start_ts = ktime_get();
        }

        ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
                 job->job_id, file_priv->ctx.id, job->engine_idx, priority,
                 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);

        xa_unlock(&vdev->submitted_jobs_xa);

        mutex_unlock(&file_priv->lock);

        if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
                ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);

--- 71 unchanged lines hidden ---

unlock_reservations:
        drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);

        wmb(); /* Flush write combining buffers */

        return ret;
}

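/*
 * Map a DRM job priority to an HWS priority: DEFAULT maps to NORMAL and the
 * remaining levels shift down by one.
 */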
static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
{
        if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
                return DRM_IVPU_JOB_PRIORITY_NORMAL;

        return priority - 1;
}

int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct drm_ivpu_submit *params = data;
        struct ivpu_job *job;
        u32 *buf_handles;
        int idx, ret;
        u8 priority;

        if (params->engine > DRM_IVPU_ENGINE_COPY)
                return -EINVAL;

        if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
                return -EINVAL;

        if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
--- 37 unchanged lines hidden (view full) ---

693
694 ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
695 params->commands_offset);
696 if (ret) {
697 ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
698 goto err_destroy_job;
699 }
700
701 priority = ivpu_job_to_hws_priority(file_priv, params->priority);
702
528 down_read(&vdev->pm->reset_lock);
529 ret = ivpu_job_submit(job);
704 ret = ivpu_job_submit(job, priority);
530 up_read(&vdev->pm->reset_lock);
531 if (ret)
532 goto err_signal_fence;
533
534 drm_dev_exit(idx);
535 kfree(buf_handles);
536 return ret;
537

--- 44 unchanged lines hidden ---
        up_read(&vdev->pm->reset_lock);
        if (ret)
                goto err_signal_fence;

        drm_dev_exit(idx);
        kfree(buf_handles);
        return ret;


--- 44 unchanged lines hidden ---