// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_ring_ops.h"

#include <generated/xe_wa_oob.h>

#include "instructions/xe_gpu_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_exec_queue.h"
#include "xe_gt_types.h"
#include "xe_lrc.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_wa.h"

/*
 * 3D-related flags that can't be set on _engines_ that lack access to the 3D
 * pipeline (i.e., CCS engines).
 */
#define PIPE_CONTROL_3D_ENGINE_FLAGS ( \
	PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
	PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
	PIPE_CONTROL_TILE_CACHE_FLUSH | \
	PIPE_CONTROL_DEPTH_STALL | \
	PIPE_CONTROL_STALL_AT_SCOREBOARD | \
	PIPE_CONTROL_PSD_SYNC | \
	PIPE_CONTROL_AMFS_FLUSH | \
	PIPE_CONTROL_VF_CACHE_INVALIDATE | \
	PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)

/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
	PIPE_CONTROL_3D_ENGINE_FLAGS | \
	PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
	PIPE_CONTROL_FLUSH_ENABLE | \
	PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
	PIPE_CONTROL_DC_FLUSH_ENABLE)

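/*
 * MI_ARB_CHECK with bit 8 set acts as a masked write: bit 0 then toggles
 * the command streamer pre-parser, which gets disabled around
 * invalidations so prefetched state isn't stale.
 */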
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | BIT(8) | state;
}

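/*
 * Write AUX_INV to the engine's aux-table invalidation register via LRI,
 * then poll the register with a semaphore wait until it reads back zero,
 * i.e. until the hardware reports the invalidation as complete.
 */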
static u32 *
__emit_aux_table_inv(u32 *cmd, const struct xe_reg reg, u32 adj_offset)
{
	*cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1) |
		 MI_LRI_MMIO_REMAP_EN;
	*cmd++ = reg.addr + adj_offset;
	*cmd++ = AUX_INV;
	*cmd++ = MI_SEMAPHORE_WAIT_TOKEN | MI_SEMAPHORE_REGISTER_POLL |
		 MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_EQ_SDD;
	*cmd++ = 0;
	*cmd++ = reg.addr + adj_offset;
	*cmd++ = 0;
	*cmd++ = 0;

	return cmd;
}

static u32 *emit_aux_table_inv_render_compute(struct xe_gt *gt, u32 *cmd)
{
	return __emit_aux_table_inv(cmd, CCS_AUX_INV, gt->mmio.adj_offset);
}

static u32 *emit_aux_table_inv_video_decode(struct xe_gt *gt, u32 *cmd)
{
	return __emit_aux_table_inv(cmd, VD0_AUX_INV, gt->mmio.adj_offset);
}

static u32 *emit_aux_table_inv_video_enhance(struct xe_gt *gt, u32 *cmd)
{
	return __emit_aux_table_inv(cmd, VE0_AUX_INV, gt->mmio.adj_offset);
}

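/*
 * Emit an aux-table invalidation if the ring ops for this engine class
 * provide an emitter; otherwise leave the command stream untouched.
 */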
static int emit_aux_table_inv(struct xe_hw_engine *hwe, u32 *dw, int i)
{
	struct xe_gt *gt = hwe->gt;
	u32 *(*emit)(struct xe_gt *gt, u32 *cmd) =
		gt->ring_ops[hwe->class]->emit_aux_table_inv;

	if (emit)
		return emit(gt, dw + i) - dw;
	else
		return i;
}

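/*
 * Raise a user interrupt and re-enable arbitration, which the emit-job
 * paths disable ahead of fence signaling; the trailing MI_ARB_CHECK adds
 * a preemption point.
 */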
static int emit_user_interrupt(u32 *dw, int i)
{
	dw[i++] = MI_USER_INTERRUPT;
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	dw[i++] = MI_ARB_CHECK;

	return i;
}

static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
	dw[i++] = addr;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}

static int emit_flush_dw(u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
	dw[i++] = 0;
	dw[i++] = 0;
	dw[i++] = 0;

	return i;
}

static int emit_flush_imm_ggtt(u32 addr, u32 value, u32 flags, u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
		  flags;
	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}

static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
{
	dw[i++] = MI_BATCH_BUFFER_START | ppgtt_flag | XE_INSTR_NUM_DW(3);
	dw[i++] = lower_32_bits(batch_addr);
	dw[i++] = upper_32_bits(batch_addr);

	return i;
}

static int emit_flush_invalidate(u32 addr, u32 val, u32 flush_flags, u32 *dw, int i)
{
	/* Forward only the TLB-invalidation bit from flush_flags */
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
		  (flush_flags & MI_INVALIDATE_TLB);

	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = val;

	return i;
}

static int
emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, u32 value)
{
	dw[i++] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
	dw[i++] = bit_group_1;
	dw[i++] = offset;
	dw[i++] = 0;
	dw[i++] = value;
	dw[i++] = 0;

	return i;
}

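/*
 * Invalidate the command, instruction, texture, VF, constant and state
 * caches (minus whatever the caller masks out), optionally the TLBs as
 * well, with a post-sync QW write to per-context scratch space.
 */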
static int emit_pipe_invalidate(struct xe_exec_queue *q, u32 mask_flags,
				bool invalidate_tlb, u32 *dw, int i)
{
	u32 flags0 = 0;
	u32 flags1 = PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
		PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
		PIPE_CONTROL_VF_CACHE_INVALIDATE |
		PIPE_CONTROL_CONST_CACHE_INVALIDATE |
		PIPE_CONTROL_STATE_CACHE_INVALIDATE |
		PIPE_CONTROL_QW_WRITE |
		PIPE_CONTROL_STORE_DATA_INDEX;

	if (invalidate_tlb)
		flags1 |= PIPE_CONTROL_TLB_INVALIDATE;

	if (xe_exec_queue_is_multi_queue(q))
		flags0 |= PIPE_CONTROL0_QUEUE_DRAIN_MODE;
	else
		flags1 |= PIPE_CONTROL_CS_STALL;

	flags1 &= ~mask_flags;

	if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
		flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;

	return emit_pipe_control(dw, i, flags0, flags1,
				 LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}

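/* 64-bit immediate store through the PPGTT; the write is posted. */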
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
				       u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(1);
	dw[i++] = lower_32_bits(addr);
	dw[i++] = upper_32_bits(addr);
	dw[i++] = lower_32_bits(value);
	dw[i++] = upper_32_bits(value);

	return i;
}

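/*
 * Flush the render-related caches (tile, render target, depth, DC),
 * dropping any 3D-only flags the engine or platform can't accept.
 */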
static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
{
	struct xe_exec_queue *q = job->q;
	struct xe_gt *gt = q->gt;
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	u32 flags0, flags1;

	if (XE_GT_WA(gt, 14016712196))
		i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
				      LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);

	flags0 = PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
	flags1 = (PIPE_CONTROL_TILE_CACHE_FLUSH |
		  PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		  PIPE_CONTROL_DC_FLUSH_ENABLE |
		  PIPE_CONTROL_FLUSH_ENABLE);

	if (XE_GT_WA(gt, 1409600907))
		flags1 |= PIPE_CONTROL_DEPTH_STALL;

	if (lacks_render)
		flags1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (q->class == XE_ENGINE_CLASS_COMPUTE)
		flags1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	if (xe_exec_queue_is_multi_queue(q))
		flags0 |= PIPE_CONTROL0_QUEUE_DRAIN_MODE;
	else
		flags1 |= PIPE_CONTROL_CS_STALL;

	return emit_pipe_control(dw, i, flags0, flags1, 0, 0);
}

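/*
 * PIPE_CONTROL-based immediate write to a GGTT address, either stall-only
 * or combined with a flush.
 */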
static int emit_pipe_imm_ggtt(struct xe_exec_queue *q, u32 addr, u32 value,
			      bool stall_only, u32 *dw, int i)
{
	u32 flags0 = 0, flags1 = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE;

	if (!stall_only)
		flags1 |= PIPE_CONTROL_FLUSH_ENABLE;

	if (xe_exec_queue_is_multi_queue(q))
		flags0 |= PIPE_CONTROL0_QUEUE_DRAIN_MODE;
	else
		flags1 |= PIPE_CONTROL_CS_STALL;

	return emit_pipe_control(dw, i, flags0, flags1, addr, value);
}

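/*
 * BIT(8) in MI_BATCH_BUFFER_START selects the PPGTT address space; jobs
 * without a VM, or forced to the GGTT, run with the flag cleared.
 */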
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
	if (job->q->vm && !job->ggtt)
		return BIT(8);

	return 0;
}

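/*
 * Snapshot RING_CTX_TIMESTAMP into the LRC's job timestamp slot at the
 * start of the job.
 */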
static int emit_copy_timestamp(struct xe_device *xe, struct xe_lrc *lrc,
			       u32 *dw, int i)
{
	dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
	dw[i++] = RING_CTX_TIMESTAMP(0).addr;
	dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
	dw[i++] = 0;

	/*
	 * Ensure CTX timestamp >= job timestamp when the VF samples them, to
	 * avoid arithmetic wraparound in the TDR.
	 */
	if (IS_SRIOV_VF(xe)) {
		dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT |
			  MI_SRM_ADD_CS_OFFSET;
		dw[i++] = RING_CTX_TIMESTAMP(0).addr;
		dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
		dw[i++] = 0;
	}

	return i;
}

static int emit_fake_watchdog(struct xe_lrc *lrc, u32 *dw, int i)
{
	/*
	 * Set up a watchdog with an impossible condition so that it always
	 * triggers a hardware interrupt, forcing the GuC to reset the engine.
	 */

	dw[i++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | MI_LRI_LRM_CS_MMIO;
	dw[i++] = PR_CTR_THRSH(0).addr;
	dw[i++] = 2; /* small threshold */
	dw[i++] = PR_CTR_CTRL(0).addr;
	dw[i++] = CTR_LOGIC_OP(START);

	dw[i++] = MI_SEMAPHORE_WAIT | MI_SEMW_GGTT | MI_SEMW_POLL | MI_SEMW_COMPARE(SAD_EQ_SDD);
	dw[i++] = 0xdead; /* this should never be seen */
	dw[i++] = lower_32_bits(xe_lrc_ggtt_addr(lrc));
	dw[i++] = upper_32_bits(xe_lrc_ggtt_addr(lrc));
	dw[i++] = 0; /* unused token */

	dw[i++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1) | MI_LRI_LRM_CS_MMIO;
	dw[i++] = PR_CTR_CTRL(0).addr;
	dw[i++] = CTR_LOGIC_OP(STOP);

	return i;
}

/* For engines that don't require any special HW handling (no EUs, no aux inval, etc.) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
				    u64 batch_addr, u32 *head, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;

	*head = lrc->ring.tail;

	if (job->ring_ops_force_reset)
		i = emit_fake_watchdog(lrc, dw, i);

	i = emit_copy_timestamp(gt_to_xe(gt), lrc, dw, i);

	if (job->ring_ops_flush_tlb) {
		dw[i++] = preparser_disable(true);
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, MI_INVALIDATE_TLB, dw, i);
		dw[i++] = preparser_disable(false);
	} else {
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);
	}

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	/* Don't preempt fence signaling */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);
	}

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static bool has_aux_ccs(struct xe_device *xe)
{
	/*
	 * PVC is a special case that has no compression of either type
	 * (FlatCCS or AuxCCS). Also, AuxCCS is no longer used from Xe2
	 * onward, so any future platforms with no FlatCCS will not have
	 * AuxCCS, and we explicitly do not want to support it on MTL.
	 */
	if (GRAPHICS_VERx100(xe) >= 1270 || xe->info.platform == XE_PVC)
		return false;

	return !xe->info.has_flat_ccs;
}

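/*
 * Like the simple path, but video engines additionally invalidate the aux
 * table before the batch starts; emit_aux_table_inv() is a no-op on
 * platforms without AuxCCS.
 */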
static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
				   u64 batch_addr, u32 *head, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);

	*head = lrc->ring.tail;

	if (job->ring_ops_force_reset)
		i = emit_fake_watchdog(lrc, dw, i);

	i = emit_copy_timestamp(xe, lrc, dw, i);

	dw[i++] = preparser_disable(true);

	/* hsdes: 1809175790 */
	i = emit_aux_table_inv(job->q->hwe, dw, i);

	if (job->ring_ops_flush_tlb)
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, MI_INVALIDATE_TLB, dw, i);

	dw[i++] = preparser_disable(false);

	if (!job->ring_ops_flush_tlb)
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	/* Don't preempt fence signaling */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);
	}

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

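/*
 * Render/compute jobs use PIPE_CONTROL for invalidation and flushing,
 * masking out 3D flags that the engine or platform can't accept.
 */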
static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
					    struct xe_lrc *lrc,
					    u64 batch_addr, u32 *head,
					    u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	u32 mask_flags = 0;

	*head = lrc->ring.tail;

	if (job->ring_ops_force_reset)
		i = emit_fake_watchdog(lrc, dw, i);

	i = emit_copy_timestamp(xe, lrc, dw, i);

	/*
	 * On AuxCCS platforms the invalidation of the Aux table requires
	 * quiescing the memory traffic beforehand.
	 */
	if (has_aux_ccs(xe))
		i = emit_render_cache_flush(job, dw, i);

	dw[i++] = preparser_disable(true);
	if (lacks_render)
		mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
		mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;

	/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
	i = emit_pipe_invalidate(job->q, mask_flags, job->ring_ops_flush_tlb, dw, i);

	/* hsdes: 1809175790 */
	i = emit_aux_table_inv(job->q->hwe, dw, i);

	dw[i++] = preparser_disable(false);

	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
				seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	/* Don't preempt fence signaling */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	i = emit_render_cache_flush(job, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_pipe_imm_ggtt(job->q, xe_lrc_seqno_ggtt_addr(lrc), seqno, lacks_render, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

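/*
 * Migration jobs run two batch buffers back to back, with a flush and
 * invalidation emitted between them; the PPGTT flag is set
 * unconditionally on both MI_BATCH_BUFFER_STARTs.
 */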
static void emit_migration_job_gen12(struct xe_sched_job *job,
				     struct xe_lrc *lrc, u32 *head,
				     u32 seqno)
{
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
	u32 dw[MAX_JOB_SIZE_DW], i = 0;

	*head = lrc->ring.tail;

	xe_gt_assert(gt, !job->ring_ops_force_reset);

	i = emit_copy_timestamp(xe, lrc, dw, i);

	i = emit_store_imm_ggtt(saddr, seqno, dw, i);

	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */

	i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);

	dw[i++] = preparser_disable(true);
	i = emit_flush_invalidate(saddr, seqno, job->migrate_flush_flags, dw, i);
	dw[i++] = preparser_disable(false);

	i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno,
				job->migrate_flush_flags,
				dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static void emit_job_gen12_gsc(struct xe_sched_job *job)
{
	struct xe_gt *gt = job->q->gt;

	xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */

	__emit_job_gen12_simple(job, job->q->lrc[0],
				job->ptrs[0].batch_addr,
				&job->ptrs[0].head,
				xe_sched_job_lrc_seqno(job));
}

static void emit_job_gen12_copy(struct xe_sched_job *job)
{
	int i;

	if (xe_sched_job_is_migration(job->q)) {
		emit_migration_job_gen12(job, job->q->lrc[0],
					 &job->ptrs[0].head,
					 xe_sched_job_lrc_seqno(job));
		return;
	}

	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_simple(job, job->q->lrc[i],
					job->ptrs[i].batch_addr,
					&job->ptrs[i].head,
					xe_sched_job_lrc_seqno(job));
}

static void emit_job_gen12_video(struct xe_sched_job *job)
{
	int i;

	/* FIXME: Not doing parallel handshake for now */
	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_video(job, job->q->lrc[i],
				       job->ptrs[i].batch_addr,
				       &job->ptrs[i].head,
				       xe_sched_job_lrc_seqno(job));
}

static void emit_job_gen12_render_compute(struct xe_sched_job *job)
{
	int i;

	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_render_compute(job, job->q->lrc[i],
						job->ptrs[i].batch_addr,
						&job->ptrs[i].head,
						xe_sched_job_lrc_seqno(job));
}

static const struct xe_ring_ops ring_ops_gen12_gsc = {
	.emit_job = emit_job_gen12_gsc,
};

static const struct xe_ring_ops ring_ops_gen12_copy = {
	.emit_job = emit_job_gen12_copy,
};

static const struct xe_ring_ops ring_ops_gen12_video_decode = {
	.emit_job = emit_job_gen12_video,
};

static const struct xe_ring_ops ring_ops_gen12_video_enhance = {
	.emit_job = emit_job_gen12_video,
};

static const struct xe_ring_ops ring_ops_gen12_render_compute = {
	.emit_job = emit_job_gen12_render_compute,
};

static const struct xe_ring_ops auxccs_ring_ops_gen12_video_decode = {
	.emit_job = emit_job_gen12_video,
	.emit_aux_table_inv = emit_aux_table_inv_video_decode,
};

static const struct xe_ring_ops auxccs_ring_ops_gen12_video_enhance = {
	.emit_job = emit_job_gen12_video,
	.emit_aux_table_inv = emit_aux_table_inv_video_enhance,
};

static const struct xe_ring_ops auxccs_ring_ops_gen12_render_compute = {
	.emit_job = emit_job_gen12_render_compute,
	.emit_aux_table_inv = emit_aux_table_inv_render_compute,
};

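/**
 * xe_ring_ops_get() - Select the ring ops for an engine class
 * @gt: the &xe_gt the engine belongs to
 * @class: the engine class
 *
 * AuxCCS-capable platforms get the variants that also emit aux-table
 * invalidations.
 *
 * Return: the matching &xe_ring_ops, or NULL for an unknown class.
 */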
const struct xe_ring_ops *
xe_ring_ops_get(struct xe_gt *gt, enum xe_engine_class class)
{
	struct xe_device *xe = gt_to_xe(gt);

	switch (class) {
	case XE_ENGINE_CLASS_OTHER:
		return &ring_ops_gen12_gsc;
	case XE_ENGINE_CLASS_COPY:
		return &ring_ops_gen12_copy;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		if (has_aux_ccs(xe))
			return &auxccs_ring_ops_gen12_video_decode;
		else
			return &ring_ops_gen12_video_decode;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		if (has_aux_ccs(xe))
			return &auxccs_ring_ops_gen12_video_enhance;
		else
			return &ring_ops_gen12_video_enhance;
	case XE_ENGINE_CLASS_RENDER:
	case XE_ENGINE_CLASS_COMPUTE:
		if (has_aux_ccs(xe))
			return &auxccs_ring_ops_gen12_render_compute;
		else
			return &ring_ops_gen12_render_compute;
	default:
		return NULL;
	}
}