// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_ring_ops.h"

#include <generated/xe_wa_oob.h>

#include "instructions/xe_gpu_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_exec_queue.h"
#include "xe_gt_types.h"
#include "xe_lrc.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_wa.h"

/*
 * 3D-related flags that can't be set on _engines_ that lack access to the 3D
 * pipeline (i.e., CCS engines).
 */
#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
		PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
		PIPE_CONTROL_TILE_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_STALL | \
		PIPE_CONTROL_STALL_AT_SCOREBOARD | \
		PIPE_CONTROL_PSD_SYNC | \
		PIPE_CONTROL_AMFS_FLUSH | \
		PIPE_CONTROL_VF_CACHE_INVALIDATE | \
		PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)

/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
		PIPE_CONTROL_3D_ENGINE_FLAGS | \
		PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
		PIPE_CONTROL_FLUSH_ENABLE | \
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
		PIPE_CONTROL_DC_FLUSH_ENABLE)

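/*
 * MI_ARB_CHECK doubles as the pre-parser control here (as in i915's
 * preparser_disable()): bit 8 is the write mask for bit 0, the pre-parser
 * disable value.
 */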
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | BIT(8) | state;
}

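/*
 * Invalidate the engine's AuxCCS auxiliary translation table by writing
 * AUX_INV to its AUX_INV register; adj_offset accounts for the media GT
 * register range.
 */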
static int emit_aux_table_inv(struct xe_gt *gt, struct xe_reg reg,
			      u32 *dw, int i)
{
	dw[i++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1) | MI_LRI_MMIO_REMAP_EN;
	dw[i++] = reg.addr + gt->mmio.adj_offset;
	dw[i++] = AUX_INV;
	dw[i++] = MI_NOOP;

	return i;
}

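/*
 * Raise the user interrupt and re-enable arbitration; emitted last, after
 * the MI_ARB_DISABLE that protects fence signaling from preemption.
 */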
static int emit_user_interrupt(u32 *dw, int i)
{
	dw[i++] = MI_USER_INTERRUPT;
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	dw[i++] = MI_ARB_CHECK;

	return i;
}

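/* Store a 32-bit immediate to a GGTT address (e.g. a seqno slot in the LRC). */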
static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
	dw[i++] = addr;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}

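/*
 * Flush-only MI_FLUSH_DW: with MI_FLUSH_DW_OP_STOREDW clear, the post-sync
 * operation is a no-op and only the flush itself takes effect.
 */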
static int emit_flush_dw(u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
	dw[i++] = 0;
	dw[i++] = 0;
	dw[i++] = 0;

	return i;
}

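/*
 * MI_FLUSH_DW with a post-sync immediate-dword write to a GGTT address;
 * callers may fold MI_INVALIDATE_TLB into @flags.
 */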
static int emit_flush_imm_ggtt(u32 addr, u32 value, u32 flags, u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
		  flags;
	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}

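/* Chain to a batch buffer; @ppgtt_flag selects PPGTT vs GGTT addressing. */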
static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
{
	dw[i++] = MI_BATCH_BUFFER_START | ppgtt_flag | XE_INSTR_NUM_DW(3);
	dw[i++] = lower_32_bits(batch_addr);
	dw[i++] = upper_32_bits(batch_addr);

	return i;
}

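/*
 * Like emit_flush_imm_ggtt(), but only the MI_INVALIDATE_TLB bit of
 * @flush_flags is honored; used between the two migration batches.
 */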
static int emit_flush_invalidate(u32 addr, u32 val, u32 flush_flags, u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
		  (flush_flags & MI_INVALIDATE_TLB);
	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = val;

	return i;
}

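/*
 * Emit a 6-dword PIPE_CONTROL: opcode plus bit_group_0, then bit_group_1,
 * then the post-sync address (lo/hi) and value (lo/hi) pairs.
 */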
static int
emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, u32 value)
{
	dw[i++] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
	dw[i++] = bit_group_1;
	dw[i++] = offset;
	dw[i++] = 0;
	dw[i++] = value;
	dw[i++] = 0;

	return i;
}

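/*
 * Pre-batch PIPE_CONTROL invalidating the read-only caches (and optionally
 * the TLBs); @mask_flags strips bits the engine or platform cannot use.
 */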
static int emit_pipe_invalidate(struct xe_exec_queue *q, u32 mask_flags,
				bool invalidate_tlb, u32 *dw, int i)
{
	u32 flags0 = 0;
	u32 flags1 = PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
		PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
		PIPE_CONTROL_VF_CACHE_INVALIDATE |
		PIPE_CONTROL_CONST_CACHE_INVALIDATE |
		PIPE_CONTROL_STATE_CACHE_INVALIDATE |
		PIPE_CONTROL_QW_WRITE |
		PIPE_CONTROL_STORE_DATA_INDEX;

	if (invalidate_tlb)
		flags1 |= PIPE_CONTROL_TLB_INVALIDATE;

	if (xe_exec_queue_is_multi_queue(q))
		flags0 |= PIPE_CONTROL0_QUEUE_DRAIN_MODE;
	else
		flags1 |= PIPE_CONTROL_CS_STALL;

	flags1 &= ~mask_flags;

	if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
		flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;

	return emit_pipe_control(dw, i, flags0, flags1,
				 LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}

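/*
 * Store a 64-bit immediate through the PPGTT (no MI_SDI_GGTT); used to
 * write user fence values into the VM's address space.
 */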
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
				       u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(1);
	dw[i++] = lower_32_bits(addr);
	dw[i++] = upper_32_bits(addr);
	dw[i++] = lower_32_bits(value);
	dw[i++] = upper_32_bits(value);

	return i;
}

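/*
 * Post-batch PIPE_CONTROL flushing the render/compute caches, with 3D-only
 * bits masked off on engines or platforms that lack the 3D pipeline.
 */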
static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
{
	struct xe_exec_queue *q = job->q;
	struct xe_gt *gt = q->gt;
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	u32 flags0, flags1;

	if (XE_GT_WA(gt, 14016712196))
		i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
				      LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);

	flags0 = PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
	flags1 = (PIPE_CONTROL_TILE_CACHE_FLUSH |
		 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE);

	if (XE_GT_WA(gt, 1409600907))
		flags1 |= PIPE_CONTROL_DEPTH_STALL;

	if (lacks_render)
		flags1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
		flags1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	if (xe_exec_queue_is_multi_queue(q))
		flags0 |= PIPE_CONTROL0_QUEUE_DRAIN_MODE;
	else
		flags1 |= PIPE_CONTROL_CS_STALL;

	return emit_pipe_control(dw, i, flags0, flags1, 0, 0);
}

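/*
 * PIPE_CONTROL posting @value to a GGTT address; used for the final seqno
 * write on render/compute engines.
 */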
static int emit_pipe_imm_ggtt(struct xe_exec_queue *q, u32 addr, u32 value,
			      bool stall_only, u32 *dw, int i)
{
	u32 flags0 = 0, flags1 = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE;

	if (!stall_only)
		flags1 |= PIPE_CONTROL_FLUSH_ENABLE;

	if (xe_exec_queue_is_multi_queue(q))
		flags0 |= PIPE_CONTROL0_QUEUE_DRAIN_MODE;
	else
		flags1 |= PIPE_CONTROL_CS_STALL;

	return emit_pipe_control(dw, i, flags0, flags1, addr, value);
}

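/*
 * BIT(8) in MI_BATCH_BUFFER_START is the address space indicator: set for
 * PPGTT, clear for GGTT. Jobs with a VM run from the PPGTT unless flagged
 * as GGTT submissions.
 */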
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
	if (job->q->vm && !job->ggtt)
		return BIT(8);

	return 0;
}

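/*
 * Snapshot RING_CTX_TIMESTAMP into the LRC's job timestamp slot via SRM,
 * recording when the job actually started on the hardware.
 */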
static int emit_copy_timestamp(struct xe_device *xe, struct xe_lrc *lrc,
			       u32 *dw, int i)
{
	dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
	dw[i++] = RING_CTX_TIMESTAMP(0).addr;
	dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
	dw[i++] = 0;

	/*
	 * Ensure CTX timestamp >= Job timestamp during VF sampling to avoid
	 * arithmetic wraparound in TDR.
	 */
	if (IS_SRIOV_VF(xe)) {
		dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT |
			MI_SRM_ADD_CS_OFFSET;
		dw[i++] = RING_CTX_TIMESTAMP(0).addr;
		dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
		dw[i++] = 0;
	}

	return i;
}

/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
				    u64 batch_addr, u32 *head, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;

	*head = lrc->ring.tail;

	i = emit_copy_timestamp(gt_to_xe(gt), lrc, dw, i);

	if (job->ring_ops_flush_tlb) {
		dw[i++] = preparser_disable(true);
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, MI_INVALIDATE_TLB, dw, i);
		dw[i++] = preparser_disable(false);
	} else {
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);
	}

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	/* Don't preempt fence signaling */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);
	}

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static bool has_aux_ccs(struct xe_device *xe)
{
	/*
	 * PVC is a special case that has no compression of either type
	 * (FlatCCS or AuxCCS).  Also, AuxCCS is no longer used from Xe2
	 * onward, so any future platforms with no FlatCCS will not have
	 * AuxCCS either.
	 */
	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		return false;

	return !xe->info.has_flat_ccs;
}

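/*
 * Video engines additionally need the AuxCCS table invalidation (hsdes
 * 1809175790), emitted while the pre-parser is disabled.
 */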
static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
				   u64 batch_addr, u32 *head, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;

	*head = lrc->ring.tail;

	i = emit_copy_timestamp(xe, lrc, dw, i);

	dw[i++] = preparser_disable(true);

	/* hsdes: 1809175790 */
	if (has_aux_ccs(xe)) {
		if (decode)
			i = emit_aux_table_inv(gt, VD0_AUX_INV, dw, i);
		else
			i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
	}

	if (job->ring_ops_flush_tlb)
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, MI_INVALIDATE_TLB, dw, i);

	dw[i++] = preparser_disable(false);

	if (!job->ring_ops_flush_tlb)
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	/* Don't preempt fence signaling */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	if (job->user_fence.used) {
		i = emit_flush_dw(dw, i);
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);
	}

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

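/*
 * Render/compute jobs bracket the batch with a PIPE_CONTROL invalidation
 * before and a render cache flush after; the seqno is also written with a
 * PIPE_CONTROL rather than MI_FLUSH_DW.
 */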
static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
					    struct xe_lrc *lrc,
					    u64 batch_addr, u32 *head,
					    u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	u32 mask_flags = 0;

	*head = lrc->ring.tail;

	i = emit_copy_timestamp(xe, lrc, dw, i);

	dw[i++] = preparser_disable(true);
	if (lacks_render)
		mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
		mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;

	/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
	i = emit_pipe_invalidate(job->q, mask_flags, job->ring_ops_flush_tlb, dw, i);

	/* hsdes: 1809175790 */
	if (has_aux_ccs(xe))
		i = emit_aux_table_inv(gt, CCS_AUX_INV, dw, i);

	dw[i++] = preparser_disable(false);

	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
				seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	/* Don't preempt fence signaling */
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	i = emit_render_cache_flush(job, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_pipe_imm_ggtt(job->q, xe_lrc_seqno_ggtt_addr(lrc), seqno, lacks_render, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

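/*
 * Migration jobs chain two batches (PTE update, then the actual copy or
 * clear) with a flush/TLB-invalidate between them so the second batch sees
 * the first one's page table writes.
 */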
static void emit_migration_job_gen12(struct xe_sched_job *job,
				     struct xe_lrc *lrc, u32 *head,
				     u32 seqno)
{
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
	u32 dw[MAX_JOB_SIZE_DW], i = 0;

	*head = lrc->ring.tail;

	i = emit_copy_timestamp(xe, lrc, dw, i);

	i = emit_store_imm_ggtt(saddr, seqno, dw, i);

	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */

	i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);

	dw[i++] = preparser_disable(true);
	i = emit_flush_invalidate(saddr, seqno, job->migrate_flush_flags, dw, i);
	dw[i++] = preparser_disable(false);

	i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno,
				job->migrate_flush_flags,
				dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static void emit_job_gen12_gsc(struct xe_sched_job *job)
{
	struct xe_gt *gt = job->q->gt;

	xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */

	__emit_job_gen12_simple(job, job->q->lrc[0],
				job->ptrs[0].batch_addr,
				&job->ptrs[0].head,
				xe_sched_job_lrc_seqno(job));
}

static void emit_job_gen12_copy(struct xe_sched_job *job)
{
	int i;

	if (xe_sched_job_is_migration(job->q)) {
		emit_migration_job_gen12(job, job->q->lrc[0],
					 &job->ptrs[0].head,
					 xe_sched_job_lrc_seqno(job));
		return;
	}

	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_simple(job, job->q->lrc[i],
					job->ptrs[i].batch_addr,
					&job->ptrs[i].head,
					xe_sched_job_lrc_seqno(job));
}

static void emit_job_gen12_video(struct xe_sched_job *job)
{
	int i;

	/* FIXME: Not doing parallel handshake for now */
	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_video(job, job->q->lrc[i],
				       job->ptrs[i].batch_addr,
				       &job->ptrs[i].head,
				       xe_sched_job_lrc_seqno(job));
}

static void emit_job_gen12_render_compute(struct xe_sched_job *job)
{
	int i;

	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_render_compute(job, job->q->lrc[i],
						job->ptrs[i].batch_addr,
						&job->ptrs[i].head,
						xe_sched_job_lrc_seqno(job));
}

static const struct xe_ring_ops ring_ops_gen12_gsc = {
	.emit_job = emit_job_gen12_gsc,
};

static const struct xe_ring_ops ring_ops_gen12_copy = {
	.emit_job = emit_job_gen12_copy,
};

static const struct xe_ring_ops ring_ops_gen12_video = {
	.emit_job = emit_job_gen12_video,
};

static const struct xe_ring_ops ring_ops_gen12_render_compute = {
	.emit_job = emit_job_gen12_render_compute,
};

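/* Map an engine class to its ring ops; GSC engines register as OTHER. */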
const struct xe_ring_ops *
xe_ring_ops_get(struct xe_gt *gt, enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_OTHER:
		return &ring_ops_gen12_gsc;
	case XE_ENGINE_CLASS_COPY:
		return &ring_ops_gen12_copy;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return &ring_ops_gen12_video;
	case XE_ENGINE_CLASS_RENDER:
	case XE_ENGINE_CLASS_COMPUTE:
		return &ring_ops_gen12_render_compute;
	default:
		return NULL;
	}
}