// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_ring_ops.h"

#include <generated/xe_wa_oob.h>

#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h"
#include "xe_exec_queue_types.h"
#include "xe_gt.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_sched_job.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
#include "xe_wa.h"

/*
 * 3D-related flags that can't be set on _engines_ that lack access to the 3D
 * pipeline (i.e., CCS engines).
 */
#define PIPE_CONTROL_3D_ENGINE_FLAGS ( \
		PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
		PIPE_CONTROL_TILE_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_STALL | \
		PIPE_CONTROL_STALL_AT_SCOREBOARD | \
		PIPE_CONTROL_PSD_SYNC | \
		PIPE_CONTROL_AMFS_FLUSH | \
		PIPE_CONTROL_VF_CACHE_INVALIDATE | \
		PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)

/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
		PIPE_CONTROL_3D_ENGINE_FLAGS | \
		PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
		PIPE_CONTROL_FLUSH_ENABLE | \
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
		PIPE_CONTROL_DC_FLUSH_ENABLE)

static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | BIT(8) | state;
}

static int emit_aux_table_inv(struct xe_gt *gt, struct xe_reg reg,
			      u32 *dw, int i)
{
	dw[i++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1) | MI_LRI_MMIO_REMAP_EN;
	dw[i++] = reg.addr + gt->mmio.adj_offset;
	dw[i++] = AUX_INV;
	dw[i++] = MI_NOOP;

	return i;
}

static int emit_user_interrupt(u32 *dw, int i)
{
	dw[i++] = MI_USER_INTERRUPT;
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	dw[i++] = MI_ARB_CHECK;

	return i;
}

static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
	dw[i++] = addr;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}

static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
			       u32 *dw, int i)
{
	dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
		(invalidate_tlb ? MI_INVALIDATE_TLB : 0);
	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}
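
/*
 * Illustrative sketch, not driver code: every emit_* helper in this file
 * follows the same convention -- append dwords to the staging buffer at
 * index i and return the advanced index -- so an emission sequence composes
 * by threading i through successive calls (addr/value/lrc below are
 * placeholders):
 *
 *	u32 dw[MAX_JOB_SIZE_DW], i = 0;
 *
 *	i = emit_store_imm_ggtt(addr, value, dw, i);
 *	i = emit_user_interrupt(dw, i);
 *	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
 */
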
static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
{
	dw[i++] = MI_BATCH_BUFFER_START | ppgtt_flag | XE_INSTR_NUM_DW(3);
	dw[i++] = lower_32_bits(batch_addr);
	dw[i++] = upper_32_bits(batch_addr);

	return i;
}

static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
{
	dw[i] = MI_FLUSH_DW;
	dw[i] |= flag;
	dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
		MI_FLUSH_DW_STORE_INDEX;

	dw[i++] = LRC_PPHWSP_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = ~0U;

	return i;
}

static int
emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset, u32 value)
{
	dw[i++] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
	dw[i++] = bit_group_1;
	dw[i++] = offset;
	dw[i++] = 0;
	dw[i++] = value;
	dw[i++] = 0;

	return i;
}

static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
				int i)
{
	u32 flags = PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
		PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
		PIPE_CONTROL_VF_CACHE_INVALIDATE |
		PIPE_CONTROL_CONST_CACHE_INVALIDATE |
		PIPE_CONTROL_STATE_CACHE_INVALIDATE |
		PIPE_CONTROL_QW_WRITE |
		PIPE_CONTROL_STORE_DATA_INDEX;

	if (invalidate_tlb)
		flags |= PIPE_CONTROL_TLB_INVALIDATE;

	flags &= ~mask_flags;

	return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
}

static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
				       u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(1);
	dw[i++] = lower_32_bits(addr);
	dw[i++] = upper_32_bits(addr);
	dw[i++] = lower_32_bits(value);
	dw[i++] = upper_32_bits(value);

	return i;
}

static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
{
	struct xe_gt *gt = job->q->gt;
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	u32 flags;

	flags = (PIPE_CONTROL_CS_STALL |
		 PIPE_CONTROL_TILE_CACHE_FLUSH |
		 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE);

	if (XE_WA(gt, 1409600907))
		flags |= PIPE_CONTROL_DEPTH_STALL;

	if (lacks_render)
		flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	return emit_pipe_control(dw, i, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0, 0);
}

static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int i)
{
	if (hwe->class != XE_ENGINE_CLASS_RENDER)
		return i;

	if (XE_WA(hwe->gt, 16020292621))
		i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
				      RING_NOPID(hwe->mmio_base).addr, 0);

	return i;
}

static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
			      int i)
{
	u32 flags = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_GLOBAL_GTT_IVB |
		    PIPE_CONTROL_QW_WRITE;

	if (!stall_only)
		flags |= PIPE_CONTROL_FLUSH_ENABLE;

	return emit_pipe_control(dw, i, 0, flags, addr, value);
}

static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
	return job->q->vm ? BIT(8) : 0;
}
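
/*
 * Illustrative sketch, not driver code: emit_pipe_control() above always
 * emits the full 6-dword PIPE_CONTROL form, so (reading straight off the
 * helpers) a posted seqno write via emit_pipe_imm_ggtt() lands in the ring
 * as:
 *
 *	dw[0]: GFX_OP_PIPE_CONTROL(6)                  (bit_group_0 == 0)
 *	dw[1]: CS_STALL | GLOBAL_GTT_IVB | QW_WRITE [| FLUSH_ENABLE]
 *	dw[2]: GGTT offset of the seqno slot
 *	dw[3]: 0                                       (upper address dword)
 *	dw[4]: seqno value
 *	dw[5]: 0                                       (upper data dword)
 */
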
/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
				    u64 batch_addr, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_vm *vm = job->q->vm;
	struct xe_gt *gt = job->q->gt;

	if (vm && vm->batch_invalidate_tlb) {
		dw[i++] = preparser_disable(true);
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, true, dw, i);
		dw[i++] = preparser_disable(false);
	} else {
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);
	}

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static bool has_aux_ccs(struct xe_device *xe)
{
	/*
	 * PVC is a special case that has no compression of either type
	 * (FlatCCS or AuxCCS). Also, AuxCCS is no longer used from Xe2
	 * onward, so any future platforms with no FlatCCS will not have
	 * AuxCCS either.
	 */
	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		return false;

	return !xe->info.has_flat_ccs;
}

static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
				   u64 batch_addr, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
	struct xe_vm *vm = job->q->vm;

	dw[i++] = preparser_disable(true);

	/* hsdes: 1809175790 */
	if (has_aux_ccs(xe)) {
		if (decode)
			i = emit_aux_table_inv(gt, VD0_AUX_INV, dw, i);
		else
			i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
	}

	if (vm && vm->batch_invalidate_tlb)
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, true, dw, i);

	dw[i++] = preparser_disable(false);

	if (!vm || !vm->batch_invalidate_tlb)
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}
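
/*
 * Illustrative sketch, not driver code: the __emit_job_gen12_* variants
 * above and below all follow the same overall ring layout, differing only
 * in the invalidation and flush commands their engine class requires:
 *
 *	[pre-parser disable + TLB/aux invalidations, where needed]
 *	store or flush the "start" seqno
 *	MI_BATCH_BUFFER_START                   (the batch itself)
 *	[post-batch cache flush, where needed]
 *	[posted user-fence write, if job->user_fence.used]
 *	flush + "completion" seqno write
 *	MI_USER_INTERRUPT
 */
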
static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
					    struct xe_lrc *lrc,
					    u64 batch_addr, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->q->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	struct xe_vm *vm = job->q->vm;
	u32 mask_flags = 0;

	dw[i++] = preparser_disable(true);
	if (lacks_render)
		mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
		mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;

	/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
	i = emit_pipe_invalidate(mask_flags, vm && vm->batch_invalidate_tlb, dw, i);

	/* hsdes: 1809175790 */
	if (has_aux_ccs(xe))
		i = emit_aux_table_inv(gt, CCS_AUX_INV, dw, i);

	dw[i++] = preparser_disable(false);

	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
				seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	i = emit_render_cache_flush(job, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_pipe_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, lacks_render, dw, i);

	i = emit_user_interrupt(dw, i);

	i = emit_pipe_control_to_ring_end(job->q->hwe, dw, i);

	xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static void emit_migration_job_gen12(struct xe_sched_job *job,
				     struct xe_lrc *lrc, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;

	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
				seqno, dw, i);

	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */

	i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);

	/* XXX: Do we need this? Leaving for now. */
	dw[i++] = preparser_disable(true);
	i = emit_flush_invalidate(0, dw, i);
	dw[i++] = preparser_disable(false);

	i = emit_bb_start(job->batch_addr[1], BIT(8), dw, i);

	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW;
	dw[i++] = xe_lrc_seqno_ggtt_addr(lrc) | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = seqno; /* value */

	i = emit_user_interrupt(dw, i);

	xe_gt_assert(job->q->gt, i <= MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static void emit_job_gen12_gsc(struct xe_sched_job *job)
{
	struct xe_gt *gt = job->q->gt;

	xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */

	__emit_job_gen12_simple(job, job->q->lrc,
				job->batch_addr[0],
				xe_sched_job_seqno(job));
}

static void emit_job_gen12_copy(struct xe_sched_job *job)
{
	int i;

	if (xe_sched_job_is_migration(job->q)) {
		emit_migration_job_gen12(job, job->q->lrc,
					 xe_sched_job_seqno(job));
		return;
	}

	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_simple(job, job->q->lrc + i,
					job->batch_addr[i],
					xe_sched_job_seqno(job));
}

static void emit_job_gen12_video(struct xe_sched_job *job)
{
	int i;

	/* FIXME: Not doing parallel handshake for now */
	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_video(job, job->q->lrc + i,
				       job->batch_addr[i],
				       xe_sched_job_seqno(job));
}

static void emit_job_gen12_render_compute(struct xe_sched_job *job)
{
	int i;

	for (i = 0; i < job->q->width; ++i)
		__emit_job_gen12_render_compute(job, job->q->lrc + i,
						job->batch_addr[i],
						xe_sched_job_seqno(job));
}
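
/*
 * Illustrative sketch, not driver code: for parallel submission the
 * emit_job_gen12_* wrappers above emit the same job once per queue slot,
 * pairing job->batch_addr[i] with ring/LRC job->q->lrc + i, so a width-2
 * queue emits, per job:
 *
 *	__emit_job_gen12_xxx(job, job->q->lrc + 0, job->batch_addr[0], seqno);
 *	__emit_job_gen12_xxx(job, job->q->lrc + 1, job->batch_addr[1], seqno);
 *
 * where __emit_job_gen12_xxx stands in for the class-specific helper.
 */
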
static const struct xe_ring_ops ring_ops_gen12_gsc = {
	.emit_job = emit_job_gen12_gsc,
};

static const struct xe_ring_ops ring_ops_gen12_copy = {
	.emit_job = emit_job_gen12_copy,
};

static const struct xe_ring_ops ring_ops_gen12_video = {
	.emit_job = emit_job_gen12_video,
};

static const struct xe_ring_ops ring_ops_gen12_render_compute = {
	.emit_job = emit_job_gen12_render_compute,
};

const struct xe_ring_ops *
xe_ring_ops_get(struct xe_gt *gt, enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_OTHER:
		return &ring_ops_gen12_gsc;
	case XE_ENGINE_CLASS_COPY:
		return &ring_ops_gen12_copy;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return &ring_ops_gen12_video;
	case XE_ENGINE_CLASS_RENDER:
	case XE_ENGINE_CLASS_COMPUTE:
		return &ring_ops_gen12_render_compute;
	default:
		return NULL;
	}
}
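
/*
 * Usage sketch, an assumption rather than code from this file: a submission
 * backend is expected to resolve the ops table once per exec queue and then
 * call ->emit_job() for each scheduled job, along the lines of:
 *
 *	const struct xe_ring_ops *ops = xe_ring_ops_get(gt, q->class);
 *
 *	if (ops)
 *		ops->emit_job(job);
 *
 * xe_ring_ops_get() returning NULL signals an engine class this file does
 * not handle.
 */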