Lines matching full:cs. Each numbered entry is a single source line containing "cs"; non-matching lines in between are omitted, which is why some statements appear truncated.
85 u32 *cs;
91 cs = intel_ring_begin(rq, 4);
92 if (IS_ERR(cs)) {
94 return PTR_ERR(cs);
97 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
98 *cs++ = offset;
99 *cs++ = 0;
100 *cs++ = 1;
102 intel_ring_advance(rq, cs);
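The block above (file lines 85-102) shows the standard ring-emission idiom: reserve dwords with intel_ring_begin(), bail out via IS_ERR()/PTR_ERR() on failure, write the command stream through the returned cursor, and commit with intel_ring_advance(). Below is a minimal sketch of that same pattern; it assumes the usual i915 GT internals (intel_ring.h, intel_gpu_commands.h) and the helper name is illustrative, not taken from the file.

/*
 * Sketch only: store one immediate dword to a GGTT offset, mirroring the
 * emission above. Assumes the i915 GT headers; the function name is
 * illustrative.
 */
static int emit_store_dw_sketch(struct i915_request *rq, u32 ggtt_offset, u32 value)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 4);           /* reserve 4 dwords in the ring */
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = ggtt_offset;                    /* low 32 bits of the GGTT address */
        *cs++ = 0;                              /* upper 32 bits (zero here, as above) */
        *cs++ = value;                          /* immediate data written by the CS */

        intel_ring_advance(rq, cs);             /* commit exactly what was reserved */
        return 0;
}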
415 u32 *cs;
437 cs = intel_ring_begin(rq, 4 * MAX_IDX);
438 if (IS_ERR(cs)) {
439 err = PTR_ERR(cs);
444 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
445 *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
446 *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
447 *cs++ = 0;
451 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
452 *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
453 *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
454 *cs++ = 0;
471 cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
472 if (IS_ERR(cs)) {
473 err = PTR_ERR(cs);
478 if (cs[n] != expected[n]) {
480 engine->name, n, cs[n], expected[n]);
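File lines 437-454 capture live MMIO state with MI_STORE_REGISTER_MEM (SRM): each four-dword command copies one ring register into a GGTT-addressed slot of a scratch buffer, and lines 471-480 later map that scratch write-back on the CPU (i915_gem_object_pin_map, I915_MAP_WB) to compare the captured dwords against expected values. A hedged sketch of the capture side follows; RING_START_IDX and RING_TAIL_IDX are taken from the listing, while the function name and the dword count are illustrative.

/*
 * Sketch only: copy RING_START and RING_TAIL into a scratch buffer via SRM,
 * as in the listing above. Two SRM commands at 4 dwords each. After the
 * request completes, the scratch object can be read back on the CPU and
 * compared against expected values.
 */
static int capture_ring_regs_sketch(struct i915_request *rq,
                                    struct intel_engine_cs *engine,
                                    struct i915_vma *scratch)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 8);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
        *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
        *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
        *cs++ = 0;

        *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
        *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
        *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
        *cs++ = 0;

        intel_ring_advance(rq, cs);
        return 0;
}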
536 u32 *cs;
543 cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
544 if (IS_ERR(cs)) {
546 return PTR_ERR(cs);
549 *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
551 *cs++ = CS_GPR(ce->engine, n);
552 *cs++ = STACK_MAGIC;
554 *cs++ = MI_NOOP;
556 intel_ring_advance(rq, cs);
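File lines 543-556 load STACK_MAGIC into every command-streamer GPR: a single MI_LOAD_REGISTER_IMM(n) header is followed by n (register, value) pairs, so the emission is 2*n + 1 dwords, padded with MI_NOOP to stay an even length. A sketch of that idiom follows; CS_GPR(), NUM_GPR_DW and STACK_MAGIC are as used in the listing, the wrapper name is illustrative.

/*
 * Sketch only: one LRI header, then (register, value) pairs for every GPR,
 * padded with MI_NOOP so the ring emission stays qword-aligned.
 */
static int dirty_gprs_sketch(struct intel_context *ce, struct i915_request *rq)
{
        u32 *cs;
        int n;

        cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
        for (n = 0; n < NUM_GPR_DW; n++) {
                *cs++ = CS_GPR(ce->engine, n);  /* per-engine GPR register offset */
                *cs++ = STACK_MAGIC;            /* arbitrary non-zero fill value */
        }
        *cs++ = MI_NOOP;                        /* pad to an even dword count */

        intel_ring_advance(rq, cs);
        return 0;
}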
571 u32 *cs;
579 cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
580 if (IS_ERR(cs)) {
582 return ERR_CAST(cs);
585 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
586 *cs++ = MI_NOOP;
588 *cs++ = MI_SEMAPHORE_WAIT |
592 *cs++ = 0;
593 *cs++ = offset;
594 *cs++ = 0;
597 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
598 *cs++ = CS_GPR(ce->engine, n);
599 *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
600 *cs++ = 0;
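File lines 579-600 make the CS wait on a GGTT semaphore dword before dumping every GPR to scratch with SRM; the MI_SEMAPHORE_WAIT mode bits themselves are elided from the listing because those lines do not contain "cs". The sketch below fills them with a plausible poll-until-not-equal combination purely so the fragment is well-formed; treat those flags, the function name and the int return type as assumptions.

/*
 * Sketch only: stall on a semaphore dword in the GGTT, then store each GPR
 * to scratch. The MI_SEMAPHORE_WAIT flags are an assumption (elided in the
 * listing); everything else mirrors the lines above.
 */
static int read_gprs_after_wait_sketch(struct intel_context *ce,
                                       struct i915_request *rq,
                                       struct i915_vma *scratch,
                                       u32 sema_offset)
{
        u32 *cs;
        int n;

        cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;  /* allow preemption while waiting */
        *cs++ = MI_NOOP;

        *cs++ = MI_SEMAPHORE_WAIT |             /* assumed wait-mode flags */
                MI_SEMAPHORE_GLOBAL_GTT |
                MI_SEMAPHORE_POLL |
                MI_SEMAPHORE_SAD_NEQ_SDD;
        *cs++ = 0;                              /* semaphore operand */
        *cs++ = sema_offset;                    /* GGTT address of the semaphore */
        *cs++ = 0;

        for (n = 0; n < NUM_GPR_DW; n++) {
                *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
                *cs++ = CS_GPR(ce->engine, n);
                *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
                *cs++ = 0;
        }

        intel_ring_advance(rq, cs);
        return 0;
}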
622 u32 *cs;
669 cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
670 if (IS_ERR(cs)) {
671 err = PTR_ERR(cs);
676 if (cs[n]) {
680 cs[n]);
744 u32 *cs;
751 cs = intel_ring_begin(rq, 10);
752 if (IS_ERR(cs)) {
753 err = PTR_ERR(cs);
757 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
758 *cs++ = MI_NOOP;
760 *cs++ = MI_SEMAPHORE_WAIT |
764 *cs++ = 0;
765 *cs++ = offset;
766 *cs++ = 0;
768 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
769 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
770 *cs++ = offset + idx * sizeof(u32);
771 *cs++ = 0;
773 intel_ring_advance(rq, cs);
976 u32 dw, x, *cs, *hw;
983 cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
984 if (IS_ERR(cs)) {
986 return ERR_CAST(cs);
1038 *cs++ = MI_STORE_REGISTER_MEM_GEN8;
1039 *cs++ = hw[dw];
1040 *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
1041 *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
1049 *cs++ = MI_BATCH_BUFFER_END;
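File lines 983-1049 build a batch buffer through a CPU mapping of the batch object instead of emitting into the ring: the hardware context image is walked, one SRM per register offset is written with a full 64-bit destination address (hence lower/upper_32_bits() and no MI_USE_GGTT), and the batch is closed with MI_BATCH_BUFFER_END. The sketch below reproduces only the shape of that loop; the register list is passed in rather than parsed from the context image, and the flush/unpin tail is an assumption about typical cleanup rather than a quote from the file.

/*
 * Sketch only: write SRM commands into a WC-mapped batch object, one per
 * register offset, then terminate the batch. The register array is
 * illustrative; the real code derives the offsets from the context image.
 */
static int build_srm_batch_sketch(struct i915_vma *batch,
                                  struct i915_vma *scratch,
                                  const u32 *regs, unsigned int count)
{
        u32 *cs;
        unsigned int i, x = 0;

        cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        for (i = 0; i < count; i++) {
                *cs++ = MI_STORE_REGISTER_MEM_GEN8;
                *cs++ = regs[i];        /* MMIO offset to capture */
                *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
                *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
                x += sizeof(u32);
        }

        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);  /* make CPU writes visible to the GPU */
        i915_gem_object_unpin_map(batch->obj);
        return 0;
}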
1067 u32 *cs;
1100 cs = intel_ring_begin(rq, 14);
1101 if (IS_ERR(cs)) {
1102 err = PTR_ERR(cs);
1106 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1107 *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
1108 *cs++ = lower_32_bits(i915_vma_offset(b_before));
1109 *cs++ = upper_32_bits(i915_vma_offset(b_before));
1111 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1112 *cs++ = MI_SEMAPHORE_WAIT |
1116 *cs++ = 0;
1117 *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
1119 *cs++ = 0;
1120 *cs++ = MI_NOOP;
1122 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1123 *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
1124 *cs++ = lower_32_bits(i915_vma_offset(b_after));
1125 *cs++ = upper_32_bits(i915_vma_offset(b_after));
1127 intel_ring_advance(rq, cs);
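File lines 1100-1127 chain two batch buffers around a semaphore wait, toggling MI_ARB_ON_OFF so the request cannot be preempted while inside either batch but can be preempted while it polls the status page. The sketch keeps the same 14-dword layout; the semaphore wait flags are elided in the listing and are assumptions here, BIT(8) on MI_BATCH_BUFFER_START_GEN8 is carried over from the listing, and the names and semaphore slot are illustrative.

/*
 * Sketch only: jump into a "before" batch with arbitration disabled, wait
 * on a status-page semaphore with arbitration enabled, then jump into an
 * "after" batch. Wait flags below are assumed, not quoted from the file.
 */
static int chain_batches_sketch(struct intel_context *ce,
                                struct i915_request *rq,
                                struct i915_vma *b_before,
                                struct i915_vma *b_after,
                                u32 sema_slot)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 14);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* no preemption inside the batch */
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
        *cs++ = lower_32_bits(i915_vma_offset(b_before));
        *cs++ = upper_32_bits(i915_vma_offset(b_before));

        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;  /* preemptible while polling */
        *cs++ = MI_SEMAPHORE_WAIT |             /* assumed wait-mode flags */
                MI_SEMAPHORE_GLOBAL_GTT |
                MI_SEMAPHORE_POLL |
                MI_SEMAPHORE_SAD_NEQ_SDD;
        *cs++ = 0;
        *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + sema_slot;
        *cs++ = 0;
        *cs++ = MI_NOOP;

        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
        *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
        *cs++ = lower_32_bits(i915_vma_offset(b_after));
        *cs++ = upper_32_bits(i915_vma_offset(b_after));

        intel_ring_advance(rq, cs);
        return 0;
}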
1147 u32 dw, *cs, *hw;
1154 cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
1155 if (IS_ERR(cs)) {
1157 return ERR_CAST(cs);
1196 *cs++ = MI_LOAD_REGISTER_IMM(len);
1198 *cs++ = hw[dw];
1199 *cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine,
1207 *cs++ = MI_BATCH_BUFFER_END;
1221 u32 *cs;
1238 cs = intel_ring_begin(rq, 8);
1239 if (IS_ERR(cs)) {
1240 err = PTR_ERR(cs);
1244 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1245 *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
1246 *cs++ = lower_32_bits(i915_vma_offset(batch));
1247 *cs++ = upper_32_bits(i915_vma_offset(batch));
1249 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1250 *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
1252 *cs++ = 0;
1253 *cs++ = 1;
1255 intel_ring_advance(rq, cs);
1592 u32 *cs, bool per_ctx)
1594 *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
1597 *cs++ = i915_mmio_reg_offset(RING_START(0));
1598 *cs++ = i915_ggtt_offset(ce->state) +
1602 *cs++ = 0;
1604 return cs;
1608 emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
1610 return emit_wabb_ctx_canary(ce, cs, false);
1614 emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
1616 return emit_wabb_ctx_canary(ce, cs, true);
1622 u32 *cs = context_wabb(ce, per_ctx);
1624 cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
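File lines 1592-1624 set up the WABB canary: emit_wabb_ctx_canary() makes the indirect-ctx or per-ctx workaround batch SRM the RING_START value into a slot of the context image (ce->state), and line 1624 first primes that slot with the poison value 0xdeadf00d. Below is a hedged sketch of the corresponding CPU-side check; only context_wabb() and CTX_BB_CANARY_INDEX come from the listing, the helper name and the exact pass/fail policy are illustrative.

/*
 * Sketch only: after the context has executed, the canary slot must no
 * longer hold the poison written at setup, otherwise the workaround batch
 * did not run or did not write it.
 */
static int check_wabb_canary_sketch(struct intel_context *ce, bool per_ctx)
{
        u32 *cs = context_wabb(ce, per_ctx);

        if (cs[CTX_BB_CANARY_INDEX] == 0xdeadf00d)
                return -EINVAL;         /* canary untouched: WABB did not execute */

        return 0;
}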