Lines matching the full identifier 'bb' in drivers/gpu/drm/xe/xe_migrate.c, the Xe GPU driver's migration code. Throughout this file, 'bb' is a struct xe_bb, the batch buffer that migration jobs are built in: bb->cs is the command-stream dword array and bb->len is the current length in dwords.

120 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */  in xe_migrate_vm_addr()
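
The comment at line 120 describes the migration VM's fixed layout: slot 0 maps the page-table BOs and the batch buffer itself, so data windows start at slot 1. A minimal sketch of what such a helper looks like; the body is a reconstruction from the matched comment, not quoted source:

    /* Sketch: translate a migration slot index into a VM address.
     * Slot 0 maps the PT BOs and the bb itself, so caller slots
     * start at 1, hence the "+ 1".
     */
    static u64 xe_migrate_vm_addr(u64 slot, u32 level)
    {
            /* First slot is reserved for mapping of PT bo and bb, start from 1 */
            return (slot + 1ULL) << xe_pt_shift(level + 1);
    }
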
559 struct xe_bb *bb, u32 at_pt, in emit_pte() argument
583 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in emit_pte()
584 bb->cs[bb->len++] = ofs; in emit_pte()
585 bb->cs[bb->len++] = 0; in emit_pte()
613 bb->cs[bb->len++] = lower_32_bits(addr); in emit_pte()
614 bb->cs[bb->len++] = upper_32_bits(addr); in emit_pte()
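
The emit_pte() matches above (lines 583-614) show the first of the file's two emission idioms: append dwords directly through bb->cs[bb->len++]. Each MI_STORE_DATA_IMM carries a qword count in its header, the destination offset within the page table, and then one qword (lower/upper 32 bits) per PTE. A hedged sketch of the loop shape; next_pte_addr() is a hypothetical stand-in for the real cursor/encoding logic:

    /* Sketch of the emit_pte() loop: write up to one command's worth
     * of qword PTEs into the page table at 'ofs' per MI_STORE_DATA_IMM.
     */
    while (ptes) {
            u32 chunk = min_t(u32, MAX_PTE_PER_SDI, ptes); /* per-command qword cap */

            bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
            bb->cs[bb->len++] = ofs;        /* destination offset in the PT */
            bb->cs[bb->len++] = 0;          /* upper 32 bits of the offset */

            while (chunk--) {
                    u64 addr = next_pte_addr(&it); /* hypothetical: next encoded PTE */

                    bb->cs[bb->len++] = lower_32_bits(addr);
                    bb->cs[bb->len++] = upper_32_bits(addr);
                    ofs += 8;               /* one qword per PTE */
                    ptes--;
            }
    }
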
623 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, in emit_copy_ccs() argument
629 u32 *cs = bb->cs + bb->len; in emit_copy_ccs()
660 bb->len = cs - bb->cs; in emit_copy_ccs()
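
emit_copy_ccs() (lines 629 and 660) shows the file's second emission idiom: take a local cursor into the batch, advance it per dword, and fold it back into bb->len at the end. A minimal illustration; the MI_NOOP body stands in for the real XY_CTRL_SURF_COPY_BLT dwords:

    /* The cursor idiom: write through a local pointer, then convert
     * the cursor back into a dword count.
     */
    u32 *cs = bb->cs + bb->len;     /* cursor at the current tail */

    *cs++ = MI_NOOP;                /* placeholder command dwords */
    *cs++ = MI_NOOP;

    bb->len = cs - bb->cs;          /* write the new length back */
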
664 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, in emit_copy() argument
682 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2); in emit_copy()
683 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs; in emit_copy()
684 bb->cs[bb->len++] = 0; in emit_copy()
685 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4; in emit_copy()
686 bb->cs[bb->len++] = lower_32_bits(dst_ofs); in emit_copy()
687 bb->cs[bb->len++] = upper_32_bits(dst_ofs); in emit_copy()
688 bb->cs[bb->len++] = 0; in emit_copy()
689 bb->cs[bb->len++] = pitch | mocs; in emit_copy()
690 bb->cs[bb->len++] = lower_32_bits(src_ofs); in emit_copy()
691 bb->cs[bb->len++] = upper_32_bits(src_ofs); in emit_copy()
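
Lines 682-691 are the complete 10-dword XY_FAST_COPY_BLT packet emitted by emit_copy(). Stitched back together, with interpretive field comments that are mine rather than the source's:

    bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);    /* opcode | (dword length - 2) */
    bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs; /* dst pitch/format */
    bb->cs[bb->len++] = 0;                                  /* dst X1,Y1 = 0,0 */
    bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;   /* dst Y2 = rows, X2 = pixels */
    bb->cs[bb->len++] = lower_32_bits(dst_ofs);             /* dst address */
    bb->cs[bb->len++] = upper_32_bits(dst_ofs);
    bb->cs[bb->len++] = 0;                                  /* src X1,Y1 = 0,0 */
    bb->cs[bb->len++] = pitch | mocs;                       /* src pitch */
    bb->cs[bb->len++] = lower_32_bits(src_ofs);             /* src address */
    bb->cs[bb->len++] = upper_32_bits(src_ofs);
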
700 struct xe_bb *bb, in xe_migrate_ccs_copy() argument
719 emit_copy_ccs(gt, bb, in xe_migrate_ccs_copy()
732 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs, in xe_migrate_ccs_copy()
811 struct xe_bb *bb; in xe_migrate_copy() local
853 bb = xe_bb_new(gt, batch_size, usm); in xe_migrate_copy()
854 if (IS_ERR(bb)) { in xe_migrate_copy()
855 err = PTR_ERR(bb); in xe_migrate_copy()
862 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, in xe_migrate_copy()
868 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, in xe_migrate_copy()
872 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); in xe_migrate_copy()
874 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_copy()
875 update_idx = bb->len; in xe_migrate_copy()
878 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); in xe_migrate_copy()
881 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, in xe_migrate_copy()
887 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_copy()
917 xe_bb_free(bb, fence); in xe_migrate_copy()
924 xe_bb_free(bb, NULL); in xe_migrate_copy()
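
Taken together, the xe_migrate_copy() matches trace the full batch-buffer lifecycle: allocate with xe_bb_new(), emit the PTE section, terminate it with MI_BATCH_BUFFER_END, record update_idx as the start of the blit section, wrap the buffer in a job, and free it against the job's fence (or against NULL on the error path). A condensed skeleton; the emit_pte() argument lists are completed following the pattern visible at line 872, and the error label and batch_base argument are assumptions:

    bb = xe_bb_new(gt, batch_size, usm);
    if (IS_ERR(bb)) {
            err = PTR_ERR(bb);
            goto err_sync;          /* label name assumed */
    }

    /* Section 1: map source and destination into the migration VM. */
    emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, &src_it, src_L0, src);
    emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, &dst_it, src_L0, dst);

    bb->cs[bb->len++] = MI_BATCH_BUFFER_END;        /* close the PTE section */
    update_idx = bb->len;                           /* blit section starts here */

    /* Section 2: the actual copy (plus a CCS copy where needed). */
    emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);

    job = xe_bb_create_migration_job(m->q, bb, batch_base, update_idx);
    /* ... submit the job, take its fence ... */
    xe_bb_free(bb, fence);          /* success path: freed once the fence signals */
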
939 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, in emit_clear_link_copy() argument
943 u32 *cs = bb->cs + bb->len; in emit_clear_link_copy()
957 xe_gt_assert(gt, cs - bb->cs == len + bb->len); in emit_clear_link_copy()
959 bb->len += len; in emit_clear_link_copy()
962 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb, in emit_clear_main_copy() argument
966 u32 *cs = bb->cs + bb->len; in emit_clear_main_copy()
998 xe_gt_assert(gt, cs - bb->cs == len + bb->len); in emit_clear_main_copy()
1000 bb->len += len; in emit_clear_main_copy()
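
The two clear emitters (lines 943-1000) use a variant of the cursor idiom shown earlier: the packet length is computed up front, the dwords are written through a local cursor, and an assert checks that exactly 'len' dwords were emitted before bb->len is advanced. An illustrative sketch with placeholder dwords:

    u32 *cs = bb->cs + bb->len;
    u32 len = 2;                    /* expected packet length in dwords */

    *cs++ = MI_NOOP;                /* placeholders for the real MEM_SET /  */
    *cs++ = MI_NOOP;                /* XY_FAST_COLOR_BLT dwords             */

    xe_gt_assert(gt, cs - bb->cs == len + bb->len); /* catch emission bugs */
    bb->len += len;
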
1025 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, in emit_clear() argument
1029 emit_clear_link_copy(gt, bb, src_ofs, size, pitch); in emit_clear()
1031 emit_clear_main_copy(gt, bb, src_ofs, size, pitch, in emit_clear()
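
emit_clear() (lines 1029-1031) just dispatches between the two emitters. My best reading of the predicate, to be treated as an assumption: platforms with a link copy engine take the MEM_SET path, others the main-copy blit path:

    static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
                           u32 size, u32 pitch, bool is_vram)
    {
            if (xe_device_has_link_copy_engine(gt_to_xe(gt))) /* assumed predicate */
                    emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
            else
                    emit_clear_main_copy(gt, bb, src_ofs, size, pitch, is_vram);
    }
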
1085 struct xe_bb *bb; in xe_migrate_clear() local
1110 bb = xe_bb_new(gt, batch_size, usm); in xe_migrate_clear()
1111 if (IS_ERR(bb)) { in xe_migrate_clear()
1112 err = PTR_ERR(bb); in xe_migrate_clear()
1121 emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs, in xe_migrate_clear()
1124 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_clear()
1125 update_idx = bb->len; in xe_migrate_clear()
1128 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram); in xe_migrate_clear()
1131 emit_copy_ccs(gt, bb, clear_L0_ofs, true, in xe_migrate_clear()
1136 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_clear()
1169 xe_bb_free(bb, fence); in xe_migrate_clear()
1175 xe_bb_free(bb, NULL); in xe_migrate_clear()
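
xe_migrate_clear() follows the same lifecycle as xe_migrate_copy(): xe_bb_new(), a PTE section closed by MI_BATCH_BUFFER_END, update_idx, the clear section, job creation, and xe_bb_free() against the fence (or NULL on error). The one twist is at lines 1128-1131: after clearing the main surface, flat-CCS platforms also scrub the compression metadata. A hedged sketch; the predicate and the completed emit_copy_ccs() argument list are assumptions from context:

    emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);

    if (xe_device_has_flat_ccs(xe)) /* assumed predicate */
            emit_copy_ccs(gt, bb, clear_L0_ofs, true,
                          m->cleared_mem_ofs, false, clear_L0); /* args assumed */
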
1192 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, in write_pgtable() argument
1221 /* Ensure populatefn can do memset64 by aligning bb->cs */ in write_pgtable()
1222 if (!(bb->len & 1)) in write_pgtable()
1223 bb->cs[bb->len++] = MI_NOOP; in write_pgtable()
1225 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in write_pgtable()
1226 bb->cs[bb->len++] = lower_32_bits(addr); in write_pgtable()
1227 bb->cs[bb->len++] = upper_32_bits(addr); in write_pgtable()
1229 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, in write_pgtable()
1232 ops->clear(pt_update, tile, NULL, bb->cs + bb->len, in write_pgtable()
1235 bb->len += chunk * 2; in write_pgtable()
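
write_pgtable() (lines 1221-1235) shows a subtle alignment trick: MI_STORE_DATA_IMM's header is three dwords, so padding the batch to an odd dword index with MI_NOOP lands the qword payload on an even index, which lets the populate/clear callbacks memset64() directly into bb->cs. A sketch with the callback arguments completed from context (treat them as approximate):

    /* Pad so the 3-dword header below leaves the qword payload
     * 8-byte aligned, as required for memset64() into bb->cs.
     */
    if (!(bb->len & 1))
            bb->cs[bb->len++] = MI_NOOP;

    bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
    bb->cs[bb->len++] = lower_32_bits(addr);
    bb->cs[bb->len++] = upper_32_bits(addr);

    if (pt_op->bind)                /* bind vs. unbind split assumed from context */
            ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
                          ofs, chunk, update);
    else
            ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
                       ofs, chunk, update);

    bb->len += chunk * 2;           /* each written entry is one qword */
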
1318 struct xe_bb *bb; in __xe_migrate_update_pgtables() local
1347 bb = xe_bb_new(gt, batch_size, usm); in __xe_migrate_update_pgtables()
1348 if (IS_ERR(bb)) in __xe_migrate_update_pgtables()
1349 return ERR_CAST(bb); in __xe_migrate_update_pgtables()
1388 bb->cs[bb->len++] = MI_STORE_DATA_IMM | in __xe_migrate_update_pgtables()
1390 bb->cs[bb->len++] = ofs; in __xe_migrate_update_pgtables()
1391 bb->cs[bb->len++] = 0; /* upper_32_bits */ in __xe_migrate_update_pgtables()
1413 bb->cs[bb->len++] = lower_32_bits(addr); in __xe_migrate_update_pgtables()
1414 bb->cs[bb->len++] = upper_32_bits(addr); in __xe_migrate_update_pgtables()
1425 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in __xe_migrate_update_pgtables()
1426 update_idx = bb->len; in __xe_migrate_update_pgtables()
1438 write_pgtable(tile, bb, addr + in __xe_migrate_update_pgtables()
1445 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in __xe_migrate_update_pgtables()
1446 update_idx = bb->len; in __xe_migrate_update_pgtables()
1454 write_pgtable(tile, bb, 0, pt_op, &updates[j], in __xe_migrate_update_pgtables()
1459 job = xe_bb_create_migration_job(pt_update_ops->q, bb, in __xe_migrate_update_pgtables()
1483 xe_bb_free(bb, fence); in __xe_migrate_update_pgtables()
1493 xe_bb_free(bb, NULL); in __xe_migrate_update_pgtables()
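
Finally, __xe_migrate_update_pgtables() builds the same two-section batch in one of two shapes: either the page tables to be updated are first mapped into the migration VM with MI_STORE_DATA_IMM (lines 1388-1414) and the updates are written through that mapping, or, when the pages are directly addressable, the PTE section is skipped and write_pgtable() runs at offset 0 (lines 1445-1454). In both shapes MI_BATCH_BUFFER_END closes the first section and update_idx marks the second. A condensed skeleton; the branch condition and the truncated write_pgtable() arguments are assumptions:

    bb = xe_bb_new(gt, batch_size, usm);
    if (IS_ERR(bb))
            return ERR_CAST(bb);

    if (!IS_DGFX(xe)) {             /* assumed: sysmem PTs must be mapped first */
            /* Section 1: map the page-table BOs into the migration VM. */
            bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
            bb->cs[bb->len++] = ofs;
            bb->cs[bb->len++] = 0;  /* upper_32_bits */
            /* ... one qword per mapped PT page, as in emit_pte() ... */

            bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
            update_idx = bb->len;

            /* Section 2: write the updates through the new mapping. */
            write_pgtable(tile, bb, addr + pt_offset,   /* offset assumed */
                          pt_op, &updates[i], pt_update);
    } else {
            /* Directly addressable: no PTE section needed. */
            bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
            update_idx = bb->len;
            write_pgtable(tile, bb, 0, pt_op, &updates[j], pt_update);
    }

    job = xe_bb_create_migration_job(pt_update_ops->q, bb,
                                     batch_base, update_idx);   /* addr assumed */
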