Lines matching full:bb in xe_migrate.c

111 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */  in xe_migrate_vm_addr()
584 struct xe_bb *bb, u32 at_pt, in emit_pte() argument
608 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in emit_pte()
609 bb->cs[bb->len++] = ofs; in emit_pte()
610 bb->cs[bb->len++] = 0; in emit_pte()
638 bb->cs[bb->len++] = lower_32_bits(addr); in emit_pte()
639 bb->cs[bb->len++] = upper_32_bits(addr); in emit_pte()
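
The emit_pte() hits above show the listing's first emission idiom: an MI_STORE_DATA_IMM header whose MI_SDI_NUM_QW() field carries the qword count, a 32-bit destination offset with the upper half zeroed, then one lower/upper dword pair per page-table entry. A minimal standalone sketch of that pattern follows; the struct and both macros are stand-ins for the xe driver's definitions, and the encodings are placeholders, not the hardware values.

    #include <stdint.h>

    /* stand-ins for the driver's struct xe_bb and MI_* macros */
    struct bb_sketch {
            uint32_t *cs;   /* command-stream dwords */
            uint32_t len;   /* dwords emitted so far */
    };

    #define MI_STORE_DATA_IMM_SKETCH  (0x20u << 23)  /* placeholder opcode */
    #define MI_SDI_NUM_QW_SKETCH(q)   (2 * (q) + 1)  /* placeholder length field */

    static void emit_qwords(struct bb_sketch *bb, uint32_t ofs,
                            const uint64_t *vals, uint32_t chunk)
    {
            uint32_t i;

            bb->cs[bb->len++] = MI_STORE_DATA_IMM_SKETCH | MI_SDI_NUM_QW_SKETCH(chunk);
            bb->cs[bb->len++] = ofs;  /* lower 32 bits of destination */
            bb->cs[bb->len++] = 0;    /* upper 32 bits */

            for (i = 0; i < chunk; i++) {
                    bb->cs[bb->len++] = (uint32_t)vals[i];          /* lower_32_bits() */
                    bb->cs[bb->len++] = (uint32_t)(vals[i] >> 32);  /* upper_32_bits() */
            }
    }
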
648 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, in emit_copy_ccs() argument
654 u32 *cs = bb->cs + bb->len; in emit_copy_ccs()
685 bb->len = cs - bb->cs; in emit_copy_ccs()
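
emit_copy_ccs() uses the file's second idiom: snapshot a cursor at bb->cs + bb->len, write through it, and fold the advance back into bb->len once at the end. A sketch, reusing the stub struct above:

    static void emit_via_cursor(struct bb_sketch *bb)
    {
            uint32_t *cs = bb->cs + bb->len;

            *cs++ = 0;  /* ... command dwords go here ... */

            bb->len = cs - bb->cs;  /* publish the advance once */
    }
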
689 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, in emit_copy() argument
708 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2); in emit_copy()
709 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs; in emit_copy()
710 bb->cs[bb->len++] = 0; in emit_copy()
711 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4; in emit_copy()
712 bb->cs[bb->len++] = lower_32_bits(dst_ofs); in emit_copy()
713 bb->cs[bb->len++] = upper_32_bits(dst_ofs); in emit_copy()
714 bb->cs[bb->len++] = 0; in emit_copy()
715 bb->cs[bb->len++] = pitch | mocs; in emit_copy()
716 bb->cs[bb->len++] = lower_32_bits(src_ofs); in emit_copy()
717 bb->cs[bb->len++] = upper_32_bits(src_ofs); in emit_copy()
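
The emit_copy() hits are exactly ten dwords, and the (10 - 2) in the header is the usual GPU-command length bias (total dwords minus two). The sketch below labels each slot; both macro values are placeholders, and the field meanings are inferred from the code above rather than taken from hardware documentation.

    #define XY_FAST_COPY_BLT_CMD_SKETCH       (0x42u << 22)  /* placeholder */
    #define XY_FAST_COPY_BLT_DEPTH_32_SKETCH  (3u << 24)     /* placeholder */

    static void emit_fast_copy(struct bb_sketch *bb, uint64_t src_ofs,
                               uint64_t dst_ofs, uint32_t size, uint32_t pitch,
                               uint32_t tile_y, uint32_t mocs)
    {
            bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD_SKETCH | (10 - 2);
            bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32_SKETCH | pitch | tile_y | mocs;
            bb->cs[bb->len++] = 0;                                 /* dst X1,Y1 */
            bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;  /* dst Y2,X2 */
            bb->cs[bb->len++] = (uint32_t)dst_ofs;
            bb->cs[bb->len++] = (uint32_t)(dst_ofs >> 32);
            bb->cs[bb->len++] = 0;                                 /* src X1,Y1 */
            bb->cs[bb->len++] = pitch | mocs;
            bb->cs[bb->len++] = (uint32_t)src_ofs;
            bb->cs[bb->len++] = (uint32_t)(src_ofs >> 32);
    }
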
726 struct xe_bb *bb, in xe_migrate_ccs_copy() argument
745 emit_copy_ccs(gt, bb, in xe_migrate_ccs_copy()
758 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs, in xe_migrate_ccs_copy()
839 struct xe_bb *bb; in xe_migrate_copy() local
886 bb = xe_bb_new(gt, batch_size, usm); in xe_migrate_copy()
887 if (IS_ERR(bb)) { in xe_migrate_copy()
888 err = PTR_ERR(bb); in xe_migrate_copy()
895 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, in xe_migrate_copy()
901 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, in xe_migrate_copy()
905 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); in xe_migrate_copy()
907 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_copy()
908 update_idx = bb->len; in xe_migrate_copy()
911 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); in xe_migrate_copy()
914 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, in xe_migrate_copy()
920 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_copy()
950 xe_bb_free(bb, fence); in xe_migrate_copy()
957 xe_bb_free(bb, NULL); in xe_migrate_copy()
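
Taken together, the xe_migrate_copy() hits show the two-section batch lifecycle used throughout this file: allocate the bb, emit the PTE updates, close that section with MI_BATCH_BUFFER_END and record the boundary in update_idx, emit the copy itself, wrap the bb in a migration job, and free the bb against the job's fence on success or against NULL on the error path. A schematic of that ordering, reusing the stub helpers sketched earlier (the real xe_bb_new()/xe_bb_free() signatures differ, and the MI_BATCH_BUFFER_END encoding here is a placeholder):

    #include <errno.h>
    #include <stdlib.h>

    #define MI_BATCH_BUFFER_END_SKETCH 0x05000000u  /* placeholder */

    static struct bb_sketch *bb_new_sketch(uint32_t dwords)
    {
            struct bb_sketch *bb = calloc(1, sizeof(*bb));

            if (bb && !(bb->cs = calloc(dwords, sizeof(*bb->cs)))) {
                    free(bb);
                    bb = NULL;
            }
            return bb;
    }

    static int migrate_copy_sketch(void)
    {
            struct bb_sketch *bb = bb_new_sketch(64);
            uint32_t update_idx;

            if (!bb)
                    return -ENOMEM;

            /* section 1: PTE writes for src, dst (and CCS metadata) */
            emit_qwords(bb, 0, (const uint64_t[]){ 0 }, 1);

            /* boundary: end section 1, remember where section 2 begins */
            bb->cs[bb->len++] = MI_BATCH_BUFFER_END_SKETCH;
            update_idx = bb->len;

            /* section 2: the actual blit */
            emit_fast_copy(bb, 0, 0, 4096, 4096, 0, 0);

            /* the driver would now build a job from (bb, update_idx) and
             * free the bb against the job's fence */
            (void)update_idx;
            free(bb->cs);
            free(bb);
            return 0;
    }
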
1003 * @read_write: Creates BB commands for CCS read/write.
1026 struct xe_bb *bb = NULL; in xe_migrate_ccs_rw_copy() local
1063 bb = xe_bb_ccs_new(gt, batch_size, read_write); in xe_migrate_ccs_rw_copy()
1064 if (IS_ERR(bb)) { in xe_migrate_ccs_rw_copy()
1065 drm_err(&xe->drm, "BB allocation failed.\n"); in xe_migrate_ccs_rw_copy()
1066 err = PTR_ERR(bb); in xe_migrate_ccs_rw_copy()
1100 emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src); in xe_migrate_ccs_rw_copy()
1102 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); in xe_migrate_ccs_rw_copy()
1104 bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags); in xe_migrate_ccs_rw_copy()
1105 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt, in xe_migrate_ccs_rw_copy()
1108 bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags); in xe_migrate_ccs_rw_copy()
1113 xe_assert(xe, (batch_size_allocated == bb->len)); in xe_migrate_ccs_rw_copy()
1114 src_bo->bb_ccs[read_write] = bb; in xe_migrate_ccs_rw_copy()
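
xe_migrate_ccs_rw_copy() adds a third length-update style: emit_flush_invalidate() is handed bb->cs and the current bb->len and returns the new length, which the caller writes straight back. The last hit also shows the finished batch being cached on the buffer object (src_bo->bb_ccs[read_write] = bb) so it can be replayed for later CCS reads or writes. A sketch of the returns-new-length convention, with a placeholder command body:

    /* append a (placeholder) flush and return the new dword count */
    static uint32_t emit_flush_sketch(uint32_t *cs, uint32_t len)
    {
            cs[len++] = 0;  /* flush/invalidate dwords would go here */
            return len;
    }

    /* caller side: bb->len = emit_flush_sketch(bb->cs, bb->len); */
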
1133 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, in emit_clear_link_copy() argument
1137 u32 *cs = bb->cs + bb->len; in emit_clear_link_copy()
1151 xe_gt_assert(gt, cs - bb->cs == len + bb->len); in emit_clear_link_copy()
1153 bb->len += len; in emit_clear_link_copy()
1156 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb, in emit_clear_main_copy() argument
1160 u32 *cs = bb->cs + bb->len; in emit_clear_main_copy()
1192 xe_gt_assert(gt, cs - bb->cs == len + bb->len); in emit_clear_main_copy()
1194 bb->len += len; in emit_clear_main_copy()
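
Both clear emitters pair the cursor idiom with a precomputed length: the expected dword count (len) is known before anything is written, the commands go out through a cursor, and an assert checks that the cursor advanced by exactly len before bb->len is bumped. A sketch of that self-checking pattern:

    #include <assert.h>

    static void emit_fixed_len(struct bb_sketch *bb)
    {
            const uint32_t len = 2;  /* dwords this emitter must write */
            uint32_t *cs = bb->cs + bb->len;

            *cs++ = 0;
            *cs++ = 0;

            /* catch emitters that drift from their advertised size */
            assert(cs - bb->cs == len + bb->len);
            bb->len += len;
    }
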
1219 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, in emit_clear() argument
1223 emit_clear_link_copy(gt, bb, src_ofs, size, pitch); in emit_clear()
1225 emit_clear_main_copy(gt, bb, src_ofs, size, pitch, in emit_clear()
1279 struct xe_bb *bb; in xe_migrate_clear() local
1304 bb = xe_bb_new(gt, batch_size, usm); in xe_migrate_clear()
1305 if (IS_ERR(bb)) { in xe_migrate_clear()
1306 err = PTR_ERR(bb); in xe_migrate_clear()
1315 emit_pte(m, bb, clear_L0_pt, clear_vram, in xe_migrate_clear()
1320 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_clear()
1321 update_idx = bb->len; in xe_migrate_clear()
1324 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram); in xe_migrate_clear()
1327 emit_copy_ccs(gt, bb, clear_L0_ofs, true, in xe_migrate_clear()
1332 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_clear()
1365 xe_bb_free(bb, fence); in xe_migrate_clear()
1371 xe_bb_free(bb, NULL); in xe_migrate_clear()
1388 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, in write_pgtable() argument
1417 /* Ensure populatefn can do memset64 by aligning bb->cs */ in write_pgtable()
1418 if (!(bb->len & 1)) in write_pgtable()
1419 bb->cs[bb->len++] = MI_NOOP; in write_pgtable()
1421 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in write_pgtable()
1422 bb->cs[bb->len++] = lower_32_bits(addr); in write_pgtable()
1423 bb->cs[bb->len++] = upper_32_bits(addr); in write_pgtable()
1425 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, in write_pgtable()
1428 ops->clear(pt_update, tile, NULL, bb->cs + bb->len, in write_pgtable()
1431 bb->len += chunk * 2; in write_pgtable()
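
write_pgtable() delegates the payload to the populate()/clear() callbacks, which write qwords directly at bb->cs + bb->len; the caller then accounts for them with bb->len += chunk * 2. The MI_NOOP step at the top keeps that payload 8-byte aligned: after the three header dwords, the payload lands on an even dword index only if the header starts on an odd one, so a NOOP is inserted whenever bb->len is currently even. That alignment is what lets the callbacks use memset64() on the destination. A sketch of just the alignment step (MI_NOOP is an all-zero dword, but treat the macro as illustrative):

    #define MI_NOOP_SKETCH 0u

    static void align_qword_payload(struct bb_sketch *bb)
    {
            /* header is 3 dwords, so the payload at bb->len + 3 is 8-byte
             * aligned exactly when the header starts on an odd index */
            if (!(bb->len & 1))
                    bb->cs[bb->len++] = MI_NOOP_SKETCH;
    }
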
1514 struct xe_bb *bb; in __xe_migrate_update_pgtables() local
1543 bb = xe_bb_new(gt, batch_size, usm); in __xe_migrate_update_pgtables()
1544 if (IS_ERR(bb)) in __xe_migrate_update_pgtables()
1545 return ERR_CAST(bb); in __xe_migrate_update_pgtables()
1585 bb->cs[bb->len++] = MI_STORE_DATA_IMM | in __xe_migrate_update_pgtables()
1587 bb->cs[bb->len++] = ofs; in __xe_migrate_update_pgtables()
1588 bb->cs[bb->len++] = 0; /* upper_32_bits */ in __xe_migrate_update_pgtables()
1610 bb->cs[bb->len++] = lower_32_bits(addr); in __xe_migrate_update_pgtables()
1611 bb->cs[bb->len++] = upper_32_bits(addr); in __xe_migrate_update_pgtables()
1622 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in __xe_migrate_update_pgtables()
1623 update_idx = bb->len; in __xe_migrate_update_pgtables()
1635 write_pgtable(tile, bb, addr + in __xe_migrate_update_pgtables()
1642 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in __xe_migrate_update_pgtables()
1643 update_idx = bb->len; in __xe_migrate_update_pgtables()
1651 write_pgtable(tile, bb, 0, pt_op, &updates[j], in __xe_migrate_update_pgtables()
1656 job = xe_bb_create_migration_job(pt_update_ops->q, bb, in __xe_migrate_update_pgtables()
1682 xe_bb_free(bb, fence); in __xe_migrate_update_pgtables()
1692 xe_bb_free(bb, NULL); in __xe_migrate_update_pgtables()
1767 struct xe_bb *bb, u32 pt_offset, in build_pt_update_batch_sram() argument
1779 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in build_pt_update_batch_sram()
1780 bb->cs[bb->len++] = pt_offset; in build_pt_update_batch_sram()
1781 bb->cs[bb->len++] = 0; in build_pt_update_batch_sram()
1795 bb->cs[bb->len++] = lower_32_bits(addr); in build_pt_update_batch_sram()
1796 bb->cs[bb->len++] = upper_32_bits(addr); in build_pt_update_batch_sram()
1825 struct xe_bb *bb; in xe_migrate_vram() local
1842 bb = xe_bb_new(gt, batch_size, use_usm_batch); in xe_migrate_vram()
1843 if (IS_ERR(bb)) { in xe_migrate_vram()
1844 err = PTR_ERR(bb); in xe_migrate_vram()
1866 build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE, in xe_migrate_vram()
1878 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_vram()
1879 update_idx = bb->len; in xe_migrate_vram()
1881 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch); in xe_migrate_vram()
1883 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_vram()
1902 xe_bb_free(bb, fence); in xe_migrate_vram()
1907 xe_bb_free(bb, NULL); in xe_migrate_vram()
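
xe_migrate_vram() reuses the same two-section shape as xe_migrate_copy(), but maps the source pages through reserved page-table slots: per the comment on the first hit in this listing, slot 0 is taken by the mapping of the PT bo and the bb itself, so payload mappings start at slot 1 and address pt_slot * XE_PAGE_SIZE. A one-line sketch of that addressing, assuming a 4 KiB page:

    #define XE_PAGE_SIZE_SKETCH 4096u

    /* byte offset of a reserved PT slot; slot 0 is the PT-bo/bb mapping,
     * so payload callers start at pt_slot = 1 */
    static uint32_t pt_slot_ofs(uint32_t pt_slot)
    {
            return pt_slot * XE_PAGE_SIZE_SKETCH;
    }
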