Lines matching refs: bb (each entry is a source line number followed by the matched line; the identifiers indicate the Xe GPU driver's migration code, xe_migrate.c)

119 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
598 struct xe_bb *bb, u32 at_pt,
622 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
623 bb->cs[bb->len++] = ofs;
624 bb->cs[bb->len++] = 0;
652 bb->cs[bb->len++] = lower_32_bits(addr);
653 bb->cs[bb->len++] = upper_32_bits(addr);
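
The matches at 622-624 and 652-653 (and again at 1624-1626, 1988-1990, and 2008-2009) are all instances of one idiom: a MI_STORE_DATA_IMM header sized in quadwords, a 64-bit destination offset, then payload qwords split into 32-bit halves, each dword appended via bb->cs[bb->len++]. A minimal user-space sketch of that pattern; the struct, opcode value, and length macro below are placeholders, not the driver's real definitions:

#include <stdint.h>

/* Minimal stand-in for the driver's batch-buffer object; every name
 * prefixed "sketch_"/"SKETCH_" in these sketches is hypothetical. */
struct sketch_bb {
	uint32_t *cs;	/* command-stream storage */
	uint32_t len;	/* dwords written so far */
};

#define SKETCH_MI_STORE_DATA_IMM	0x20400000u	/* placeholder opcode */
#define SKETCH_SDI_NUM_QW(n)		(2 * (n) + 1)	/* placeholder length field */

/* One SDI chunk: header, 64-bit destination offset, then qword payloads
 * split into lower/upper halves, as on lines 622-624 and 652-653. */
static void sketch_emit_qw_chunk(struct sketch_bb *bb, uint32_t ofs,
				 const uint64_t *vals, uint32_t chunk)
{
	uint32_t i;

	bb->cs[bb->len++] = SKETCH_MI_STORE_DATA_IMM | SKETCH_SDI_NUM_QW(chunk);
	bb->cs[bb->len++] = ofs;	/* lower 32 bits of destination */
	bb->cs[bb->len++] = 0;		/* upper 32 bits of destination */

	for (i = 0; i < chunk; i++) {
		bb->cs[bb->len++] = (uint32_t)vals[i];		/* lower_32_bits() */
		bb->cs[bb->len++] = (uint32_t)(vals[i] >> 32);	/* upper_32_bits() */
	}
}
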
662 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
668 u32 *cs = bb->cs + bb->len;
699 bb->len = cs - bb->cs;
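
emit_copy_ccs (662-699) uses the file's other emission style: take a local cursor at bb->cs + bb->len, write through it, and fold the advance back into bb->len at the end. Sketched with the same sketch_bb as above:

/* Cursor-style emission, mirroring lines 668 and 699. */
static void sketch_emit_with_cursor(struct sketch_bb *bb)
{
	uint32_t *cs = bb->cs + bb->len;

	*cs++ = 0x0;	/* ... command dwords ... */
	*cs++ = 0x0;

	bb->len = cs - bb->cs;	/* commit the cursor's advance */
}
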
703 static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
722 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
723 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
724 bb->cs[bb->len++] = 0;
725 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
726 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
727 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
728 bb->cs[bb->len++] = 0;
729 bb->cs[bb->len++] = pitch | mocs;
730 bb->cs[bb->len++] = lower_32_bits(src_ofs);
731 bb->cs[bb->len++] = upper_32_bits(src_ofs);
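
The (10 - 2) in the XY_FAST_COPY header at 722 is the usual blitter length encoding, dword count minus two, and the stores at 722-731 do emit exactly ten dwords. A sketch of a self-check that keeps the header and the emitted body in agreement (opcode value is a placeholder, not the real XY_FAST_COPY_BLT_CMD):

#include <assert.h>

#define SKETCH_BLT_HEADER(op, ndw)	((op) | ((ndw) - 2))

static void sketch_emit_ten_dword_blit(struct sketch_bb *bb)
{
	uint32_t start = bb->len;
	int i;

	bb->cs[bb->len++] = SKETCH_BLT_HEADER(0x50800000u, 10);
	for (i = 0; i < 9; i++)
		bb->cs[bb->len++] = 0;	/* pitch/mocs, extents, dst, src */

	assert(bb->len - start == 10);	/* must match the header's length field */
}
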
735 static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
764 bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
765 bb->cs[bb->len++] = width - 1;
766 bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
767 bb->cs[bb->len++] = pitch - 1;
768 bb->cs[bb->len++] = pitch - 1;
769 bb->cs[bb->len++] = lower_32_bits(src_ofs);
770 bb->cs[bb->len++] = upper_32_bits(src_ofs);
771 bb->cs[bb->len++] = lower_32_bits(dst_ofs);
772 bb->cs[bb->len++] = upper_32_bits(dst_ofs);
773 bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
777 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
784 emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
786 emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
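
emit_copy (777-786) is a thin dispatcher between the MEM_COPY command (764-773) and the XY_FAST_COPY blit (722-731). The predicate it switches on is not visible in this listing, so the sketch below abstracts it into a flag:

static void sketch_mem_copy(struct sketch_bb *bb)     { (void)bb; /* as at 764-773 */ }
static void sketch_xy_fast_copy(struct sketch_bb *bb) { (void)bb; /* as at 722-731 */ }

static void sketch_emit_copy(struct sketch_bb *bb, int use_mem_copy)
{
	if (use_mem_copy)
		sketch_mem_copy(bb);
	else
		sketch_xy_fast_copy(bb);
}
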
795 struct xe_bb *bb,
814 emit_copy_ccs(gt, bb,
827 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
908 struct xe_bb *bb;
955 bb = xe_bb_new(gt, batch_size, usm);
956 if (IS_ERR(bb)) {
957 err = PTR_ERR(bb);
964 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
970 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
974 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
976 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
977 update_idx = bb->len;
980 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
983 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
989 job = xe_bb_create_migration_job(m->q, bb,
1019 xe_bb_free(bb, fence);
1026 xe_bb_free(bb, NULL);
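
Lines 908-1026 show the life cycle every xe_bb user in this listing follows: allocate with xe_bb_new (checking IS_ERR/PTR_ERR), emit PTEs, terminate the first part with MI_BATCH_BUFFER_END and record update_idx, emit the copy, wrap the bb in a migration job, then free it against the job's fence, or against NULL on the error path. A condensed control-flow skeleton; the real calls are named in comments, the opcode value is a placeholder, and the error handling is simplified to NULL checks:

struct sketch_fence;	/* opaque stand-in for a dma fence */

static int sketch_migrate_flow(struct sketch_bb *bb, struct sketch_fence *fence)
{
	if (!bb)	/* driver: if (IS_ERR(bb)) err = PTR_ERR(bb); */
		return -1;

	/* emit_pte(m, bb, ...)                map source, destination, CCS */
	bb->cs[bb->len++] = 0x05000000u;	/* placeholder MI_BATCH_BUFFER_END */
	/* update_idx = bb->len;               start of the second batch part */
	/* emit_copy(gt, bb, ...)              the actual blit */

	/* job = xe_bb_create_migration_job(m->q, bb, ...); submit, get fence */
	if (!fence) {
		/* xe_bb_free(bb, NULL)         error path: no fence to wait on */
		return -1;
	}
	/* xe_bb_free(bb, fence)               freed once the job's fence signals */
	return 0;
}
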
1107 struct xe_bb *bb = NULL;
1144 bb = xe_bb_ccs_new(gt, batch_size, read_write);
1145 if (IS_ERR(bb)) {
1147 err = PTR_ERR(bb);
1181 emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
1183 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
1185 bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1186 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
1189 bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1194 xe_assert(xe, (batch_size_allocated == bb->len));
1195 src_bo->bb_ccs[read_write] = bb;
1265 struct xe_bb *bb;
1283 bb = xe_bb_new(gt, batch_size, usm);
1284 if (IS_ERR(bb)) {
1285 err = PTR_ERR(bb);
1292 emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
1294 emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
1296 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1297 update_idx = bb->len;
1300 emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
1302 emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
1304 job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
1307 xe_bb_free(bb, NULL);
1329 xe_bb_free(bb, fence);
1336 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1340 u32 *cs = bb->cs + bb->len;
1354 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1356 bb->len += len;
1359 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1363 u32 *cs = bb->cs + bb->len;
1395 xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1397 bb->len += len;
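
The clear emitters (1336-1397) combine the cursor style with a precomputed length: xe_gt_assert(gt, cs - bb->cs == len + bb->len) checks that exactly len dwords were written before bb->len += len commits them. The invariant, sketched:

#include <assert.h>

static void sketch_emit_fixed_len(struct sketch_bb *bb)
{
	const uint32_t len = 3;	/* dwords this command is expected to take */
	uint32_t *cs = bb->cs + bb->len;

	*cs++ = 0x0;	/* header */
	*cs++ = 0x0;	/* payload */
	*cs++ = 0x0;	/* payload */

	assert((uint32_t)(cs - bb->cs) == len + bb->len);	/* wrote exactly 'len' dwords */
	bb->len += len;
}
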
1422 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1426 emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1428 emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1482 struct xe_bb *bb;
1507 bb = xe_bb_new(gt, batch_size, usm);
1508 if (IS_ERR(bb)) {
1509 err = PTR_ERR(bb);
1518 emit_pte(m, bb, clear_L0_pt, clear_vram,
1523 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1524 update_idx = bb->len;
1527 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1530 emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1535 job = xe_bb_create_migration_job(m->q, bb,
1568 xe_bb_free(bb, fence);
1574 xe_bb_free(bb, NULL);
1591 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1620 /* Ensure populatefn can do memset64 by aligning bb->cs */
1621 if (!(bb->len & 1))
1622 bb->cs[bb->len++] = MI_NOOP;
1624 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1625 bb->cs[bb->len++] = lower_32_bits(addr);
1626 bb->cs[bb->len++] = upper_32_bits(addr);
1628 ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1631 ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1634 bb->len += chunk * 2;
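
write_pgtable (1620-1626) pads with MI_NOOP whenever bb->len is even: after the three SDI header dwords that follow, the payload then starts at an even dword index, i.e. an 8-byte boundary, so the populate callback can use memset64 as the comment at 1620 says. The parity argument, sketched (the SKETCH_ names are stand-ins, though MI_NOOP really is an all-zero dword):

#define SKETCH_MI_NOOP 0x0u

static void sketch_align_payload(struct sketch_bb *bb)
{
	/* even len + 1 pad = odd; odd + 3 header dwords = even again */
	if (!(bb->len & 1))
		bb->cs[bb->len++] = SKETCH_MI_NOOP;

	bb->cs[bb->len++] = 0x0;	/* MI_STORE_DATA_IMM | num-qwords */
	bb->cs[bb->len++] = 0x0;	/* lower_32_bits(addr) */
	bb->cs[bb->len++] = 0x0;	/* upper_32_bits(addr) */

	/* bb->cs + bb->len is now 8-byte aligned (given an aligned bb->cs) */
}
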
1717 struct xe_bb *bb;
1746 bb = xe_bb_new(gt, batch_size, usm);
1747 if (IS_ERR(bb))
1748 return ERR_CAST(bb);
1788 bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1790 bb->cs[bb->len++] = ofs;
1791 bb->cs[bb->len++] = 0; /* upper_32_bits */
1813 bb->cs[bb->len++] = lower_32_bits(addr);
1814 bb->cs[bb->len++] = upper_32_bits(addr);
1825 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1826 update_idx = bb->len;
1838 write_pgtable(tile, bb, addr +
1845 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1846 update_idx = bb->len;
1854 write_pgtable(tile, bb, 0, pt_op, &updates[j],
1859 job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1885 xe_bb_free(bb, fence);
1895 xe_bb_free(bb, NULL);
1970 struct xe_bb *bb, u32 pt_offset,
1988 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1989 bb->cs[bb->len++] = pt_offset;
1990 bb->cs[bb->len++] = 0;
2008 bb->cs[bb->len++] = lower_32_bits(pte);
2009 bb->cs[bb->len++] = upper_32_bits(pte);
2075 struct xe_bb *bb;
2093 bb = xe_bb_new(gt, batch_size, use_usm_batch);
2094 if (IS_ERR(bb)) {
2095 err = PTR_ERR(bb);
2118 build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
2121 build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
2139 bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
2140 update_idx = bb->len;
2142 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
2144 job = xe_bb_create_migration_job(m->q, bb,
2171 xe_bb_free(bb, fence);
2176 xe_bb_free(bb, NULL);