Lines matching full:bb in drivers/gpu/drm/xe/xe_migrate.c (Linux kernel, Intel Xe driver). Each match below uses or mentions the struct xe_bb batch buffer: the migration code builds its GPU command streams in bb->cs and tracks the write position in bb->len.

120 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */  in xe_migrate_vm_addr()
560 struct xe_bb *bb, u32 at_pt, in emit_pte() argument
584 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in emit_pte()
585 bb->cs[bb->len++] = ofs; in emit_pte()
586 bb->cs[bb->len++] = 0; in emit_pte()
614 bb->cs[bb->len++] = lower_32_bits(addr); in emit_pte()
615 bb->cs[bb->len++] = upper_32_bits(addr); in emit_pte()
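
The emit_pte() matches above show the basic emission pattern for page-table entries: an MI_STORE_DATA_IMM header whose length field carries the qword count, the destination offset as a lower/upper dword pair, then the two halves of each 64-bit PTE. Below is a minimal compilable sketch of that pattern; the struct and the MI_* encodings are stand-ins for the driver's real definitions, and the real emit_pte() additionally chunks large runs and folds in PTE flags and the source iterator.

#include <stdint.h>

/* Stand-in for the driver's batch buffer: cs is the command stream,
 * len the current dword write position. */
struct xe_bb {
	uint32_t *cs;
	uint32_t len;
};

/* Placeholder encodings; the real MI_STORE_DATA_IMM/MI_SDI_NUM_QW()
 * definitions live in the driver's instruction headers. */
#define MI_STORE_DATA_IMM	(0x20u << 23)
#define MI_SDI_NUM_QW(n)	(2u * (n) + 1u)

/* Emit 'count' 64-bit PTEs at offset 'ofs', mirroring the
 * header/offset/payload layout visible in emit_pte() above. */
void emit_ptes_sketch(struct xe_bb *bb, uint32_t ofs,
		      const uint64_t *ptes, uint32_t count)
{
	bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(count);
	bb->cs[bb->len++] = ofs;	/* lower 32 bits of destination */
	bb->cs[bb->len++] = 0;		/* upper 32 bits */

	for (uint32_t i = 0; i < count; i++) {
		bb->cs[bb->len++] = (uint32_t)ptes[i];		/* lower_32_bits() */
		bb->cs[bb->len++] = (uint32_t)(ptes[i] >> 32);	/* upper_32_bits() */
	}
}
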
624 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, in emit_copy_ccs() argument
630 u32 *cs = bb->cs + bb->len; in emit_copy_ccs()
661 bb->len = cs - bb->cs; in emit_copy_ccs()
665 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, in emit_copy() argument
683 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2); in emit_copy()
684 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs; in emit_copy()
685 bb->cs[bb->len++] = 0; in emit_copy()
686 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4; in emit_copy()
687 bb->cs[bb->len++] = lower_32_bits(dst_ofs); in emit_copy()
688 bb->cs[bb->len++] = upper_32_bits(dst_ofs); in emit_copy()
689 bb->cs[bb->len++] = 0; in emit_copy()
690 bb->cs[bb->len++] = pitch | mocs; in emit_copy()
691 bb->cs[bb->len++] = lower_32_bits(src_ofs); in emit_copy()
692 bb->cs[bb->len++] = upper_32_bits(src_ofs); in emit_copy()
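
emit_copy() packs a fixed ten-dword XY_FAST_COPY_BLT: the command dword encodes the instruction length as (10 - 2), dword 3 combines the row count (size / pitch) with the width in 32-bit pixels (pitch / 4), and the zero dwords are the start coordinates. A sketch of that layout follows; the opcode values are placeholders, the per-dword comments are a best-effort reading of the blitter layout, and the tile_y bits from the listing are omitted (a linear-to-linear copy is assumed).

#include <assert.h>
#include <stdint.h>

/* Placeholder encodings; see the driver's headers for the real
 * XY_FAST_COPY_BLT_* values. */
#define XY_FAST_COPY_BLT_CMD		(0x42u << 22)
#define XY_FAST_COPY_BLT_DEPTH_32	(3u << 24)

void emit_fast_copy_sketch(uint32_t *cs, uint64_t src_ofs, uint64_t dst_ofs,
			   uint32_t size, uint32_t pitch, uint32_t mocs)
{
	int i = 0;

	cs[i++] = XY_FAST_COPY_BLT_CMD | (10 - 2);	/* dword length - 2 */
	cs[i++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | mocs;
	cs[i++] = 0;					/* dst start X1/Y1 */
	cs[i++] = (size / pitch) << 16 | pitch / 4;	/* rows << 16 | pixels */
	cs[i++] = (uint32_t)dst_ofs;			/* dst address, low */
	cs[i++] = (uint32_t)(dst_ofs >> 32);		/* dst address, high */
	cs[i++] = 0;					/* src start X1/Y1 */
	cs[i++] = pitch | mocs;				/* src pitch + MOCS */
	cs[i++] = (uint32_t)src_ofs;			/* src address, low */
	cs[i++] = (uint32_t)(src_ofs >> 32);		/* src address, high */

	assert(i == 10);	/* must match the advertised length */
}

For example, copying 64 KiB at pitch = XE_PAGE_SIZE (4096) makes dword 3 equal to (65536 / 4096) << 16 | 4096 / 4, i.e. 16 rows of 1024 pixels.
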
701 struct xe_bb *bb, in xe_migrate_ccs_copy() argument
720 emit_copy_ccs(gt, bb, in xe_migrate_ccs_copy()
733 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs, in xe_migrate_ccs_copy()
812 struct xe_bb *bb; in xe_migrate_copy() local
854 bb = xe_bb_new(gt, batch_size, usm); in xe_migrate_copy()
855 if (IS_ERR(bb)) { in xe_migrate_copy()
856 err = PTR_ERR(bb); in xe_migrate_copy()
863 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, in xe_migrate_copy()
869 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, in xe_migrate_copy()
873 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); in xe_migrate_copy()
875 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_copy()
876 update_idx = bb->len; in xe_migrate_copy()
879 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); in xe_migrate_copy()
882 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, in xe_migrate_copy()
888 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_copy()
918 xe_bb_free(bb, fence); in xe_migrate_copy()
925 xe_bb_free(bb, NULL); in xe_migrate_copy()
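
Taken together, the xe_migrate_copy() matches trace the batch-buffer lifecycle: allocate with xe_bb_new() (which can return an ERR_PTR), emit the PTE-update section, close it with MI_BATCH_BUFFER_END and record update_idx, emit the copy itself, wrap everything in a job via xe_bb_create_migration_job(), and finally release the bb with xe_bb_free() against the job's fence, or against NULL on error paths. The runnable toy below mirrors only the two-section shape of the batch; the allocator, encodings, and payload dwords are placeholders, and what the scheduler does with update_idx is left to the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct xe_bb {		/* stand-in: cs = command stream, len = position */
	uint32_t *cs;
	uint32_t len;
};

#define MI_BATCH_BUFFER_END	0x05000000u	/* placeholder encoding */

/* Toy allocator standing in for xe_bb_new(). */
static struct xe_bb *bb_new_sketch(uint32_t ndwords)
{
	struct xe_bb *bb = calloc(1, sizeof(*bb));

	if (!bb)
		return NULL;
	bb->cs = calloc(ndwords, sizeof(*bb->cs));
	if (!bb->cs) {
		free(bb);
		return NULL;
	}
	return bb;
}

int main(void)
{
	struct xe_bb *bb = bb_new_sketch(64);
	uint32_t update_idx;

	if (!bb)
		return 1;

	/* Section 1: PTE updates (emit_pte() in the real driver). */
	bb->cs[bb->len++] = 0xdead0001;		/* placeholder PTE writes */
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	update_idx = bb->len;			/* section 2 starts here */

	/* Section 2: the actual work (emit_copy() in the real driver). */
	bb->cs[bb->len++] = 0xdead0002;		/* placeholder blit */
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;

	printf("section 2 starts at dword %u of %u\n", update_idx, bb->len);
	free(bb->cs);
	free(bb);
	return 0;
}
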
940 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, in emit_clear_link_copy() argument
944 u32 *cs = bb->cs + bb->len; in emit_clear_link_copy()
958 xe_gt_assert(gt, cs - bb->cs == len + bb->len); in emit_clear_link_copy()
960 bb->len += len; in emit_clear_link_copy()
963 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb, in emit_clear_main_copy() argument
967 u32 *cs = bb->cs + bb->len; in emit_clear_main_copy()
999 xe_gt_assert(gt, cs - bb->cs == len + bb->len); in emit_clear_main_copy()
1001 bb->len += len; in emit_clear_main_copy()
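
emit_copy_ccs() and the two clear helpers use a second emission idiom: rather than indexing dword by dword through bb->len, they take a local cursor into the stream, write through it, and commit the advance afterwards, with emit_clear_link_copy()/emit_clear_main_copy() asserting that the cursor moved exactly the advertised number of dwords before bumping bb->len. A compilable sketch of that pattern, with placeholder payload dwords:

#include <assert.h>
#include <stdint.h>

struct xe_bb {		/* stand-in for the driver's batch buffer */
	uint32_t *cs;
	uint32_t len;
};

void emit_fixed_len_sketch(struct xe_bb *bb)
{
	uint32_t *cs = bb->cs + bb->len;	/* local cursor */
	const uint32_t len = 3;			/* dwords we claim to emit */

	*cs++ = 0x0;	/* placeholder dword 0 */
	*cs++ = 0x1;	/* placeholder dword 1 */
	*cs++ = 0x2;	/* placeholder dword 2 */

	/* Mirrors xe_gt_assert(gt, cs - bb->cs == len + bb->len): the
	 * cursor must have advanced exactly 'len' dwords. */
	assert(cs - bb->cs == len + bb->len);
	bb->len += len;
}
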
1026 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, in emit_clear() argument
1030 emit_clear_link_copy(gt, bb, src_ofs, size, pitch); in emit_clear()
1032 emit_clear_main_copy(gt, bb, src_ofs, size, pitch, in emit_clear()
1086 struct xe_bb *bb; in xe_migrate_clear() local
1111 bb = xe_bb_new(gt, batch_size, usm); in xe_migrate_clear()
1112 if (IS_ERR(bb)) { in xe_migrate_clear()
1113 err = PTR_ERR(bb); in xe_migrate_clear()
1122 emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs, in xe_migrate_clear()
1125 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in xe_migrate_clear()
1126 update_idx = bb->len; in xe_migrate_clear()
1129 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram); in xe_migrate_clear()
1132 emit_copy_ccs(gt, bb, clear_L0_ofs, true, in xe_migrate_clear()
1137 job = xe_bb_create_migration_job(m->q, bb, in xe_migrate_clear()
1170 xe_bb_free(bb, fence); in xe_migrate_clear()
1176 xe_bb_free(bb, NULL); in xe_migrate_clear()
1193 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, in write_pgtable() argument
1222 /* Ensure populatefn can do memset64 by aligning bb->cs */ in write_pgtable()
1223 if (!(bb->len & 1)) in write_pgtable()
1224 bb->cs[bb->len++] = MI_NOOP; in write_pgtable()
1226 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); in write_pgtable()
1227 bb->cs[bb->len++] = lower_32_bits(addr); in write_pgtable()
1228 bb->cs[bb->len++] = upper_32_bits(addr); in write_pgtable()
1230 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, in write_pgtable()
1233 ops->clear(pt_update, tile, NULL, bb->cs + bb->len, in write_pgtable()
1236 bb->len += chunk * 2; in write_pgtable()
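
write_pgtable() explains the MI_NOOP padding shown above: the populate()/clear() callbacks write page-table entries straight into the command stream as 64-bit stores (the in-tree comment mentions memset64()), so the payload must start 8-byte aligned. The three header dwords flip parity, so the header has to begin at an odd dword index, and padding whenever bb->len is even beforehand achieves exactly that. A compilable sketch, with placeholder encodings, a toy populate callback, and the assumption that bb->cs itself is 8-byte aligned:

#include <stdint.h>

struct xe_bb {		/* stand-in for the driver's batch buffer */
	uint32_t *cs;
	uint32_t len;
};

#define MI_NOOP			0x00000000u
#define MI_STORE_DATA_IMM	(0x20u << 23)	/* placeholder encoding */
#define MI_SDI_NUM_QW(n)	(2u * (n) + 1u)	/* placeholder encoding */

/* Toy stand-in for ops->populate(): fills 'nqwords' entries directly
 * into the stream. */
static void populate_sketch(uint64_t *dst, uint32_t nqwords)
{
	for (uint32_t i = 0; i < nqwords; i++)
		dst[i] = 0;	/* placeholder PTE values */
}

void emit_chunk_sketch(struct xe_bb *bb, uint64_t addr, uint32_t chunk)
{
	/* Header is 3 dwords, so the payload lands qword-aligned only
	 * if bb->len is odd here; pad with MI_NOOP when it is even. */
	if (!(bb->len & 1))
		bb->cs[bb->len++] = MI_NOOP;

	bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
	bb->cs[bb->len++] = (uint32_t)addr;		/* lower_32_bits() */
	bb->cs[bb->len++] = (uint32_t)(addr >> 32);	/* upper_32_bits() */

	/* Hand the aligned tail of the stream to the callback, then
	 * advance two dwords per qword, as write_pgtable() does. */
	populate_sketch((uint64_t *)(bb->cs + bb->len), chunk);
	bb->len += chunk * 2;
}
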
1319 struct xe_bb *bb; in __xe_migrate_update_pgtables() local
1348 bb = xe_bb_new(gt, batch_size, usm); in __xe_migrate_update_pgtables()
1349 if (IS_ERR(bb)) in __xe_migrate_update_pgtables()
1350 return ERR_CAST(bb); in __xe_migrate_update_pgtables()
1390 bb->cs[bb->len++] = MI_STORE_DATA_IMM | in __xe_migrate_update_pgtables()
1392 bb->cs[bb->len++] = ofs; in __xe_migrate_update_pgtables()
1393 bb->cs[bb->len++] = 0; /* upper_32_bits */ in __xe_migrate_update_pgtables()
1415 bb->cs[bb->len++] = lower_32_bits(addr); in __xe_migrate_update_pgtables()
1416 bb->cs[bb->len++] = upper_32_bits(addr); in __xe_migrate_update_pgtables()
1427 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in __xe_migrate_update_pgtables()
1428 update_idx = bb->len; in __xe_migrate_update_pgtables()
1440 write_pgtable(tile, bb, addr + in __xe_migrate_update_pgtables()
1447 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; in __xe_migrate_update_pgtables()
1448 update_idx = bb->len; in __xe_migrate_update_pgtables()
1456 write_pgtable(tile, bb, 0, pt_op, &updates[j], in __xe_migrate_update_pgtables()
1461 job = xe_bb_create_migration_job(pt_update_ops->q, bb, in __xe_migrate_update_pgtables()
1485 xe_bb_free(bb, fence); in __xe_migrate_update_pgtables()
1495 xe_bb_free(bb, NULL); in __xe_migrate_update_pgtables()