xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 1b5d39e6672fdee158c3306f5cb2df8975c77e5a)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/drm_pagemap.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include <generated/xe_wa_oob.h>
17 
18 #include "instructions/xe_gpu_commands.h"
19 #include "instructions/xe_mi_commands.h"
20 #include "regs/xe_gtt_defs.h"
21 #include "tests/xe_test.h"
22 #include "xe_assert.h"
23 #include "xe_bb.h"
24 #include "xe_bo.h"
25 #include "xe_exec_queue.h"
26 #include "xe_ggtt.h"
27 #include "xe_gt.h"
28 #include "xe_hw_engine.h"
29 #include "xe_lrc.h"
30 #include "xe_map.h"
31 #include "xe_mocs.h"
32 #include "xe_printk.h"
33 #include "xe_pt.h"
34 #include "xe_res_cursor.h"
35 #include "xe_sa.h"
36 #include "xe_sched_job.h"
37 #include "xe_sriov_vf_ccs.h"
38 #include "xe_sync.h"
39 #include "xe_trace_bo.h"
40 #include "xe_validation.h"
41 #include "xe_vm.h"
42 #include "xe_vram.h"
43 
44 /**
45  * struct xe_migrate - migrate context.
46  */
47 struct xe_migrate {
48 	/** @q: Default exec queue used for migration */
49 	struct xe_exec_queue *q;
50 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
51 	struct xe_tile *tile;
52 	/** @job_mutex: Timeline mutex for @q. */
53 	struct mutex job_mutex;
54 	/** @pt_bo: Page-table buffer object. */
55 	struct xe_bo *pt_bo;
56 	/** @batch_base_ofs: VM offset of the migration batch buffer */
57 	u64 batch_base_ofs;
58 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
59 	u64 usm_batch_base_ofs;
60 	/** @cleared_mem_ofs: VM offset of @cleared_bo. */
61 	u64 cleared_mem_ofs;
62 	/** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
63 	u64 large_page_copy_ofs;
64 	/**
65 	 * @large_page_copy_pdes: BO offset at which the PDEs for the 2M pages
66 	 * used for large copies are written
67 	 */
68 	u64 large_page_copy_pdes;
69 	/**
70 	 * @fence: dma-fence representing the last migration job batch.
71 	 * Protected by @job_mutex.
72 	 */
73 	struct dma_fence *fence;
74 	/**
75 	 * @vm_update_sa: For integrated, used to suballocate page-tables
76 	 * out of the pt_bo.
77 	 */
78 	struct drm_suballoc_manager vm_update_sa;
79 	/** @min_chunk_size: For dgfx, minimum chunk size */
80 	u64 min_chunk_size;
81 };
82 
83 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
84 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
85 #define NUM_KERNEL_PDE 15
86 #define NUM_PT_SLOTS 32
87 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
88 #define MAX_NUM_PTE 512
89 #define IDENTITY_OFFSET 256ULL
90 
91 /*
92  * Although MI_STORE_DATA_IMM's "length" field is 10 bits, 0x3FE is the largest
93  * legal value accepted.  Since that instruction field is always stored in
94  * (val-2) format, this translates to 0x400 dwords for the true maximum length
95  * of the instruction.  Subtracting the instruction header (1 dword) and
96  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
97  */
98 #define MAX_PTE_PER_SDI 0x1FEU
99 
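/* devm action callback: tears down the migrate context set up by xe_migrate_init() */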
100 static void xe_migrate_fini(void *arg)
101 {
102 	struct xe_migrate *m = arg;
103 
104 	xe_vm_lock(m->q->vm, false);
105 	xe_bo_unpin(m->pt_bo);
106 	xe_vm_unlock(m->q->vm);
107 
108 	dma_fence_put(m->fence);
109 	xe_bo_put(m->pt_bo);
110 	drm_suballoc_manager_fini(&m->vm_update_sa);
111 	mutex_destroy(&m->job_mutex);
112 	xe_vm_close_and_put(m->q->vm);
113 	xe_exec_queue_put(m->q);
114 }
115 
116 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
117 {
118 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
119 
120 	/* First slot is reserved for mapping the PT bo and bb, so start from 1 */
121 	return (slot + 1ULL) << xe_pt_shift(level + 1);
122 }
123 
124 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
125 {
126 	/*
127 	 * Subtract the DPA base to get the correct offset into the identity
128 	 * table for the migrate offset
129 	 */
130 	u64 identity_offset = IDENTITY_OFFSET;
131 
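	/* Compressed accesses use a second identity map placed directly above the normal one */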
132 	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
133 		identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
134 							(xe->mem.vram), SZ_1G);
135 
136 	addr -= xe_vram_region_dpa_base(xe->mem.vram);
137 	return addr + (identity_offset << xe_pt_shift(2));
138 }
139 
140 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
141 					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
142 {
143 	struct xe_vram_region *vram = xe->mem.vram;
144 	resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
145 	u64 pos, ofs, flags;
146 	u64 entry;
147 	/* XXX: Unclear if this should be usable_size? */
148 	u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
149 	u32 level = 2;
150 
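	/* ofs: byte offset within the PT BO of this identity map's first level-2 (1GiB) entry */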
151 	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
152 	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
153 					    true, 0);
154 
155 	xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
156 
157 	 * Use 1GB pages when possible; the last chunk always uses 2M
158 	 * pages, as mixing reserved memory (stolen, WOCPM) into a single
159 	 * mapping is not allowed on certain platforms.
160 	 * mapping is not allowed on certain platforms.
161 	 */
162 	for (pos = dpa_base; pos < vram_limit;
163 	     pos += SZ_1G, ofs += 8) {
164 		if (pos + SZ_1G >= vram_limit) {
165 			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
166 			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
167 
168 			flags = vm->pt_ops->pte_encode_addr(xe, 0,
169 							    pat_index,
170 							    level - 1,
171 							    true, 0);
172 
173 			for (ofs = pt_2m_ofs; pos < vram_limit;
174 			     pos += SZ_2M, ofs += 8)
175 				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
176 			break;	/* Ensure the pos == vram_limit assert below holds */
177 		}
178 
179 		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
180 	}
181 
182 	xe_assert(xe, pos == vram_limit);
183 }
184 
185 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
186 				 struct xe_vm *vm, struct drm_exec *exec)
187 {
188 	struct xe_device *xe = tile_to_xe(tile);
189 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
190 	u8 id = tile->id;
191 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
192 #define VRAM_IDENTITY_MAP_COUNT	2
193 	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
194 #undef VRAM_IDENTITY_MAP_COUNT
195 	u32 map_ofs, level, i;
196 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
197 	u64 entry, pt29_ofs;
198 
199 	/* Can't bump NUM_PT_SLOTS too high */
200 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
201 	/* Must be a multiple of 64K to support all platforms */
202 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
203 	/* And one slot reserved for the 4KiB page table updates */
204 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
205 
206 	/* Need to be sure everything fits in the first PT, or create more */
207 	xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
208 
209 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
210 				  num_entries * XE_PAGE_SIZE,
211 				  ttm_bo_type_kernel,
212 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
213 				  XE_BO_FLAG_PAGETABLE, exec);
214 	if (IS_ERR(bo))
215 		return PTR_ERR(bo);
216 
217 	/* PT30 & PT31 reserved for 2M identity map */
218 	pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
219 	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
220 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
221 
222 	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
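	/* The top num_setup pages of the BO hold one PT per level plus the two VRAM identity-map PTs */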
223 
224 	/* Map the entire BO in our level 0 pt */
225 	for (i = 0, level = 0; i < num_entries; level++) {
226 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
227 						  pat_index, 0);
228 
229 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
230 
231 		if (vm->flags & XE_VM_FLAG_64K)
232 			i += 16;
233 		else
234 			i += 1;
235 	}
236 
237 	if (!IS_DGFX(xe)) {
238 		/* Write out batch too */
239 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
240 		for (i = 0; i < xe_bo_size(batch);
241 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
242 		     XE_PAGE_SIZE) {
243 			entry = vm->pt_ops->pte_encode_bo(batch, i,
244 							  pat_index, 0);
245 
246 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
247 				  entry);
248 			level++;
249 		}
250 		if (xe->info.has_usm) {
251 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
252 
253 			batch = tile->primary_gt->usm.bb_pool->bo;
254 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
255 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
256 
257 			for (i = 0; i < xe_bo_size(batch);
258 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
259 			     XE_PAGE_SIZE) {
260 				entry = vm->pt_ops->pte_encode_bo(batch, i,
261 								  pat_index, 0);
262 
263 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
264 					  entry);
265 				level++;
266 			}
267 		}
268 	} else {
269 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
270 
271 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
272 
273 		if (xe->info.has_usm) {
274 			batch = tile->primary_gt->usm.bb_pool->bo;
275 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
276 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
277 		}
278 	}
279 
280 	for (level = 1; level < num_level; level++) {
281 		u32 flags = 0;
282 
283 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
284 			flags = XE_PDE_64K;
285 
286 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
287 						  XE_PAGE_SIZE);
288 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
289 			  entry | flags);
290 	}
291 
292 	/* Write PDE's that point to our BO. */
293 	for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
294 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
295 
296 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
297 			  (i + 1) * 8, u64, entry);
298 	}
299 
300 	/* Reserve 2M PDEs */
301 	level = 1;
302 	m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
303 	m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
304 		NUM_PT_SLOTS * 8;
305 
306 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
307 	level = 2;
308 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
309 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
310 		  | XE_PTE_NULL);
311 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
312 
313 	/* Identity map the entire vram at 256GiB offset */
314 	if (IS_DGFX(xe)) {
315 		u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
316 		resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
317 
318 		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
319 					    pat_index, pt30_ofs);
320 		xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
321 
322 		/*
323 		 * Identity map the entire vram for compressed pat_index for xe2+
324 		 * if flat ccs is enabled.
325 		 */
326 		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
327 			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
328 			u64 vram_offset = IDENTITY_OFFSET +
329 				DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
330 			u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
331 
332 			xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
333 							  IDENTITY_OFFSET / 2) * SZ_1G);
334 			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
335 						    comp_pat_index, pt31_ofs);
336 		}
337 	}
338 
339 	/*
340 	 * Example layout created above, with root level = 3:
341 	 * [PT0...PT7]: kernel PT's for copy/clear; 64KiB or 4KiB PTE's
342 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
343 	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
344 	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
345 	 *
346 	 * This makes the lowest part of the VM point to the pagetables.
347 	 * Hence the lowest 2M in the VM points to itself; with a few writes
348 	 * and flushes, other parts of the VM can be used for copying and
349 	 * clearing.
350 	 *
351 	 * For performance, the kernel reserves PDE's, so about 20 are left
352 	 * for async VM updates.
353 	 *
354 	 * To make things easier to work with, each scratch PT is put in slot (1 + PT #)
355 	 * everywhere; this allows lockless updates to scratch pages by using
356 	 * the different addresses in the VM.
357 	 */
358 #define NUM_VMUSA_UNIT_PER_PAGE	32
359 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
360 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
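	/* PT pages not reserved for the kernel are carved into 32 suballocation units of 128 bytes (16 qwords) each, used for user VM updates */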
361 	drm_suballoc_manager_init(&m->vm_update_sa,
362 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
363 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
364 
365 	m->pt_bo = bo;
366 	return 0;
367 }
368 
369 /*
370  * Including the reserved copy engine is required to avoid deadlocks where
371  * migrate jobs servicing the faults get stuck behind the job that faulted.
372  */
373 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
374 {
375 	u32 logical_mask = 0;
376 	struct xe_hw_engine *hwe;
377 	enum xe_hw_engine_id id;
378 
379 	for_each_hw_engine(hwe, gt, id) {
380 		if (hwe->class != XE_ENGINE_CLASS_COPY)
381 			continue;
382 
383 		if (xe_gt_is_usm_hwe(gt, hwe))
384 			logical_mask |= BIT(hwe->logical_instance);
385 	}
386 
387 	return logical_mask;
388 }
389 
390 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
391 {
392 	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
393 }
394 
395 /**
396  * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
397  * @tile: &xe_tile
398  *
399  * Allocates a &xe_migrate for a given tile.
400  *
401  * Return: &xe_migrate on success, or NULL when out of memory.
402  */
403 struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
404 {
405 	struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
406 
407 	if (m)
408 		m->tile = tile;
409 	return m;
410 }
411 
412 static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
413 {
414 	struct xe_device *xe = tile_to_xe(tile);
415 	struct xe_validation_ctx ctx;
416 	struct drm_exec exec;
417 	int err = 0;
418 
419 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
420 		err = xe_vm_drm_exec_lock(vm, &exec);
421 		drm_exec_retry_on_contention(&exec);
422 		err = xe_migrate_prepare_vm(tile, m, vm, &exec);
423 		drm_exec_retry_on_contention(&exec);
424 		xe_validation_retry_on_oom(&ctx, &err);
425 	}
426 
427 	return err;
428 }
429 
430 /**
431  * xe_migrate_init() - Initialize a migrate context
432  * @m: The migration context
433  *
434  * Return: 0 if successful, negative error code on failure
435  */
436 int xe_migrate_init(struct xe_migrate *m)
437 {
438 	struct xe_tile *tile = m->tile;
439 	struct xe_gt *primary_gt = tile->primary_gt;
440 	struct xe_device *xe = tile_to_xe(tile);
441 	struct xe_vm *vm;
442 	int err;
443 
444 	/* Special layout, prepared below. */
445 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
446 			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);
447 	if (IS_ERR(vm))
448 		return PTR_ERR(vm);
449 
450 	err = xe_migrate_lock_prepare_vm(tile, m, vm);
451 	if (err)
452 		goto err_out;
453 
454 	if (xe->info.has_usm) {
455 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
456 							   XE_ENGINE_CLASS_COPY,
457 							   primary_gt->usm.reserved_bcs_instance,
458 							   false);
459 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
460 
461 		if (!hwe || !logical_mask) {
462 			err = -EINVAL;
463 			goto err_out;
464 		}
465 
466 		/*
467 		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
468 		 * PVC; may want to revisit if performance is needed.
469 		 */
470 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
471 					    EXEC_QUEUE_FLAG_KERNEL |
472 					    EXEC_QUEUE_FLAG_PERMANENT |
473 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY |
474 					    EXEC_QUEUE_FLAG_MIGRATE, 0);
475 	} else {
476 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
477 						  XE_ENGINE_CLASS_COPY,
478 						  EXEC_QUEUE_FLAG_KERNEL |
479 						  EXEC_QUEUE_FLAG_PERMANENT |
480 						  EXEC_QUEUE_FLAG_MIGRATE, 0);
481 	}
482 	if (IS_ERR(m->q)) {
483 		err = PTR_ERR(m->q);
484 		goto err_out;
485 	}
486 
487 	mutex_init(&m->job_mutex);
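	/* Tell lockdep that the job_mutex may be acquired in the memory-reclaim path */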
488 	fs_reclaim_acquire(GFP_KERNEL);
489 	might_lock(&m->job_mutex);
490 	fs_reclaim_release(GFP_KERNEL);
491 
492 	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
493 	if (err)
494 		return err;
495 
496 	if (IS_DGFX(xe)) {
497 		if (xe_migrate_needs_ccs_emit(xe))
498 			/* min chunk size corresponds to 4K of CCS Metadata */
499 			m->min_chunk_size = SZ_4K * SZ_64K /
500 				xe_device_ccs_bytes(xe, SZ_64K);
501 		else
502 			/* Somewhat arbitrary to avoid a huge amount of blits */
503 			m->min_chunk_size = SZ_64K;
504 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
505 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
506 			(unsigned long long)m->min_chunk_size);
507 	}
508 
509 	return err;
510 
511 err_out:
512 	xe_vm_close_and_put(vm);
513 	return err;
514 
515 }
516 
517 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
518 {
519 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
520 		return MAX_CCS_LIMITED_TRANSFER;
521 
522 	return MAX_PREEMPTDISABLE_TRANSFER;
523 }
524 
525 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
526 {
527 	struct xe_device *xe = tile_to_xe(m->tile);
528 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
529 
530 	if (mem_type_is_vram(cur->mem_type)) {
531 		/*
532 		 * For VRAM we want to blit in chunks whose sizes are aligned to
533 		 * min_chunk_size, so that the offset to the CCS metadata is
534 		 * page-aligned. The last chunk may be smaller.
535 		 *
536 		 * Another constraint is that we need to limit the blit to
537 		 * the VRAM block size, unless size is smaller than
538 		 * min_chunk_size.
539 		 */
540 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
541 
542 		size = min_t(u64, size, chunk);
543 		if (size > m->min_chunk_size)
544 			size = round_down(size, m->min_chunk_size);
545 	}
546 
547 	return size;
548 }
549 
550 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
551 {
552 	/* If the chunk is not fragmented, allow identity map. */
553 	return cur->size >= size;
554 }
555 
556 #define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
557 #define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)
558 
559 static u32 pte_update_size(struct xe_migrate *m,
560 			   u32 flags,
561 			   struct ttm_resource *res,
562 			   struct xe_res_cursor *cur,
563 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
564 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
565 {
566 	u32 cmds = 0;
567 	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
568 	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
569 
570 	*L0_pt = pt_ofs;
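	/* A contiguous VRAM chunk can be accessed through the identity map; anything else is staged via kernel PTs */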
571 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
572 		/* Offset into identity map. */
573 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
574 					      cur->start + vram_region_gpu_offset(res),
575 					      is_comp_pte);
576 		cmds += cmd_size;
577 	} else {
578 		/* Clip L0 to available size */
579 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
580 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
581 
582 		*L0 = size;
583 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
584 
585 		/* MI_STORE_DATA_IMM */
586 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
587 
588 		/* PTE qwords */
589 		cmds += num_4k_pages * 2;
590 
591 		/* Each chunk has a single blit command */
592 		cmds += cmd_size;
593 	}
594 
595 	return cmds;
596 }
597 
598 static void emit_pte(struct xe_migrate *m,
599 		     struct xe_bb *bb, u32 at_pt,
600 		     bool is_vram, bool is_comp_pte,
601 		     struct xe_res_cursor *cur,
602 		     u32 size, struct ttm_resource *res)
603 {
604 	struct xe_device *xe = tile_to_xe(m->tile);
605 	struct xe_vm *vm = m->q->vm;
606 	u16 pat_index;
607 	u32 ptes;
608 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
609 	u64 cur_ofs;
610 
611 	/* Indirect access needs a compression-enabled, uncached PAT index */
612 	if (GRAPHICS_VERx100(xe) >= 2000)
613 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
614 					  xe->pat.idx[XE_CACHE_WB];
615 	else
616 		pat_index = xe->pat.idx[XE_CACHE_WB];
617 
618 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
619 
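	/* Write the PTEs using MI_STORE_DATA_IMM, at most MAX_PTE_PER_SDI qwords per command */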
620 	while (ptes) {
621 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
622 
623 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
624 		bb->cs[bb->len++] = ofs;
625 		bb->cs[bb->len++] = 0;
626 
627 		cur_ofs = ofs;
628 		ofs += chunk * 8;
629 		ptes -= chunk;
630 
631 		while (chunk--) {
632 			u64 addr, flags = 0;
633 			bool devmem = false;
634 
635 			addr = xe_res_dma(cur) & PAGE_MASK;
636 			if (is_vram) {
637 				if (vm->flags & XE_VM_FLAG_64K) {
638 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
639 
640 					xe_assert(xe, (va & (SZ_64K - 1)) ==
641 						  (addr & (SZ_64K - 1)));
642 
643 					flags |= XE_PTE_PS64;
644 				}
645 
646 				addr += vram_region_gpu_offset(res);
647 				devmem = true;
648 			}
649 
650 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
651 							   addr, pat_index,
652 							   0, devmem, flags);
653 			bb->cs[bb->len++] = lower_32_bits(addr);
654 			bb->cs[bb->len++] = upper_32_bits(addr);
655 
656 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
657 			cur_ofs += 8;
658 		}
659 	}
660 }
661 
662 #define EMIT_COPY_CCS_DW 5
663 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
664 			  u64 dst_ofs, bool dst_is_indirect,
665 			  u64 src_ofs, bool src_is_indirect,
666 			  u32 size)
667 {
668 	struct xe_device *xe = gt_to_xe(gt);
669 	u32 *cs = bb->cs + bb->len;
670 	u32 num_ccs_blks;
671 	u32 num_pages;
672 	u32 ccs_copy_size;
673 	u32 mocs;
674 
675 	if (GRAPHICS_VERx100(xe) >= 2000) {
676 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
677 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
678 
679 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
680 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
681 
682 	} else {
683 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
684 					    NUM_CCS_BYTES_PER_BLOCK);
685 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
686 
687 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
688 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
689 	}
690 
691 	*cs++ = XY_CTRL_SURF_COPY_BLT |
692 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
693 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
694 		ccs_copy_size;
695 	*cs++ = lower_32_bits(src_ofs);
696 	*cs++ = upper_32_bits(src_ofs) | mocs;
697 	*cs++ = lower_32_bits(dst_ofs);
698 	*cs++ = upper_32_bits(dst_ofs) | mocs;
699 
700 	bb->len = cs - bb->cs;
701 }
702 
703 #define EMIT_COPY_DW 10
704 static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
705 			      u64 dst_ofs, unsigned int size,
706 			      unsigned int pitch)
707 {
708 	struct xe_device *xe = gt_to_xe(gt);
709 	u32 mocs = 0;
710 	u32 tile_y = 0;
711 
712 	xe_gt_assert(gt, !(pitch & 3));
713 	xe_gt_assert(gt, size / pitch <= S16_MAX);
714 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
715 	xe_gt_assert(gt, pitch <= U16_MAX);
716 
717 	if (GRAPHICS_VER(xe) >= 20)
718 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
719 
720 	if (GRAPHICS_VERx100(xe) >= 1250)
721 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
722 
723 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
724 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
725 	bb->cs[bb->len++] = 0;
726 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
727 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
728 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
729 	bb->cs[bb->len++] = 0;
730 	bb->cs[bb->len++] = pitch | mocs;
731 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
732 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
733 }
734 
735 #define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
736 static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
737 			  u64 dst_ofs, unsigned int size, unsigned int pitch)
738 {
739 	u32 mode, copy_type, width;
740 
741 	xe_gt_assert(gt, IS_ALIGNED(size, pitch));
742 	xe_gt_assert(gt, pitch <= U16_MAX);
743 	xe_gt_assert(gt, pitch);
744 	xe_gt_assert(gt, size);
745 
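	/* Prefer the 256-byte page-copy mode when size and addresses are aligned; otherwise fall back to matrix or linear byte copy */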
746 	if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
747 	    IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
748 	    IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
749 		mode = MEM_COPY_PAGE_COPY_MODE;
750 		copy_type = 0; /* linear copy */
751 		width = size / PAGE_COPY_MODE_PS;
752 	} else if (pitch > 1) {
753 		xe_gt_assert(gt, size / pitch <= U16_MAX);
754 		mode = 0; /* BYTE_COPY */
755 		copy_type = MEM_COPY_MATRIX_COPY;
756 		width = pitch;
757 	} else {
758 		mode = 0; /* BYTE_COPY */
759 		copy_type = 0; /* linear copy */
760 		width = size;
761 	}
762 
763 	xe_gt_assert(gt, width <= U16_MAX);
764 
765 	bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
766 	bb->cs[bb->len++] = width - 1;
767 	bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
768 	bb->cs[bb->len++] = pitch - 1;
769 	bb->cs[bb->len++] = pitch - 1;
770 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
771 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
772 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
773 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
774 	bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
775 			    FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
776 }
777 
778 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
779 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
780 		      unsigned int pitch)
781 {
782 	struct xe_device *xe = gt_to_xe(gt);
783 
784 	if (xe->info.has_mem_copy_instr)
785 		emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
786 	else
787 		emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
788 }
789 
790 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
791 {
792 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
793 }
794 
795 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
796 			       struct xe_bb *bb,
797 			       u64 src_ofs, bool src_is_indirect,
798 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
799 			       u64 ccs_ofs, bool copy_ccs)
800 {
801 	struct xe_gt *gt = m->tile->primary_gt;
802 	u32 flush_flags = 0;
803 
804 	if (!copy_ccs && dst_is_indirect) {
805 		/*
806 		 * If the src is already in vram, then it should already
807 		 * have been cleared by us, or has been populated by the
808 		 * user. Make sure we copy the CCS aux state as-is.
809 		 *
810 		 * Otherwise if the bo doesn't have any CCS metadata attached,
811 		 * we still need to clear it for security reasons.
812 		 */
813 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
814 
815 		emit_copy_ccs(gt, bb,
816 			      dst_ofs, true,
817 			      ccs_src_ofs, src_is_indirect, dst_size);
818 
819 		flush_flags = MI_FLUSH_DW_CCS;
820 	} else if (copy_ccs) {
821 		if (!src_is_indirect)
822 			src_ofs = ccs_ofs;
823 		else if (!dst_is_indirect)
824 			dst_ofs = ccs_ofs;
825 
826 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
827 
828 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
829 			      src_is_indirect, dst_size);
830 		if (dst_is_indirect)
831 			flush_flags = MI_FLUSH_DW_CCS;
832 	}
833 
834 	return flush_flags;
835 }
836 
837 /**
838  * xe_migrate_copy() - Copy content of TTM resources.
839  * @m: The migration context.
840  * @src_bo: The buffer object @src is currently bound to.
841  * @dst_bo: If copying between resources created for the same bo, set this to
842  * the same value as @src_bo. If copying between buffer objects, set it to
843  * the buffer object @dst is currently bound to.
844  * @src: The source TTM resource.
845  * @dst: The dst TTM resource.
846  * @copy_only_ccs: If true, copy only CCS metadata
847  *
848  * Copies the contents of @src to @dst: On flat CCS devices,
849  * the CCS metadata is copied as well if needed, or if not present,
850  * the CCS metadata of @dst is cleared for security reasons.
851  *
852  * Return: Pointer to a dma_fence representing the last copy batch, or
853  * an error pointer on failure. If there is a failure, any copy operation
854  * started by the function call has been synced.
855  */
856 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
857 				  struct xe_bo *src_bo,
858 				  struct xe_bo *dst_bo,
859 				  struct ttm_resource *src,
860 				  struct ttm_resource *dst,
861 				  bool copy_only_ccs)
862 {
863 	struct xe_gt *gt = m->tile->primary_gt;
864 	struct xe_device *xe = gt_to_xe(gt);
865 	struct dma_fence *fence = NULL;
866 	u64 size = xe_bo_size(src_bo);
867 	struct xe_res_cursor src_it, dst_it, ccs_it;
868 	u64 src_L0_ofs, dst_L0_ofs;
869 	u32 src_L0_pt, dst_L0_pt;
870 	u64 src_L0, dst_L0;
871 	int pass = 0;
872 	int err;
873 	bool src_is_pltt = src->mem_type == XE_PL_TT;
874 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
875 	bool src_is_vram = mem_type_is_vram(src->mem_type);
876 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
877 	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
878 	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
879 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
880 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
881 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
882 	bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
883 		GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
884 
885 	/* Copying CCS between two different BOs is not supported yet. */
886 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
887 		return ERR_PTR(-EINVAL);
888 
889 	if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
890 		return ERR_PTR(-EINVAL);
891 
892 	if (!src_is_vram)
893 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
894 	else
895 		xe_res_first(src, 0, size, &src_it);
896 	if (!dst_is_vram)
897 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
898 	else
899 		xe_res_first(dst, 0, size, &dst_it);
900 
901 	if (copy_system_ccs)
902 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
903 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
904 				&ccs_it);
905 
906 	while (size) {
907 		u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
908 		struct xe_sched_job *job;
909 		struct xe_bb *bb;
910 		u32 flush_flags = 0;
911 		u32 update_idx;
912 		u64 ccs_ofs, ccs_size;
913 		u32 ccs_pt;
914 		u32 pte_flags;
915 
916 		bool usm = xe->info.has_usm;
917 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
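		/* Number of level-0 PTs needed to map one full pass; each PT maps 2MiB */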
918 
919 		src_L0 = xe_migrate_res_sizes(m, &src_it);
920 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
921 
922 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
923 			pass++, src_L0, dst_L0);
924 
925 		src_L0 = min(src_L0, dst_L0);
926 
927 		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
928 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
929 		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
930 					      &src_L0_ofs, &src_L0_pt, 0, 0,
931 					      avail_pts);
932 		if (copy_only_ccs) {
933 			dst_L0_ofs = src_L0_ofs;
934 		} else {
935 			pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
936 			batch_size += pte_update_size(m, pte_flags, dst,
937 						      &dst_it, &src_L0,
938 						      &dst_L0_ofs, &dst_L0_pt,
939 						      0, avail_pts, avail_pts);
940 		}
941 
942 		if (copy_system_ccs) {
943 			xe_assert(xe, type_device);
944 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
945 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
946 						      &ccs_ofs, &ccs_pt, 0,
947 						      2 * avail_pts,
948 						      avail_pts);
949 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
950 		}
951 
952 		/* Add copy commands size here */
953 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
954 			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
955 
956 		bb = xe_bb_new(gt, batch_size, usm);
957 		if (IS_ERR(bb)) {
958 			err = PTR_ERR(bb);
959 			goto err_sync;
960 		}
961 
962 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
963 			xe_res_next(&src_it, src_L0);
964 		else
965 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
966 				 &src_it, src_L0, src);
967 
968 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
969 			xe_res_next(&dst_it, src_L0);
970 		else if (!copy_only_ccs)
971 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
972 				 &dst_it, src_L0, dst);
973 
974 		if (copy_system_ccs)
975 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
976 
977 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
978 		update_idx = bb->len;
979 
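		/* Commands above update the PTEs; the copy/CCS commands below start at update_idx and form the second part of the batch */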
980 		if (!copy_only_ccs)
981 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
982 
983 		if (needs_ccs_emit)
984 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
985 							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
986 							  dst_L0_ofs,
987 							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
988 							  src_L0, ccs_ofs, copy_ccs);
989 
990 		job = xe_bb_create_migration_job(m->q, bb,
991 						 xe_migrate_batch_base(m, usm),
992 						 update_idx);
993 		if (IS_ERR(job)) {
994 			err = PTR_ERR(job);
995 			goto err;
996 		}
997 
998 		xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
999 		if (!fence) {
1000 			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
1001 						    DMA_RESV_USAGE_BOOKKEEP);
1002 			if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
1003 				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
1004 							    DMA_RESV_USAGE_BOOKKEEP);
1005 			if (err)
1006 				goto err_job;
1007 		}
1008 
1009 		mutex_lock(&m->job_mutex);
1010 		xe_sched_job_arm(job);
1011 		dma_fence_put(fence);
1012 		fence = dma_fence_get(&job->drm.s_fence->finished);
1013 		xe_sched_job_push(job);
1014 
1015 		dma_fence_put(m->fence);
1016 		m->fence = dma_fence_get(fence);
1017 
1018 		mutex_unlock(&m->job_mutex);
1019 
1020 		xe_bb_free(bb, fence);
1021 		size -= src_L0;
1022 		continue;
1023 
1024 err_job:
1025 		xe_sched_job_put(job);
1026 err:
1027 		xe_bb_free(bb, NULL);
1028 
1029 err_sync:
1030 		/* Sync partial copy if any. FIXME: under job_mutex? */
1031 		if (fence) {
1032 			dma_fence_wait(fence, false);
1033 			dma_fence_put(fence);
1034 		}
1035 
1036 		return ERR_PTR(err);
1037 	}
1038 
1039 	return fence;
1040 }
1041 
1042 /**
1043  * xe_migrate_lrc() - Get the LRC from migrate context.
1044  * @migrate: Migrate context.
1045  *
1046  * Return: Pointer to LRC on success, error on failure
1047  */
1048 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
1049 {
1050 	return migrate->q->lrc[0];
1051 }
1052 
1053 static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
1054 {
1055 	/*
1056 	 * The migrate VM is self-referential so it can modify its own PTEs (see
1057 	 * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
1058 	 * entries for kernel operations (copies, clears, CCS migrate), and
1059 	 * suballocate the rest to user operations (binds/unbinds). With
1060 	 * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
1061 	 * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
1062 	 */
1063 	return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
1064 }
1065 
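/* Emit MI_FLUSH_DW with TLB invalidation; the post-sync dword write lands at a reserved offset in the migrate VM */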
1066 static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
1067 {
1068 	u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
1069 
1070 	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
1071 		  MI_FLUSH_IMM_DW | flags;
1072 	dw[i++] = lower_32_bits(addr);
1073 	dw[i++] = upper_32_bits(addr);
1074 	dw[i++] = MI_NOOP;
1075 	dw[i++] = MI_NOOP;
1076 
1077 	return i;
1078 }
1079 
1080 /**
1081  * xe_migrate_ccs_rw_copy() - Create the CCS read/write copy batch buffer.
1082  * @tile: Tile whose migration context is to be used.
1083  * @q: Exec queue to be used along with the migration context.
1084  * @src_bo: The buffer object whose CCS metadata is copied.
1085  * @read_write: Whether to create BB commands for the CCS read or write context.
1086  *
1087  * Creates batch buffer instructions to copy CCS metadata from CCS pool to
1088  * memory and vice versa.
1089  *
1090  * This function should only be called for IGPU.
1091  *
1092  * Return: 0 if successful, negative error code on failure.
1093  */
1094 int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
1095 			   struct xe_bo *src_bo,
1096 			   enum xe_sriov_vf_ccs_rw_ctxs read_write)
1097 
1098 {
1099 	bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
1100 	bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
1101 	struct ttm_resource *src = src_bo->ttm.resource;
1102 	struct xe_migrate *m = tile->migrate;
1103 	struct xe_gt *gt = tile->primary_gt;
1104 	u32 batch_size, batch_size_allocated;
1105 	struct xe_device *xe = gt_to_xe(gt);
1106 	struct xe_res_cursor src_it, ccs_it;
1107 	struct xe_sriov_vf_ccs_ctx *ctx;
1108 	struct xe_sa_manager *bb_pool;
1109 	u64 size = xe_bo_size(src_bo);
1110 	struct xe_bb *bb = NULL;
1111 	u64 src_L0, src_L0_ofs;
1112 	u32 src_L0_pt;
1113 	int err;
1114 
1115 	ctx = &xe->sriov.vf.ccs.contexts[read_write];
1116 
1117 	xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
1118 
1119 	xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
1120 			PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
1121 			&ccs_it);
1122 
1123 	/* Calculate Batch buffer size */
1124 	batch_size = 0;
1125 	while (size) {
1126 		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
1127 		u64 ccs_ofs, ccs_size;
1128 		u32 ccs_pt;
1129 
1130 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1131 
1132 		src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
1133 
1134 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1135 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1136 					      avail_pts);
1137 
1138 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1139 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1140 					      &ccs_pt, 0, avail_pts, avail_pts);
1141 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1142 
1143 		/* Add copy commands size here */
1144 		batch_size += EMIT_COPY_CCS_DW;
1145 
1146 		size -= src_L0;
1147 	}
1148 
1149 	bb_pool = ctx->mem.ccs_bb_pool;
1150 	guard(mutex) (xe_sa_bo_swap_guard(bb_pool));
1151 	xe_sa_bo_swap_shadow(bb_pool);
1152 
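	/* Build the new BB via the shadow buffer; the shadow is synced back after the ring BB address is updated (see the rationale in xe_migrate_ccs_rw_copy_clear()) */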
1153 	bb = xe_bb_ccs_new(gt, batch_size, read_write);
1154 	if (IS_ERR(bb)) {
1155 		drm_err(&xe->drm, "BB allocation failed.\n");
1156 		err = PTR_ERR(bb);
1157 		return err;
1158 	}
1159 
1160 	batch_size_allocated = batch_size;
1161 	size = xe_bo_size(src_bo);
1162 	batch_size = 0;
1163 
1164 	/*
1165 	 * Emit PTE and copy commands here.
1166 	 * The CCS copy command only supports a limited size. If the size to be
1167 	 * copied is more than the limit, divide the copy into chunks. So, calculate
1168 	 * the sizes here again before the copy commands are emitted.
1169 	 */
1170 	while (size) {
1171 		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
1172 		u32 flush_flags = 0;
1173 		u64 ccs_ofs, ccs_size;
1174 		u32 ccs_pt;
1175 
1176 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1177 
1178 		src_L0 = xe_migrate_res_sizes(m, &src_it);
1179 
1180 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1181 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1182 					      avail_pts);
1183 
1184 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1185 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1186 					      &ccs_pt, 0, avail_pts, avail_pts);
1187 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1188 		batch_size += EMIT_COPY_CCS_DW;
1189 
1190 		emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
1191 
1192 		emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
1193 
1194 		bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1195 		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
1196 						  src_L0_ofs, dst_is_pltt,
1197 						  src_L0, ccs_ofs, true);
1198 		bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1199 
1200 		size -= src_L0;
1201 	}
1202 
1203 	xe_assert(xe, (batch_size_allocated == bb->len));
1204 	src_bo->bb_ccs[read_write] = bb;
1205 
1206 	xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1207 	xe_sa_bo_sync_shadow(bb->bo);
1208 	return 0;
1209 }
1210 
1211 /**
1212  * xe_migrate_ccs_rw_copy_clear() - Clear the CCS read/write batch buffer
1213  * content.
1214  * @src_bo: The buffer object whose CCS batch buffer is cleared.
1215  * @read_write: Selects the CCS read or write context whose BB is cleared.
1216  *
1217  * Directly clearing the BB lacks atomicity and can lead to undefined
1218  * behavior if the vCPU is halted mid-operation during the clearing
1219  * process. To avoid this issue, we use a shadow buffer object approach.
1220  *
1221  * First swap the SA BO address with the shadow BO, perform the clearing
1222  * operation on the BB, update the shadow BO in the ring buffer, then
1223  * sync the shadow and the actual buffer to maintain consistency.
1224  *
1225  * Return: None.
1226  */
1227 void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo,
1228 				  enum xe_sriov_vf_ccs_rw_ctxs read_write)
1229 {
1230 	struct xe_bb *bb = src_bo->bb_ccs[read_write];
1231 	struct xe_device *xe = xe_bo_device(src_bo);
1232 	struct xe_sriov_vf_ccs_ctx *ctx;
1233 	struct xe_sa_manager *bb_pool;
1234 	u32 *cs;
1235 
1236 	xe_assert(xe, IS_SRIOV_VF(xe));
1237 
1238 	ctx = &xe->sriov.vf.ccs.contexts[read_write];
1239 	bb_pool = ctx->mem.ccs_bb_pool;
1240 
1241 	guard(mutex) (xe_sa_bo_swap_guard(bb_pool));
1242 	xe_sa_bo_swap_shadow(bb_pool);
1243 
1244 	cs = xe_sa_bo_cpu_addr(bb->bo);
1245 	memset(cs, MI_NOOP, bb->len * sizeof(u32));
1246 	xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1247 
1248 	xe_sa_bo_sync_shadow(bb->bo);
1249 
1250 	xe_bb_free(bb, NULL);
1251 	src_bo->bb_ccs[read_write] = NULL;
1252 }
1253 
1254 /**
1255  * xe_migrate_exec_queue() - Get the exec queue from the migrate context.
1256  * @migrate: Migrate context.
1257  *
1258  * Return: Pointer to execution queue on success, error on failure
1259  */
1260 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
1261 {
1262 	return migrate->q;
1263 }
1264 
1265 /**
1266  * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
1267  * @vram_bo: The VRAM buffer object.
1268  * @vram_offset: The VRAM offset.
1269  * @sysmem_bo: The sysmem buffer object.
1270  * @sysmem_offset: The sysmem offset.
1271  * @size: The size of VRAM chunk to copy.
1272  * @dir: The direction of the copy operation.
1273  *
1274  * Copies a portion of a buffer object between VRAM and system memory.
1275  * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
1276  * copying to system memory.
1277  *
1278  * Return: Pointer to a dma_fence representing the last copy batch, or
1279  * an error pointer on failure. If there is a failure, any copy operation
1280  * started by the function call has been synced.
1281  */
1282 struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
1283 					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
1284 					     u64 size, enum xe_migrate_copy_dir dir)
1285 {
1286 	struct xe_device *xe = xe_bo_device(vram_bo);
1287 	struct xe_tile *tile = vram_bo->tile;
1288 	struct xe_gt *gt = tile->primary_gt;
1289 	struct xe_migrate *m = tile->migrate;
1290 	struct dma_fence *fence = NULL;
1291 	struct ttm_resource *vram = vram_bo->ttm.resource;
1292 	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
1293 	struct xe_res_cursor vram_it, sysmem_it;
1294 	u64 vram_L0_ofs, sysmem_L0_ofs;
1295 	u32 vram_L0_pt, sysmem_L0_pt;
1296 	u64 vram_L0, sysmem_L0;
1297 	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
1298 	bool use_comp_pat = to_sysmem &&
1299 		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
1300 	int pass = 0;
1301 	int err;
1302 
1303 	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
1304 	xe_assert(xe, xe_bo_is_vram(vram_bo));
1305 	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
1306 	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
1307 	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
1308 
1309 	xe_res_first(vram, vram_offset, size, &vram_it);
1310 	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
1311 
1312 	while (size) {
1313 		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
1314 		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
1315 		struct xe_sched_job *job;
1316 		struct xe_bb *bb;
1317 		u32 update_idx;
1318 		bool usm = xe->info.has_usm;
1319 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1320 
1321 		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
1322 		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
1323 
1324 		xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
1325 
1326 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
1327 		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
1328 					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
1329 
1330 		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
1331 					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
1332 		batch_size += EMIT_COPY_DW;
1333 
1334 		bb = xe_bb_new(gt, batch_size, usm);
1335 		if (IS_ERR(bb)) {
1336 			err = PTR_ERR(bb);
1337 			return ERR_PTR(err);
1338 		}
1339 
1340 		if (xe_migrate_allow_identity(vram_L0, &vram_it))
1341 			xe_res_next(&vram_it, vram_L0);
1342 		else
1343 			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
1344 
1345 		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
1346 
1347 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1348 		update_idx = bb->len;
1349 
1350 		if (to_sysmem)
1351 			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
1352 		else
1353 			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
1354 
1355 		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
1356 						 update_idx);
1357 		if (IS_ERR(job)) {
1358 			xe_bb_free(bb, NULL);
1359 			err = PTR_ERR(job);
1360 			return ERR_PTR(err);
1361 		}
1362 
1363 		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1364 
1365 		xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
1366 						     DMA_RESV_USAGE_BOOKKEEP));
1367 		xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
1368 						     DMA_RESV_USAGE_BOOKKEEP));
1369 
1370 		scoped_guard(mutex, &m->job_mutex) {
1371 			xe_sched_job_arm(job);
1372 			dma_fence_put(fence);
1373 			fence = dma_fence_get(&job->drm.s_fence->finished);
1374 			xe_sched_job_push(job);
1375 
1376 			dma_fence_put(m->fence);
1377 			m->fence = dma_fence_get(fence);
1378 		}
1379 
1380 		xe_bb_free(bb, fence);
1381 		size -= vram_L0;
1382 	}
1383 
1384 	return fence;
1385 }
1386 
1387 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1388 				 u32 size, u32 pitch)
1389 {
1390 	struct xe_device *xe = gt_to_xe(gt);
1391 	u32 *cs = bb->cs + bb->len;
1392 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
1393 
1394 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
1395 	*cs++ = pitch - 1;
1396 	*cs++ = (size / pitch) - 1;
1397 	*cs++ = pitch - 1;
1398 	*cs++ = lower_32_bits(src_ofs);
1399 	*cs++ = upper_32_bits(src_ofs);
1400 	if (GRAPHICS_VERx100(xe) >= 2000)
1401 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1402 	else
1403 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1404 
1405 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1406 
1407 	bb->len += len;
1408 }
1409 
1410 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1411 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
1412 {
1413 	struct xe_device *xe = gt_to_xe(gt);
1414 	u32 *cs = bb->cs + bb->len;
1415 	u32 len = XY_FAST_COLOR_BLT_DW;
1416 
1417 	if (GRAPHICS_VERx100(xe) < 1250)
1418 		len = 11;
1419 
1420 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
1421 		(len - 2);
1422 	if (GRAPHICS_VERx100(xe) >= 2000)
1423 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
1424 			(pitch - 1);
1425 	else
1426 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
1427 			(pitch - 1);
1428 	*cs++ = 0;
1429 	*cs++ = (size / pitch) << 16 | pitch / 4;
1430 	*cs++ = lower_32_bits(src_ofs);
1431 	*cs++ = upper_32_bits(src_ofs);
1432 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
1433 	*cs++ = 0;
1434 	*cs++ = 0;
1435 	*cs++ = 0;
1436 	*cs++ = 0;
1437 
1438 	if (len > 11) {
1439 		*cs++ = 0;
1440 		*cs++ = 0;
1441 		*cs++ = 0;
1442 		*cs++ = 0;
1443 		*cs++ = 0;
1444 	}
1445 
1446 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1447 
1448 	bb->len += len;
1449 }
1450 
1451 static bool has_service_copy_support(struct xe_gt *gt)
1452 {
1453 	/*
1454 	 * What we care about is whether the architecture was designed with
1455 	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
1456 	 * instructions) so check the architectural engine list rather than the
1457 	 * actual list since these instructions are usable on BCS0 even if
1458 	 * all of the actual service copy engines (BCS1-BCS8) have been fused
1459 	 * off.
1460 	 */
1461 	return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1462 					      XE_HW_ENGINE_BCS1);
1463 }
1464 
1465 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1466 {
1467 	if (has_service_copy_support(gt))
1468 		return PVC_MEM_SET_CMD_LEN_DW;
1469 	else
1470 		return XY_FAST_COLOR_BLT_DW;
1471 }
1472 
1473 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1474 		       u32 size, u32 pitch, bool is_vram)
1475 {
1476 	if (has_service_copy_support(gt))
1477 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1478 	else
1479 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1480 				     is_vram);
1481 }
1482 
1483 /**
1484  * xe_migrate_clear() - Clear content of a TTM resource.
1485  * @m: The migration context.
1486  * @bo: The buffer object @dst is currently bound to.
1487  * @dst: The dst TTM resource to be cleared.
1488  * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1489  *
1490  * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1491  * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1492  * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1493  * TODO: Eliminate the @bo argument.
1494  *
1495  * Return: Pointer to a dma_fence representing the last clear batch, or
1496  * an error pointer on failure. If there is a failure, any clear operation
1497  * started by the function call has been synced.
1498  */
1499 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1500 				   struct xe_bo *bo,
1501 				   struct ttm_resource *dst,
1502 				   u32 clear_flags)
1503 {
1504 	bool clear_vram = mem_type_is_vram(dst->mem_type);
1505 	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1506 	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1507 	struct xe_gt *gt = m->tile->primary_gt;
1508 	struct xe_device *xe = gt_to_xe(gt);
1509 	bool clear_only_system_ccs = false;
1510 	struct dma_fence *fence = NULL;
1511 	u64 size = xe_bo_size(bo);
1512 	struct xe_res_cursor src_it;
1513 	struct ttm_resource *src = dst;
1514 	int err;
1515 
1516 	if (WARN_ON(!clear_bo_data && !clear_ccs))
1517 		return NULL;
1518 
1519 	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1520 		clear_only_system_ccs = true;
1521 
1522 	if (!clear_vram)
1523 		xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
1524 	else
1525 		xe_res_first(src, 0, xe_bo_size(bo), &src_it);
1526 
1527 	while (size) {
1528 		u64 clear_L0_ofs;
1529 		u32 clear_L0_pt;
1530 		u32 flush_flags = 0;
1531 		u64 clear_L0;
1532 		struct xe_sched_job *job;
1533 		struct xe_bb *bb;
1534 		u32 batch_size, update_idx;
1535 		u32 pte_flags;
1536 
1537 		bool usm = xe->info.has_usm;
1538 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1539 
1540 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1541 
1542 		/* Calculate final sizes and batch size. */
1543 		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1544 		batch_size = 1 +
1545 			pte_update_size(m, pte_flags, src, &src_it,
1546 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1547 					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1548 					avail_pts);
1549 
1550 		if (xe_migrate_needs_ccs_emit(xe))
1551 			batch_size += EMIT_COPY_CCS_DW;
1552 
1553 		/* Clear commands */
1554 
1555 		if (WARN_ON_ONCE(!clear_L0))
1556 			break;
1557 
1558 		bb = xe_bb_new(gt, batch_size, usm);
1559 		if (IS_ERR(bb)) {
1560 			err = PTR_ERR(bb);
1561 			goto err_sync;
1562 		}
1563 
1564 		size -= clear_L0;
1565 		/* Preemption is enabled again by the ring ops. */
1566 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
1567 			xe_res_next(&src_it, clear_L0);
1568 		} else {
1569 			emit_pte(m, bb, clear_L0_pt, clear_vram,
1570 				 clear_only_system_ccs, &src_it, clear_L0, dst);
1571 			flush_flags |= MI_INVALIDATE_TLB;
1572 		}
1573 
1574 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1575 		update_idx = bb->len;
1576 
1577 		if (clear_bo_data)
1578 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1579 
1580 		if (xe_migrate_needs_ccs_emit(xe)) {
1581 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1582 				      m->cleared_mem_ofs, false, clear_L0);
1583 			flush_flags |= MI_FLUSH_DW_CCS;
1584 		}
1585 
1586 		job = xe_bb_create_migration_job(m->q, bb,
1587 						 xe_migrate_batch_base(m, usm),
1588 						 update_idx);
1589 		if (IS_ERR(job)) {
1590 			err = PTR_ERR(job);
1591 			goto err;
1592 		}
1593 
1594 		xe_sched_job_add_migrate_flush(job, flush_flags);
1595 		if (!fence) {
1596 			/*
1597 			 * There can't be anything userspace related at this
1598 			 * point, so we just need to respect any potential move
1599 			 * fences, which are always tracked as
1600 			 * DMA_RESV_USAGE_KERNEL.
1601 			 */
1602 			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1603 						    DMA_RESV_USAGE_KERNEL);
1604 			if (err)
1605 				goto err_job;
1606 		}
1607 
1608 		mutex_lock(&m->job_mutex);
1609 		xe_sched_job_arm(job);
1610 		dma_fence_put(fence);
1611 		fence = dma_fence_get(&job->drm.s_fence->finished);
1612 		xe_sched_job_push(job);
1613 
1614 		dma_fence_put(m->fence);
1615 		m->fence = dma_fence_get(fence);
1616 
1617 		mutex_unlock(&m->job_mutex);
1618 
1619 		xe_bb_free(bb, fence);
1620 		continue;
1621 
1622 err_job:
1623 		xe_sched_job_put(job);
1624 err:
1625 		xe_bb_free(bb, NULL);
1626 err_sync:
1627 		/* Sync partial copies if any. FIXME: job_mutex? */
1628 		if (fence) {
1629 			dma_fence_wait(fence, false);
1630 			dma_fence_put(fence);
1631 		}
1632 
1633 		return ERR_PTR(err);
1634 	}
1635 
1636 	if (clear_ccs)
1637 		bo->ccs_cleared = true;
1638 
1639 	return fence;
1640 }
1641 
1642 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1643 			  const struct xe_vm_pgtable_update_op *pt_op,
1644 			  const struct xe_vm_pgtable_update *update,
1645 			  struct xe_migrate_pt_update *pt_update)
1646 {
1647 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1648 	u32 chunk;
1649 	u32 ofs = update->ofs, size = update->qwords;
1650 
1651 	/*
1652 	 * If we have 512 entries (max), we would populate it ourselves,
1653 	 * and update the PDE above it to the new pointer.
1654 	 * The only time this can happen is if we have to update the top
1655 	 * PDE. This requires a BO that is almost vm->size big.
1656 	 *
1657 	 * This shouldn't be possible in practice, but might change when 16K
1658 	 * pages are used. Hence the assert.
1659 	 */
1660 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1661 	if (!ppgtt_ofs)
1662 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1663 						xe_bo_addr(update->pt_bo, 0,
1664 							   XE_PAGE_SIZE), false);
1665 
1666 	do {
1667 		u64 addr = ppgtt_ofs + ofs * 8;
1668 
1669 		chunk = min(size, MAX_PTE_PER_SDI);
1670 
1671 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1672 		if (!(bb->len & 1))
1673 			bb->cs[bb->len++] = MI_NOOP;
1674 
1675 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1676 		bb->cs[bb->len++] = lower_32_bits(addr);
1677 		bb->cs[bb->len++] = upper_32_bits(addr);
1678 		if (pt_op->bind)
1679 			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1680 				      ofs, chunk, update);
1681 		else
1682 			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1683 				   ofs, chunk, update);
1684 
1685 		bb->len += chunk * 2;
1686 		ofs += chunk;
1687 		size -= chunk;
1688 	} while (size);
1689 }
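
/*
 * Worked example for the loop above: with update->qwords == 511, two
 * MI_STORE_DATA_IMM commands are emitted, the first carrying MAX_PTE_PER_SDI
 * (510) PTE qwords and the second the remaining one, each preceded by an
 * MI_NOOP when needed so the qword payload stays 8-byte aligned for the
 * populate()/clear() callbacks.
 */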
1690 
1691 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1692 {
1693 	return xe_vm_get(m->q->vm);
1694 }
1695 
1696 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1697 struct migrate_test_params {
1698 	struct xe_test_priv base;
1699 	bool force_gpu;
1700 };
1701 
1702 #define to_migrate_test_params(_priv) \
1703 	container_of(_priv, struct migrate_test_params, base)
1704 #endif
1705 
1706 static struct dma_fence *
1707 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1708 			       struct xe_migrate_pt_update *pt_update)
1709 {
1710 	XE_TEST_DECLARE(struct migrate_test_params *test =
1711 			to_migrate_test_params
1712 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1713 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1714 	struct xe_vm *vm = pt_update->vops->vm;
1715 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1716 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1717 	int err;
1718 	u32 i, j;
1719 
1720 	if (XE_TEST_ONLY(test && test->force_gpu))
1721 		return ERR_PTR(-ETIME);
1722 
1723 	if (ops->pre_commit) {
1724 		pt_update->job = NULL;
1725 		err = ops->pre_commit(pt_update);
1726 		if (err)
1727 			return ERR_PTR(err);
1728 	}
1729 
1730 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1731 		const struct xe_vm_pgtable_update_op *pt_op =
1732 			&pt_update_ops->ops[i];
1733 
1734 		for (j = 0; j < pt_op->num_entries; j++) {
1735 			const struct xe_vm_pgtable_update *update =
1736 				&pt_op->entries[j];
1737 
1738 			if (pt_op->bind)
1739 				ops->populate(pt_update, m->tile,
1740 					      &update->pt_bo->vmap, NULL,
1741 					      update->ofs, update->qwords,
1742 					      update);
1743 			else
1744 				ops->clear(pt_update, m->tile,
1745 					   &update->pt_bo->vmap, NULL,
1746 					   update->ofs, update->qwords, update);
1747 		}
1748 	}
1749 
1750 	trace_xe_vm_cpu_bind(vm);
1751 	xe_device_wmb(vm->xe);
1752 
1753 	return dma_fence_get_stub();
1754 }
1755 
1756 static struct dma_fence *
1757 __xe_migrate_update_pgtables(struct xe_migrate *m,
1758 			     struct xe_migrate_pt_update *pt_update,
1759 			     struct xe_vm_pgtable_update_ops *pt_update_ops)
1760 {
1761 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1762 	struct xe_tile *tile = m->tile;
1763 	struct xe_gt *gt = tile->primary_gt;
1764 	struct xe_device *xe = tile_to_xe(tile);
1765 	struct xe_sched_job *job;
1766 	struct dma_fence *fence;
1767 	struct drm_suballoc *sa_bo = NULL;
1768 	struct xe_bb *bb;
1769 	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1770 	u32 num_updates = 0, current_update = 0;
1771 	u64 addr;
1772 	int err = 0;
1773 	bool is_migrate = pt_update_ops->q == m->q;
1774 	bool usm = is_migrate && xe->info.has_usm;
1775 
1776 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1777 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1778 		struct xe_vm_pgtable_update *updates = pt_op->entries;
1779 
1780 		num_updates += pt_op->num_entries;
1781 		for (j = 0; j < pt_op->num_entries; ++j) {
1782 			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1783 						    MAX_PTE_PER_SDI);
1784 
1785 			/* align noop + MI_STORE_DATA_IMM cmd prefix */
1786 			batch_size += 4 * num_cmds + updates[j].qwords * 2;
1787 		}
1788 	}
1789 
1790 	/* fixed + PTE entries */
1791 	if (IS_DGFX(xe))
1792 		batch_size += 2;
1793 	else
1794 		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1795 			num_updates * 2;
1796 
1797 	bb = xe_bb_new(gt, batch_size, usm);
1798 	if (IS_ERR(bb))
1799 		return ERR_CAST(bb);
1800 
1801 	/* For sysmem PTEs, we need to map them into our hole. */
1802 	if (!IS_DGFX(xe)) {
1803 		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1804 		u32 ptes, ofs;
1805 
1806 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1807 		if (!is_migrate) {
1808 			u32 num_units = DIV_ROUND_UP(num_updates,
1809 						     NUM_VMUSA_WRITES_PER_UNIT);
1810 
1811 			if (num_units > m->vm_update_sa.size) {
1812 				err = -ENOBUFS;
1813 				goto err_bb;
1814 			}
1815 			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1816 						 GFP_KERNEL, true, 0);
1817 			if (IS_ERR(sa_bo)) {
1818 				err = PTR_ERR(sa_bo);
1819 				goto err_bb;
1820 			}
1821 
1822 			ppgtt_ofs = NUM_KERNEL_PDE +
1823 				(drm_suballoc_soffset(sa_bo) /
1824 				 NUM_VMUSA_UNIT_PER_PAGE);
1825 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1826 				    NUM_VMUSA_UNIT_PER_PAGE) *
1827 				VM_SA_UPDATE_UNIT_SIZE;
1828 		}
1829 
1830 		/* Map our PTs to GTT */
1831 		i = 0;
1832 		j = 0;
1833 		ptes = num_updates;
1834 		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1835 		while (ptes) {
1836 			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1837 			u32 idx = 0;
1838 
1839 			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1840 				MI_SDI_NUM_QW(chunk);
1841 			bb->cs[bb->len++] = ofs;
1842 			bb->cs[bb->len++] = 0; /* upper_32_bits */
1843 
1844 			for (; i < pt_update_ops->num_ops; ++i) {
1845 				struct xe_vm_pgtable_update_op *pt_op =
1846 					&pt_update_ops->ops[i];
1847 				struct xe_vm_pgtable_update *updates = pt_op->entries;
1848 
1849 				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1850 					struct xe_vm *vm = pt_update->vops->vm;
1851 					struct xe_bo *pt_bo = updates[j].pt_bo;
1852 
1853 					if (idx == chunk)
1854 						goto next_cmd;
1855 
1856 					xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
1857 
1858 					/* Map a PT at most once */
1859 					if (pt_bo->update_index < 0)
1860 						pt_bo->update_index = current_update;
1861 
1862 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1863 									 pat_index, 0);
1864 					bb->cs[bb->len++] = lower_32_bits(addr);
1865 					bb->cs[bb->len++] = upper_32_bits(addr);
1866 				}
1867 
1868 				j = 0;
1869 			}
1870 
1871 next_cmd:
1872 			ptes -= chunk;
1873 			ofs += chunk * sizeof(u64);
1874 		}
1875 
1876 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1877 		update_idx = bb->len;
1878 
1879 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1880 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1881 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1882 			struct xe_vm_pgtable_update_op *pt_op =
1883 				&pt_update_ops->ops[i];
1884 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1885 
1886 			for (j = 0; j < pt_op->num_entries; ++j) {
1887 				struct xe_bo *pt_bo = updates[j].pt_bo;
1888 
1889 				write_pgtable(tile, bb, addr +
1890 					      pt_bo->update_index * XE_PAGE_SIZE,
1891 					      pt_op, &updates[j], pt_update);
1892 			}
1893 		}
1894 	} else {
1895 		/* phys pages, no preamble required */
1896 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1897 		update_idx = bb->len;
1898 
1899 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1900 			struct xe_vm_pgtable_update_op *pt_op =
1901 				&pt_update_ops->ops[i];
1902 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1903 
1904 			for (j = 0; j < pt_op->num_entries; ++j)
1905 				write_pgtable(tile, bb, 0, pt_op, &updates[j],
1906 					      pt_update);
1907 		}
1908 	}
1909 
1910 	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1911 					 xe_migrate_batch_base(m, usm),
1912 					 update_idx);
1913 	if (IS_ERR(job)) {
1914 		err = PTR_ERR(job);
1915 		goto err_sa;
1916 	}
1917 
1918 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1919 
1920 	if (ops->pre_commit) {
1921 		pt_update->job = job;
1922 		err = ops->pre_commit(pt_update);
1923 		if (err)
1924 			goto err_job;
1925 	}
1926 	if (is_migrate)
1927 		mutex_lock(&m->job_mutex);
1928 
1929 	xe_sched_job_arm(job);
1930 	fence = dma_fence_get(&job->drm.s_fence->finished);
1931 	xe_sched_job_push(job);
1932 
1933 	if (is_migrate)
1934 		mutex_unlock(&m->job_mutex);
1935 
1936 	xe_bb_free(bb, fence);
1937 	drm_suballoc_free(sa_bo, fence);
1938 
1939 	return fence;
1940 
1941 err_job:
1942 	xe_sched_job_put(job);
1943 err_sa:
1944 	drm_suballoc_free(sa_bo, NULL);
1945 err_bb:
1946 	xe_bb_free(bb, NULL);
1947 	return ERR_PTR(err);
1948 }
1949 
1950 /**
1951  * xe_migrate_update_pgtables() - Pipelined page-table update
1952  * @m: The migrate context.
1953  * @pt_update: PT update arguments
1954  *
1955  * Perform a pipelined page-table update. The update descriptors are typically
1956  * built under the same lock critical section as a call to this function. If
1957  * using the default engine for the updates, they will be performed in the
1958  * order they grab the job_mutex. If different engines are used, external
1959  * synchronization is needed for overlapping updates to maintain page-table
1960  * consistency. Note that the meaning of "overlapping" is that the updates
1961  * touch the same page-table, which might be a higher-level page-directory.
1962  * If no pipelining is needed, then updates may be performed by the cpu.
1963  *
1964  * If no pipelining is needed, then updates may be performed by the CPU.
1965  */
1966 struct dma_fence *
1967 xe_migrate_update_pgtables(struct xe_migrate *m,
1968 			   struct xe_migrate_pt_update *pt_update)
1969 
1970 {
1971 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1972 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1973 	struct dma_fence *fence;
1974 
1975 	fence = xe_migrate_update_pgtables_cpu(m, pt_update);
1976 
1977 	/* -ETIME indicates a job is needed; anything else is a legitimate error */
1978 	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
1979 		return fence;
1980 
1981 	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
1982 }
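
/*
 * A minimal caller sketch (illustrative only; the real callers live in the
 * page-table update code, and "update" here is a hypothetical, fully built
 * struct xe_migrate_pt_update): the returned fence is either the CPU stub
 * fence or the migration job's finished fence, and the caller is responsible
 * for putting it once it is no longer needed.
 *
 *	fence = xe_migrate_update_pgtables(tile->migrate, &update);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	...
 *	dma_fence_put(fence);
 */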
1983 
1984 /**
1985  * xe_migrate_wait() - Complete all operations using the xe_migrate context
1986  * @m: Migrate context to wait for.
1987  *
1988  * Waits until the GPU no longer uses the migrate context's default engine
1989  * or its page-table objects. FIXME: What about separate page-table update
1990  * engines?
1991  */
1992 void xe_migrate_wait(struct xe_migrate *m)
1993 {
1994 	if (m->fence)
1995 		dma_fence_wait(m->fence, false);
1996 }
1997 
1998 static u32 pte_update_cmd_size(u64 size)
1999 {
2000 	u32 num_dword;
2001 	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
2002 
2003 	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
2004 
2005 	/*
2006 	 * MI_STORE_DATA_IMM command is used to update page table. Each
2007 	 * The MI_STORE_DATA_IMM command is used to update the page table. Each
2008 	 * instruction can update at most MAX_PTE_PER_SDI PTE entries. To
2009 	 * update n (n <= MAX_PTE_PER_SDI) PTE entries, we need:
2010 	 *
2011 	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
2012 	 * - 2 dwords for the page table's physical location
2013 	 * - 2*n dwords for the PTE values (each PTE entry is 2 dwords)
2014 	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
2015 	num_dword += entries * 2;
2016 
2017 	return num_dword;
2018 }
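
/*
 * Worked example: an 8M (MAX_PREEMPTDISABLE_TRANSFER) update spans 2048
 * XE_PAGE_SIZE entries, needing DIV_U64_ROUND_UP(2048, MAX_PTE_PER_SDI) = 5
 * SDI commands, i.e. 5 * 3 + 2048 * 2 = 4111 dwords of batch space.
 */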
2019 
2020 static void build_pt_update_batch_sram(struct xe_migrate *m,
2021 				       struct xe_bb *bb, u32 pt_offset,
2022 				       struct drm_pagemap_addr *sram_addr,
2023 				       u32 size, int level)
2024 {
2025 	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
2026 	u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
2027 	u32 ptes;
2028 	int i = 0;
2029 
2030 	xe_tile_assert(m->tile, PAGE_ALIGNED(size));
2031 
2032 	ptes = DIV_ROUND_UP(size, gpu_page_size);
2033 	while (ptes) {
2034 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
2035 
2036 		if (!level)
2037 			chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
2038 
2039 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
2040 		bb->cs[bb->len++] = pt_offset;
2041 		bb->cs[bb->len++] = 0;
2042 
2043 		pt_offset += chunk * 8;
2044 		ptes -= chunk;
2045 
2046 		while (chunk--) {
2047 			u64 addr = sram_addr[i].addr;
2048 			u64 pte;
2049 
2050 			xe_tile_assert(m->tile, sram_addr[i].proto ==
2051 				       DRM_INTERCONNECT_SYSTEM);
2052 			xe_tile_assert(m->tile, addr);
2053 			xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
2054 
2055 again:
2056 			pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
2057 								addr, pat_index,
2058 								level, false, 0);
2059 			bb->cs[bb->len++] = lower_32_bits(pte);
2060 			bb->cs[bb->len++] = upper_32_bits(pte);
2061 
2062 			if (gpu_page_size < PAGE_SIZE) {
2063 				addr += XE_PAGE_SIZE;
2064 				if (!PAGE_ALIGNED(addr)) {
2065 					chunk--;
2066 					goto again;
2067 				}
2068 				i++;
2069 			} else {
2070 				i += gpu_page_size / PAGE_SIZE;
2071 			}
2072 		}
2073 	}
2074 }
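
/*
 * Note on the level-0 ALIGN_DOWN() above: with, for instance, 64K system pages
 * and 4K GPU pages, each SDI chunk is rounded down to a multiple of
 * PAGE_SIZE / XE_PAGE_SIZE (16) PTEs so the PTEs backing a single system page
 * are never split across two commands; with 4K system pages it is a no-op.
 */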
2075 
2076 static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
2077 				    unsigned long size)
2078 {
2079 	u32 large_size = (0x1 << xe_pt_shift(1));
2080 	unsigned long i, incr = large_size / PAGE_SIZE;
2081 
2082 	for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
2083 		if (PAGE_SIZE << sram_addr[i].order != large_size)
2084 			return false;
2085 
2086 	return true;
2087 }
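
/*
 * For example, with 4K base pages the check above looks at every 512th entry
 * and requires it to describe a 2M (order-9) allocation; only then does
 * xe_migrate_vram() take the 2M PDE path instead of emitting 512 individual
 * 4K PTEs per 2M step.
 */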
2088 
2089 #define XE_CACHELINE_BYTES	64ull
2090 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
2091 
2092 static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
2093 {
2094 	u32 pitch;
2095 
2096 	if (IS_ALIGNED(len, PAGE_SIZE))
2097 		pitch = PAGE_SIZE;
2098 	else if (IS_ALIGNED(len, SZ_4K))
2099 		pitch = SZ_4K;
2100 	else if (IS_ALIGNED(len, SZ_256))
2101 		pitch = SZ_256;
2102 	else if (IS_ALIGNED(len, 4))
2103 		pitch = 4;
2104 	else
2105 		pitch = 1;
2106 
2107 	xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
2108 	return pitch;
2109 }
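
/*
 * Examples: a 6K copy falls through to a 256-byte pitch, a 0x1004-byte copy
 * to a 4-byte pitch, and an odd-length copy to a byte pitch, which the assert
 * above only allows when xe->info.has_mem_copy_instr is set.
 */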
2110 
2111 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
2112 					 unsigned long len,
2113 					 unsigned long sram_offset,
2114 					 struct drm_pagemap_addr *sram_addr,
2115 					 u64 vram_addr,
2116 					 const enum xe_migrate_copy_dir dir)
2117 {
2118 	struct xe_gt *gt = m->tile->primary_gt;
2119 	struct xe_device *xe = gt_to_xe(gt);
2120 	bool use_usm_batch = xe->info.has_usm;
2121 	struct dma_fence *fence = NULL;
2122 	u32 batch_size = 1;
2123 	u64 src_L0_ofs, dst_L0_ofs;
2124 	struct xe_sched_job *job;
2125 	struct xe_bb *bb;
2126 	u32 update_idx, pt_slot = 0;
2127 	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
2128 	unsigned int pitch = xe_migrate_copy_pitch(xe, len);
2129 	int err;
2130 	unsigned long i, j;
2131 	bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
2132 
2133 	if (!xe->info.has_mem_copy_instr &&
2134 	    drm_WARN_ON(&xe->drm,
2135 			(!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
2136 		return ERR_PTR(-EOPNOTSUPP);
2137 
2138 	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
2139 
2140 	batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
2141 	batch_size += EMIT_COPY_DW;
2142 
2143 	bb = xe_bb_new(gt, batch_size, use_usm_batch);
2144 	if (IS_ERR(bb)) {
2145 		err = PTR_ERR(bb);
2146 		return ERR_PTR(err);
2147 	}
2148 
2149 	/*
2150 	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
2151 	 * the entry is populated by the GPU pagemap, but subsequent entries within
2152 	 * the range of that order are not populated.
2153 	 * build_pt_update_batch_sram() expects a fully populated array of
2154 	 * struct drm_pagemap_addr. Ensure this is the case even with higher
2155 	 * orders.
2156 	 */
2157 	for (i = 0; !use_pde && i < npages;) {
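	/*
	 * E.g. an order-2 entry at index i covers four pages; the loop below
	 * fills sram_addr[i + 1..3].addr from sram_addr[i].addr plus one, two
	 * and three PAGE_SIZE offsets when they are not already populated.
	 */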
2158 		unsigned int order = sram_addr[i].order;
2159 
2160 		for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
2161 			if (!sram_addr[i + j].addr)
2162 				sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
2163 
2164 		i += NR_PAGES(order);
2165 	}
2166 
2167 	if (use_pde)
2168 		build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
2169 					   sram_addr, npages << PAGE_SHIFT, 1);
2170 	else
2171 		build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
2172 					   sram_addr, npages << PAGE_SHIFT, 0);
2173 
2174 	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
2175 		if (use_pde)
2176 			src_L0_ofs = m->large_page_copy_ofs + sram_offset;
2177 		else
2178 			src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2179 		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2180 
2181 	} else {
2182 		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2183 		if (use_pde)
2184 			dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
2185 		else
2186 			dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2187 	}
2188 
2189 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
2190 	update_idx = bb->len;
2191 
2192 	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
2193 
2194 	job = xe_bb_create_migration_job(m->q, bb,
2195 					 xe_migrate_batch_base(m, use_usm_batch),
2196 					 update_idx);
2197 	if (IS_ERR(job)) {
2198 		err = PTR_ERR(job);
2199 		goto err;
2200 	}
2201 
2202 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
2203 
2204 	mutex_lock(&m->job_mutex);
2205 	xe_sched_job_arm(job);
2206 	fence = dma_fence_get(&job->drm.s_fence->finished);
2207 	xe_sched_job_push(job);
2208 
2209 	dma_fence_put(m->fence);
2210 	m->fence = dma_fence_get(fence);
2211 	mutex_unlock(&m->job_mutex);
2212 
2213 	xe_bb_free(bb, fence);
2214 
2215 	return fence;
2216 
2217 err:
2218 	xe_bb_free(bb, NULL);
2219 
2220 	return ERR_PTR(err);
2221 }
2222 
2223 /**
2224  * xe_migrate_to_vram() - Migrate to VRAM
2225  * @m: The migration context.
2226  * @npages: Number of pages to migrate.
2227  * @src_addr: Array of DMA information (source of migrate)
2228  * @dst_addr: Device physical address of VRAM (destination of migrate)
2229  *
2230  * Copy from an array of DMA addresses to a VRAM device physical address
2231  *
2232  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2233  * failure
2234  */
2235 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
2236 				     unsigned long npages,
2237 				     struct drm_pagemap_addr *src_addr,
2238 				     u64 dst_addr)
2239 {
2240 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
2241 			       XE_MIGRATE_COPY_TO_VRAM);
2242 }
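
/*
 * A minimal usage sketch (illustrative; "addrs" and "vram_dpa" are
 * hypothetical caller-owned values): migrate npages of dma-mapped system
 * memory into VRAM and wait for the copy to finish.
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_migrate_to_vram(tile->migrate, npages, addrs, vram_dpa);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */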
2243 
2244 /**
2245  * xe_migrate_from_vram() - Migrate from VRAM
2246  * @m: The migration context.
2247  * @npages: Number of pages to migrate.
2248  * @src_addr: Device physical address of VRAM (source of migrate)
2249  * @dst_addr: Array of DMA information (destination of migrate)
2250  *
2251  * Copy from a VRAM device physical address to an array of DMA addresses
2252  *
2253  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2254  * failure
2255  */
2256 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
2257 				       unsigned long npages,
2258 				       u64 src_addr,
2259 				       struct drm_pagemap_addr *dst_addr)
2260 {
2261 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
2262 			       XE_MIGRATE_COPY_TO_SRAM);
2263 }
2264 
2265 static void xe_migrate_dma_unmap(struct xe_device *xe,
2266 				 struct drm_pagemap_addr *pagemap_addr,
2267 				 int len, int write)
2268 {
2269 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2270 
2271 	for (i = 0; i < npages; ++i) {
2272 		if (!pagemap_addr[i].addr)
2273 			break;
2274 
2275 		dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
2276 			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2277 	}
2278 	kfree(pagemap_addr);
2279 }
2280 
2281 static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
2282 						   void *buf, int len,
2283 						   int write)
2284 {
2285 	struct drm_pagemap_addr *pagemap_addr;
2286 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2287 
2288 	pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL);
2289 	if (!pagemap_addr)
2290 		return ERR_PTR(-ENOMEM);
2291 
2292 	for (i = 0; i < npages; ++i) {
2293 		dma_addr_t addr;
2294 		struct page *page;
2295 		enum dma_data_direction dir = write ? DMA_TO_DEVICE :
2296 						      DMA_FROM_DEVICE;
2297 
2298 		if (is_vmalloc_addr(buf))
2299 			page = vmalloc_to_page(buf);
2300 		else
2301 			page = virt_to_page(buf);
2302 
2303 		addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
2304 		if (dma_mapping_error(xe->drm.dev, addr))
2305 			goto err_fault;
2306 
2307 		pagemap_addr[i] =
2308 			drm_pagemap_addr_encode(addr,
2309 						DRM_INTERCONNECT_SYSTEM,
2310 						0, dir);
2311 		buf += PAGE_SIZE;
2312 	}
2313 
2314 	return pagemap_addr;
2315 
2316 err_fault:
2317 	xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
2318 	return ERR_PTR(-EFAULT);
2319 }
2320 
2321 /**
2322  * xe_migrate_access_memory - Access memory of a BO via GPU
2323  *
2324  * @m: The migration context.
2325  * @bo: buffer object
2326  * @offset: access offset into buffer object
2327  * @buf: pointer to caller memory to read into or write from
2328  * @len: length of access
2329  * @write: write access
2330  *
2331  * Access memory of a BO via the GPU, either reading into or writing from a
2332  * passed-in pointer. The pointer is DMA mapped for GPU access, and GPU
2333  * commands are issued to read into or write from it.
2334  *
2335  * Returns:
2336  * 0 if successful, negative error code on failure.
2337  */
2338 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
2339 			     unsigned long offset, void *buf, int len,
2340 			     int write)
2341 {
2342 	struct xe_tile *tile = m->tile;
2343 	struct xe_device *xe = tile_to_xe(tile);
2344 	struct xe_res_cursor cursor;
2345 	struct dma_fence *fence = NULL;
2346 	struct drm_pagemap_addr *pagemap_addr;
2347 	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
2348 	int bytes_left = len, current_page = 0;
2349 	void *orig_buf = buf;
2350 
2351 	xe_bo_assert_held(bo);
2352 
2353 	/* Use a bounce buffer for small or unaligned accesses */
2354 	if (!xe->info.has_mem_copy_instr &&
2355 	    (!IS_ALIGNED(len, 4) ||
2356 	     !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
2357 	     !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
2358 		int buf_offset = 0;
2359 		void *bounce;
2360 		int err;
2361 
2362 		BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
2363 		bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
2364 		if (!bounce)
2365 			return -ENOMEM;
2366 
2367 		/*
2368 		 * Less than ideal for large unaligned accesses, but this should be
2369 		 * fairly rare; we can fix it up if this becomes common.
2370 		 */
2371 		do {
2372 			int copy_bytes = min_t(int, bytes_left,
2373 					       XE_CACHELINE_BYTES -
2374 					       (offset & XE_CACHELINE_MASK));
2375 			int ptr_offset = offset & XE_CACHELINE_MASK;
2376 
2377 			err = xe_migrate_access_memory(m, bo,
2378 						       offset &
2379 						       ~XE_CACHELINE_MASK,
2380 						       bounce,
2381 						       XE_CACHELINE_BYTES, 0);
2382 			if (err)
2383 				break;
2384 
2385 			if (write) {
2386 				memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
2387 
2388 				err = xe_migrate_access_memory(m, bo,
2389 							       offset & ~XE_CACHELINE_MASK,
2390 							       bounce,
2391 							       XE_CACHELINE_BYTES, write);
2392 				if (err)
2393 					break;
2394 			} else {
2395 				memcpy(buf + buf_offset, bounce + ptr_offset,
2396 				       copy_bytes);
2397 			}
2398 
2399 			bytes_left -= copy_bytes;
2400 			buf_offset += copy_bytes;
2401 			offset += copy_bytes;
2402 		} while (bytes_left);
2403 
2404 		kfree(bounce);
2405 		return err;
2406 	}
2407 
2408 	pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
2409 	if (IS_ERR(pagemap_addr))
2410 		return PTR_ERR(pagemap_addr);
2411 
2412 	xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
2413 
2414 	do {
2415 		struct dma_fence *__fence;
2416 		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
2417 			cursor.start;
2418 		int current_bytes;
2419 		u32 pitch;
2420 
2421 		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
2422 			current_bytes = min_t(int, bytes_left,
2423 					      MAX_PREEMPTDISABLE_TRANSFER);
2424 		else
2425 			current_bytes = min_t(int, bytes_left, cursor.size);
2426 
2427 		pitch = xe_migrate_copy_pitch(xe, current_bytes);
2428 		if (xe->info.has_mem_copy_instr)
2429 			current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
2430 		else
2431 			current_bytes = min_t(int, current_bytes,
2432 					      round_down(S16_MAX * pitch,
2433 							 XE_CACHELINE_BYTES));
2434 
2435 		__fence = xe_migrate_vram(m, current_bytes,
2436 					  (unsigned long)buf & ~PAGE_MASK,
2437 					  &pagemap_addr[current_page],
2438 					  vram_addr, write ?
2439 					  XE_MIGRATE_COPY_TO_VRAM :
2440 					  XE_MIGRATE_COPY_TO_SRAM);
2441 		if (IS_ERR(__fence)) {
2442 			if (fence) {
2443 				dma_fence_wait(fence, false);
2444 				dma_fence_put(fence);
2445 			}
2446 			fence = __fence;
2447 			goto out_err;
2448 		}
2449 
2450 		dma_fence_put(fence);
2451 		fence = __fence;
2452 
2453 		buf += current_bytes;
2454 		offset += current_bytes;
2455 		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
2456 		bytes_left -= current_bytes;
2457 		if (bytes_left)
2458 			xe_res_next(&cursor, current_bytes);
2459 	} while (bytes_left);
2460 
2461 	dma_fence_wait(fence, false);
2462 	dma_fence_put(fence);
2463 
2464 out_err:
2465 	xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
2466 	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
2467 }
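
/*
 * A minimal usage sketch (illustrative only): read 256 bytes from a
 * VRAM-backed BO at byte offset "ofs" into a caller buffer, with the BO's
 * dma-resv held as asserted above.
 *
 *	u8 data[256];
 *	int ret;
 *
 *	ret = xe_migrate_access_memory(tile->migrate, bo, ofs, data,
 *				       sizeof(data), 0);
 *	if (ret)
 *		return ret;
 */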
2468 
2469 /**
2470  * xe_migrate_job_lock() - Lock migrate job lock
2471  * @m: The migration context.
2472  * @q: Queue associated with the operation which requires a lock
2473  *
2474  * Lock the migrate job lock if the queue is a migration queue, otherwise
2475  * assert that the VM's dma-resv is held (user queues have their own locking).
2476  */
2477 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
2478 {
2479 	bool is_migrate = q == m->q;
2480 
2481 	if (is_migrate)
2482 		mutex_lock(&m->job_mutex);
2483 	else
2484 		xe_vm_assert_held(q->vm);	/* User queues' VMs should be locked */
2485 }
2486 
2487 /**
2488  * xe_migrate_job_unlock() - Unlock migrate job lock
2489  * @m: The migration context.
2490  * @q: Queue associated with the operation which requires a lock
2491  *
2492  * Unlock the migrate job lock if the queue is a migration queue, otherwise
2493  * assert that the VM's dma-resv is held (user queues have their own locking).
2494  */
2495 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
2496 {
2497 	bool is_migrate = q == m->q;
2498 
2499 	if (is_migrate)
2500 		mutex_unlock(&m->job_mutex);
2501 	else
2502 		xe_vm_assert_held(q->vm);	/* User queues' VMs should be locked */
2503 }
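
/*
 * A minimal usage sketch (illustrative): code submitting a job on the migrate
 * queue brackets the arm/push sequence with these helpers so that jobs are
 * emitted in timeline order on the shared queue.
 *
 *	xe_migrate_job_lock(m, q);
 *	xe_sched_job_arm(job);
 *	fence = dma_fence_get(&job->drm.s_fence->finished);
 *	xe_sched_job_push(job);
 *	xe_migrate_job_unlock(m, q);
 */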
2504 
2505 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
2506 /**
2507  * xe_migrate_job_lock_assert() - Assert migrate job lock held of queue
2508  * @q: Migrate queue
2509  */
2510 void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
2511 {
2512 	struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
2513 
2514 	xe_gt_assert(q->gt, q == m->q);
2515 	lockdep_assert_held(&m->job_mutex);
2516 }
2517 #endif
2518 
2519 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2520 #include "tests/xe_migrate.c"
2521 #endif
2522