xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/drm_pagemap.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include <generated/xe_wa_oob.h>
17 
18 #include "instructions/xe_gpu_commands.h"
19 #include "instructions/xe_mi_commands.h"
20 #include "regs/xe_gtt_defs.h"
21 #include "tests/xe_test.h"
22 #include "xe_assert.h"
23 #include "xe_bb.h"
24 #include "xe_bo.h"
25 #include "xe_exec_queue.h"
26 #include "xe_ggtt.h"
27 #include "xe_gt.h"
28 #include "xe_gt_printk.h"
29 #include "xe_hw_engine.h"
30 #include "xe_lrc.h"
31 #include "xe_map.h"
32 #include "xe_mocs.h"
33 #include "xe_printk.h"
34 #include "xe_pt.h"
35 #include "xe_res_cursor.h"
36 #include "xe_sa.h"
37 #include "xe_sched_job.h"
38 #include "xe_sriov_vf_ccs.h"
39 #include "xe_svm.h"
40 #include "xe_sync.h"
41 #include "xe_trace_bo.h"
42 #include "xe_validation.h"
43 #include "xe_vm.h"
44 #include "xe_vram.h"
45 
46 /**
47  * struct xe_migrate - migrate context.
48  */
49 struct xe_migrate {
50 	/** @q: Default exec queue used for migration */
51 	struct xe_exec_queue *q;
52 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
53 	struct xe_tile *tile;
54 	/** @job_mutex: Timeline mutex for @eng. */
55 	struct mutex job_mutex;
56 	/** @pt_bo: Page-table buffer object. */
57 	struct xe_bo *pt_bo;
58 	/** @batch_base_ofs: VM offset of the migration batch buffer */
59 	u64 batch_base_ofs;
60 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
61 	u64 usm_batch_base_ofs;
62 	/** @cleared_mem_ofs: VM offset of @cleared_bo. */
63 	u64 cleared_mem_ofs;
64 	/** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
65 	u64 large_page_copy_ofs;
66 	/**
67 	 * @large_page_copy_pdes: BO offset to writeout 2M pages (PDEs) used for
68 	 * large copies
69 	 */
70 	u64 large_page_copy_pdes;
71 	/**
72 	 * @fence: dma-fence representing the last migration job batch.
73 	 * Protected by @job_mutex.
74 	 */
75 	struct dma_fence *fence;
76 	/**
77 	 * @vm_update_sa: For integrated, used to suballocate page-tables
78 	 * out of the pt_bo.
79 	 */
80 	struct drm_suballoc_manager vm_update_sa;
81 	/** @min_chunk_size: For dgfx, Minimum chunk size */
82 	u64 min_chunk_size;
83 };
84 
85 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
86 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
87 #define NUM_KERNEL_PDE 15
88 #define NUM_PT_SLOTS 32
89 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
90 #define MAX_NUM_PTE 512
91 #define IDENTITY_OFFSET 256ULL
92 
93 /*
94  * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
95  * legal value accepted.  Since that instruction field is always stored in
96  * (val-2) format, this translates to 0x400 dwords for the true maximum length
97  * of the instruction.  Subtracting the instruction header (1 dword) and
98  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
99  */
100 #define MAX_PTE_PER_SDI 0x1FEU
101 
102 static void xe_migrate_fini(void *arg)
103 {
104 	struct xe_migrate *m = arg;
105 
106 	xe_vm_lock(m->q->vm, false);
107 	xe_bo_unpin(m->pt_bo);
108 	xe_vm_unlock(m->q->vm);
109 
110 	dma_fence_put(m->fence);
111 	xe_bo_put(m->pt_bo);
112 	drm_suballoc_manager_fini(&m->vm_update_sa);
113 	mutex_destroy(&m->job_mutex);
114 	xe_vm_close_and_put(m->q->vm);
115 	xe_exec_queue_put(m->q);
116 }
117 
118 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
119 {
120 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
121 
122 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
123 	return (slot + 1ULL) << xe_pt_shift(level + 1);
124 }
125 
126 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
127 {
128 	/*
129 	 * Remove the DPA to get a correct offset into identity table for the
130 	 * migrate offset
131 	 */
132 	u64 identity_offset = IDENTITY_OFFSET;
133 
134 	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
135 		identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
136 							(xe->mem.vram), SZ_1G);
137 
138 	addr -= xe_vram_region_dpa_base(xe->mem.vram);
139 	return addr + (identity_offset << xe_pt_shift(2));
140 }
141 
142 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
143 					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
144 {
145 	struct xe_vram_region *vram = xe->mem.vram;
146 	resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
147 	u64 pos, ofs, flags;
148 	u64 entry;
149 	/* XXX: Unclear if this should be usable_size? */
150 	u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
151 	u32 level = 2;
152 
153 	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
154 	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
155 					    true, 0);
156 
157 	xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
158 
159 	/*
160 	 * Use 1GB pages when possible, last chunk always use 2M
161 	 * pages as mixing reserved memory (stolen, WOCPM) with a single
162 	 * mapping is not allowed on certain platforms.
163 	 */
164 	for (pos = dpa_base; pos < vram_limit;
165 	     pos += SZ_1G, ofs += 8) {
166 		if (pos + SZ_1G >= vram_limit) {
167 			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
168 			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
169 
170 			flags = vm->pt_ops->pte_encode_addr(xe, 0,
171 							    pat_index,
172 							    level - 1,
173 							    true, 0);
174 
175 			for (ofs = pt_2m_ofs; pos < vram_limit;
176 			     pos += SZ_2M, ofs += 8)
177 				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
178 			break;	/* Ensure pos == vram_limit assert correct */
179 		}
180 
181 		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
182 	}
183 
184 	xe_assert(xe, pos == vram_limit);
185 }
186 
187 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
188 				 struct xe_vm *vm, struct drm_exec *exec)
189 {
190 	struct xe_device *xe = tile_to_xe(tile);
191 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
192 	u8 id = tile->id;
193 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
194 #define VRAM_IDENTITY_MAP_COUNT	2
195 	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
196 #undef VRAM_IDENTITY_MAP_COUNT
197 	u32 map_ofs, level, i;
198 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
199 	u64 entry, pt29_ofs;
200 
201 	/* Can't bump NUM_PT_SLOTS too high */
202 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
203 	/* Must be a multiple of 64K to support all platforms */
204 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
205 	/* And one slot reserved for the 4KiB page table updates */
206 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
207 
208 	/* Need to be sure everything fits in the first PT, or create more */
209 	xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
210 
211 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
212 				  num_entries * XE_PAGE_SIZE,
213 				  ttm_bo_type_kernel,
214 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
215 				  XE_BO_FLAG_PAGETABLE, exec);
216 	if (IS_ERR(bo))
217 		return PTR_ERR(bo);
218 
219 	/* PT30 & PT31 reserved for 2M identity map */
220 	pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
221 	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
222 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
223 
224 	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
225 
226 	/* Map the entire BO in our level 0 pt */
227 	for (i = 0, level = 0; i < num_entries; level++) {
228 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
229 						  pat_index, 0);
230 
231 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
232 
233 		if (vm->flags & XE_VM_FLAG_64K)
234 			i += 16;
235 		else
236 			i += 1;
237 	}
238 
239 	if (!IS_DGFX(xe)) {
240 		/* Write out batch too */
241 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
242 		for (i = 0; i < xe_bo_size(batch);
243 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
244 		     XE_PAGE_SIZE) {
245 			entry = vm->pt_ops->pte_encode_bo(batch, i,
246 							  pat_index, 0);
247 
248 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
249 				  entry);
250 			level++;
251 		}
252 		if (xe->info.has_usm) {
253 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
254 
255 			batch = tile->primary_gt->usm.bb_pool->bo;
256 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
257 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
258 
259 			for (i = 0; i < xe_bo_size(batch);
260 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
261 			     XE_PAGE_SIZE) {
262 				entry = vm->pt_ops->pte_encode_bo(batch, i,
263 								  pat_index, 0);
264 
265 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
266 					  entry);
267 				level++;
268 			}
269 		}
270 	} else {
271 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
272 
273 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
274 
275 		if (xe->info.has_usm) {
276 			batch = tile->primary_gt->usm.bb_pool->bo;
277 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
278 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
279 		}
280 	}
281 
282 	for (level = 1; level < num_level; level++) {
283 		u32 flags = 0;
284 
285 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
286 			flags = XE_PDE_64K;
287 
288 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
289 						  XE_PAGE_SIZE);
290 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
291 			  entry | flags);
292 	}
293 
294 	/* Write PDE's that point to our BO. */
295 	for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
296 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
297 
298 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
299 			  (i + 1) * 8, u64, entry);
300 	}
301 
302 	/* Reserve 2M PDEs */
303 	level = 1;
304 	m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
305 	m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
306 		NUM_PT_SLOTS * 8;
307 
308 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
309 	level = 2;
310 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
311 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
312 		  | XE_PTE_NULL);
313 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
314 
315 	/* Identity map the entire vram at 256GiB offset */
316 	if (IS_DGFX(xe)) {
317 		u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
318 		resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
319 
320 		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
321 					    pat_index, pt30_ofs);
322 		xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
323 
324 		/*
325 		 * Identity map the entire vram for compressed pat_index for xe2+
326 		 * if flat ccs is enabled.
327 		 */
328 		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
329 			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
330 			u64 vram_offset = IDENTITY_OFFSET +
331 				DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
332 			u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
333 
334 			xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
335 							  IDENTITY_OFFSET / 2) * SZ_1G);
336 			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
337 						    comp_pat_index, pt31_ofs);
338 		}
339 	}
340 
341 	/*
342 	 * Example layout created above, with root level = 3:
343 	 * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
344 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
345 	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
346 	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
347 	 *
348 	 * This makes the lowest part of the VM point to the pagetables.
349 	 * Hence the lowest 2M in the vm should point to itself, with a few writes
350 	 * and flushes, other parts of the VM can be used either for copying and
351 	 * clearing.
352 	 *
353 	 * For performance, the kernel reserves PDE's, so about 20 are left
354 	 * for async VM updates.
355 	 *
356 	 * To make it easier to work, each scratch PT is put in slot (1 + PT #)
357 	 * everywhere, this allows lockless updates to scratch pages by using
358 	 * the different addresses in VM.
359 	 */
360 #define NUM_VMUSA_UNIT_PER_PAGE	32
361 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
362 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
363 	drm_suballoc_manager_init(&m->vm_update_sa,
364 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
365 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
366 
367 	m->pt_bo = bo;
368 	return 0;
369 }
370 
371 /*
372  * Including the reserved copy engine is required to avoid deadlocks due to
373  * migrate jobs servicing the faults gets stuck behind the job that faulted.
374  */
375 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
376 {
377 	u32 logical_mask = 0;
378 	struct xe_hw_engine *hwe;
379 	enum xe_hw_engine_id id;
380 
381 	for_each_hw_engine(hwe, gt, id) {
382 		if (hwe->class != XE_ENGINE_CLASS_COPY)
383 			continue;
384 
385 		if (xe_gt_is_usm_hwe(gt, hwe))
386 			logical_mask |= BIT(hwe->logical_instance);
387 	}
388 
389 	return logical_mask;
390 }
391 
392 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
393 {
394 	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
395 }
396 
397 /**
398  * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
399  * @tile: &xe_tile
400  *
401  * Allocates a &xe_migrate for a given tile.
402  *
403  * Return: &xe_migrate on success, or NULL when out of memory.
404  */
405 struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
406 {
407 	struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
408 
409 	if (m)
410 		m->tile = tile;
411 	return m;
412 }
413 
414 static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
415 {
416 	struct xe_device *xe = tile_to_xe(tile);
417 	struct xe_validation_ctx ctx;
418 	struct drm_exec exec;
419 	int err = 0;
420 
421 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
422 		err = xe_vm_drm_exec_lock(vm, &exec);
423 		drm_exec_retry_on_contention(&exec);
424 		err = xe_migrate_prepare_vm(tile, m, vm, &exec);
425 		drm_exec_retry_on_contention(&exec);
426 		xe_validation_retry_on_oom(&ctx, &err);
427 	}
428 
429 	return err;
430 }
431 
432 /**
433  * xe_migrate_init() - Initialize a migrate context
434  * @m: The migration context
435  *
436  * Return: 0 if successful, negative error code on failure
437  */
438 int xe_migrate_init(struct xe_migrate *m)
439 {
440 	struct xe_tile *tile = m->tile;
441 	struct xe_gt *primary_gt = tile->primary_gt;
442 	struct xe_device *xe = tile_to_xe(tile);
443 	struct xe_vm *vm;
444 	int err;
445 
446 	/* Special layout, prepared below.. */
447 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
448 			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);
449 	if (IS_ERR(vm))
450 		return PTR_ERR(vm);
451 
452 	err = xe_migrate_lock_prepare_vm(tile, m, vm);
453 	if (err)
454 		goto err_out;
455 
456 	if (xe->info.has_usm) {
457 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
458 							   XE_ENGINE_CLASS_COPY,
459 							   primary_gt->usm.reserved_bcs_instance,
460 							   false);
461 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
462 
463 		if (!hwe || !logical_mask) {
464 			err = -EINVAL;
465 			goto err_out;
466 		}
467 
468 		/*
469 		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
470 		 * PVC, may want to revisit if performance is needed.
471 		 */
472 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
473 					    EXEC_QUEUE_FLAG_KERNEL |
474 					    EXEC_QUEUE_FLAG_PERMANENT |
475 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY |
476 					    EXEC_QUEUE_FLAG_MIGRATE |
477 					    EXEC_QUEUE_FLAG_LOW_LATENCY, 0);
478 	} else {
479 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
480 						  XE_ENGINE_CLASS_COPY,
481 						  EXEC_QUEUE_FLAG_KERNEL |
482 						  EXEC_QUEUE_FLAG_PERMANENT |
483 						  EXEC_QUEUE_FLAG_MIGRATE, 0);
484 	}
485 	if (IS_ERR(m->q)) {
486 		err = PTR_ERR(m->q);
487 		goto err_out;
488 	}
489 
490 	mutex_init(&m->job_mutex);
491 	fs_reclaim_acquire(GFP_KERNEL);
492 	might_lock(&m->job_mutex);
493 	fs_reclaim_release(GFP_KERNEL);
494 
495 	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
496 	if (err)
497 		return err;
498 
499 	if (IS_DGFX(xe)) {
500 		if (xe_migrate_needs_ccs_emit(xe))
501 			/* min chunk size corresponds to 4K of CCS Metadata */
502 			m->min_chunk_size = SZ_4K * SZ_64K /
503 				xe_device_ccs_bytes(xe, SZ_64K);
504 		else
505 			/* Somewhat arbitrary to avoid a huge amount of blits */
506 			m->min_chunk_size = SZ_64K;
507 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
508 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
509 			(unsigned long long)m->min_chunk_size);
510 	}
511 
512 	return err;
513 
514 err_out:
515 	xe_vm_close_and_put(vm);
516 	return err;
517 
518 }
519 
520 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
521 {
522 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
523 		return MAX_CCS_LIMITED_TRANSFER;
524 
525 	return MAX_PREEMPTDISABLE_TRANSFER;
526 }
527 
528 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
529 {
530 	struct xe_device *xe = tile_to_xe(m->tile);
531 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
532 
533 	if (mem_type_is_vram(cur->mem_type)) {
534 		/*
535 		 * VRAM we want to blit in chunks with sizes aligned to
536 		 * min_chunk_size in order for the offset to CCS metadata to be
537 		 * page-aligned. If it's the last chunk it may be smaller.
538 		 *
539 		 * Another constraint is that we need to limit the blit to
540 		 * the VRAM block size, unless size is smaller than
541 		 * min_chunk_size.
542 		 */
543 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
544 
545 		size = min_t(u64, size, chunk);
546 		if (size > m->min_chunk_size)
547 			size = round_down(size, m->min_chunk_size);
548 	}
549 
550 	return size;
551 }
552 
553 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
554 {
555 	/* If the chunk is not fragmented, allow identity map. */
556 	return cur->size >= size;
557 }
558 
559 #define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
560 #define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)
561 
562 static u32 pte_update_size(struct xe_migrate *m,
563 			   u32 flags,
564 			   struct ttm_resource *res,
565 			   struct xe_res_cursor *cur,
566 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
567 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
568 {
569 	u32 cmds = 0;
570 	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
571 	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
572 
573 	*L0_pt = pt_ofs;
574 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
575 		/* Offset into identity map. */
576 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
577 					      cur->start + vram_region_gpu_offset(res),
578 					      is_comp_pte);
579 		cmds += cmd_size;
580 	} else {
581 		/* Clip L0 to available size */
582 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
583 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
584 
585 		*L0 = size;
586 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
587 
588 		/* MI_STORE_DATA_IMM */
589 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
590 
591 		/* PDE qwords */
592 		cmds += num_4k_pages * 2;
593 
594 		/* Each chunk has a single blit command */
595 		cmds += cmd_size;
596 	}
597 
598 	return cmds;
599 }
600 
601 static void emit_pte(struct xe_migrate *m,
602 		     struct xe_bb *bb, u32 at_pt,
603 		     bool is_vram, bool is_comp_pte,
604 		     struct xe_res_cursor *cur,
605 		     u32 size, struct ttm_resource *res)
606 {
607 	struct xe_device *xe = tile_to_xe(m->tile);
608 	struct xe_vm *vm = m->q->vm;
609 	u16 pat_index;
610 	u32 ptes;
611 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
612 	u64 cur_ofs;
613 
614 	/* Indirect access needs compression enabled uncached PAT index */
615 	if (GRAPHICS_VERx100(xe) >= 2000)
616 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
617 					  xe->pat.idx[XE_CACHE_WB];
618 	else
619 		pat_index = xe->pat.idx[XE_CACHE_WB];
620 
621 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
622 
623 	while (ptes) {
624 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
625 
626 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
627 		bb->cs[bb->len++] = ofs;
628 		bb->cs[bb->len++] = 0;
629 
630 		cur_ofs = ofs;
631 		ofs += chunk * 8;
632 		ptes -= chunk;
633 
634 		while (chunk--) {
635 			u64 addr, flags = 0;
636 			bool devmem = false;
637 
638 			addr = xe_res_dma(cur) & PAGE_MASK;
639 			if (is_vram) {
640 				if (vm->flags & XE_VM_FLAG_64K) {
641 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
642 
643 					xe_assert(xe, (va & (SZ_64K - 1)) ==
644 						  (addr & (SZ_64K - 1)));
645 
646 					flags |= XE_PTE_PS64;
647 				}
648 
649 				addr += vram_region_gpu_offset(res);
650 				devmem = true;
651 			}
652 
653 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
654 							   addr, pat_index,
655 							   0, devmem, flags);
656 			bb->cs[bb->len++] = lower_32_bits(addr);
657 			bb->cs[bb->len++] = upper_32_bits(addr);
658 
659 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
660 			cur_ofs += 8;
661 		}
662 	}
663 }
664 
665 #define EMIT_COPY_CCS_DW 5
666 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
667 			  u64 dst_ofs, bool dst_is_indirect,
668 			  u64 src_ofs, bool src_is_indirect,
669 			  u32 size)
670 {
671 	struct xe_device *xe = gt_to_xe(gt);
672 	u32 *cs = bb->cs + bb->len;
673 	u32 num_ccs_blks;
674 	u32 num_pages;
675 	u32 ccs_copy_size;
676 	u32 mocs;
677 
678 	if (GRAPHICS_VERx100(xe) >= 2000) {
679 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
680 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
681 
682 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
683 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
684 
685 	} else {
686 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
687 					    NUM_CCS_BYTES_PER_BLOCK);
688 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
689 
690 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
691 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
692 	}
693 
694 	*cs++ = XY_CTRL_SURF_COPY_BLT |
695 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
696 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
697 		ccs_copy_size;
698 	*cs++ = lower_32_bits(src_ofs);
699 	*cs++ = upper_32_bits(src_ofs) | mocs;
700 	*cs++ = lower_32_bits(dst_ofs);
701 	*cs++ = upper_32_bits(dst_ofs) | mocs;
702 
703 	bb->len = cs - bb->cs;
704 }
705 
706 #define EMIT_COPY_DW 10
707 static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
708 			      u64 dst_ofs, unsigned int size,
709 			      unsigned int pitch)
710 {
711 	struct xe_device *xe = gt_to_xe(gt);
712 	u32 mocs = 0;
713 	u32 tile_y = 0;
714 
715 	xe_gt_assert(gt, !(pitch & 3));
716 	xe_gt_assert(gt, size / pitch <= S16_MAX);
717 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
718 	xe_gt_assert(gt, pitch <= U16_MAX);
719 
720 	if (GRAPHICS_VER(xe) >= 20)
721 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
722 
723 	if (GRAPHICS_VERx100(xe) >= 1250)
724 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
725 
726 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
727 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
728 	bb->cs[bb->len++] = 0;
729 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
730 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
731 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
732 	bb->cs[bb->len++] = 0;
733 	bb->cs[bb->len++] = pitch | mocs;
734 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
735 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
736 }
737 
738 #define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
739 static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
740 			  u64 dst_ofs, unsigned int size, unsigned int pitch)
741 {
742 	u32 mode, copy_type, width;
743 
744 	xe_gt_assert(gt, IS_ALIGNED(size, pitch));
745 	xe_gt_assert(gt, pitch <= U16_MAX);
746 	xe_gt_assert(gt, pitch);
747 	xe_gt_assert(gt, size);
748 
749 	if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
750 	    IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
751 	    IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
752 		mode = MEM_COPY_PAGE_COPY_MODE;
753 		copy_type = 0; /* linear copy */
754 		width = size / PAGE_COPY_MODE_PS;
755 	} else if (pitch > 1) {
756 		xe_gt_assert(gt, size / pitch <= U16_MAX);
757 		mode = 0; /* BYTE_COPY */
758 		copy_type = MEM_COPY_MATRIX_COPY;
759 		width = pitch;
760 	} else {
761 		mode = 0; /* BYTE_COPY */
762 		copy_type = 0; /* linear copy */
763 		width = size;
764 	}
765 
766 	xe_gt_assert(gt, width <= U16_MAX);
767 
768 	bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
769 	bb->cs[bb->len++] = width - 1;
770 	bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
771 	bb->cs[bb->len++] = pitch - 1;
772 	bb->cs[bb->len++] = pitch - 1;
773 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
774 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
775 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
776 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
777 	bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
778 			    FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
779 }
780 
781 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
782 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
783 		      unsigned int pitch)
784 {
785 	struct xe_device *xe = gt_to_xe(gt);
786 
787 	if (xe->info.has_mem_copy_instr)
788 		emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
789 	else
790 		emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
791 }
792 
793 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
794 {
795 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
796 }
797 
798 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
799 			       struct xe_bb *bb,
800 			       u64 src_ofs, bool src_is_indirect,
801 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
802 			       u64 ccs_ofs, bool copy_ccs)
803 {
804 	struct xe_gt *gt = m->tile->primary_gt;
805 	u32 flush_flags = 0;
806 
807 	if (!copy_ccs && dst_is_indirect) {
808 		/*
809 		 * If the src is already in vram, then it should already
810 		 * have been cleared by us, or has been populated by the
811 		 * user. Make sure we copy the CCS aux state as-is.
812 		 *
813 		 * Otherwise if the bo doesn't have any CCS metadata attached,
814 		 * we still need to clear it for security reasons.
815 		 */
816 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
817 
818 		emit_copy_ccs(gt, bb,
819 			      dst_ofs, true,
820 			      ccs_src_ofs, src_is_indirect, dst_size);
821 
822 		flush_flags = MI_FLUSH_DW_CCS;
823 	} else if (copy_ccs) {
824 		if (!src_is_indirect)
825 			src_ofs = ccs_ofs;
826 		else if (!dst_is_indirect)
827 			dst_ofs = ccs_ofs;
828 
829 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
830 
831 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
832 			      src_is_indirect, dst_size);
833 		if (dst_is_indirect)
834 			flush_flags = MI_FLUSH_DW_CCS;
835 	}
836 
837 	return flush_flags;
838 }
839 
840 /**
841  * xe_migrate_copy() - Copy content of TTM resources.
842  * @m: The migration context.
843  * @src_bo: The buffer object @src is currently bound to.
844  * @dst_bo: If copying between resources created for the same bo, set this to
845  * the same value as @src_bo. If copying between buffer objects, set it to
846  * the buffer object @dst is currently bound to.
847  * @src: The source TTM resource.
848  * @dst: The dst TTM resource.
849  * @copy_only_ccs: If true copy only CCS metadata
850  *
851  * Copies the contents of @src to @dst: On flat CCS devices,
852  * the CCS metadata is copied as well if needed, or if not present,
853  * the CCS metadata of @dst is cleared for security reasons.
854  *
855  * Return: Pointer to a dma_fence representing the last copy batch, or
856  * an error pointer on failure. If there is a failure, any copy operation
857  * started by the function call has been synced.
858  */
859 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
860 				  struct xe_bo *src_bo,
861 				  struct xe_bo *dst_bo,
862 				  struct ttm_resource *src,
863 				  struct ttm_resource *dst,
864 				  bool copy_only_ccs)
865 {
866 	struct xe_gt *gt = m->tile->primary_gt;
867 	struct xe_device *xe = gt_to_xe(gt);
868 	struct dma_fence *fence = NULL;
869 	u64 size = xe_bo_size(src_bo);
870 	struct xe_res_cursor src_it, dst_it, ccs_it;
871 	u64 src_L0_ofs, dst_L0_ofs;
872 	u32 src_L0_pt, dst_L0_pt;
873 	u64 src_L0, dst_L0;
874 	int pass = 0;
875 	int err;
876 	bool src_is_pltt = src->mem_type == XE_PL_TT;
877 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
878 	bool src_is_vram = mem_type_is_vram(src->mem_type);
879 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
880 	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
881 	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
882 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
883 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
884 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
885 	bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
886 		GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
887 
888 	/* Copying CCS between two different BOs is not supported yet. */
889 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
890 		return ERR_PTR(-EINVAL);
891 
892 	if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
893 		return ERR_PTR(-EINVAL);
894 
895 	if (!src_is_vram)
896 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
897 	else
898 		xe_res_first(src, 0, size, &src_it);
899 	if (!dst_is_vram)
900 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
901 	else
902 		xe_res_first(dst, 0, size, &dst_it);
903 
904 	if (copy_system_ccs)
905 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
906 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
907 				&ccs_it);
908 
909 	while (size) {
910 		u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
911 		struct xe_sched_job *job;
912 		struct xe_bb *bb;
913 		u32 flush_flags = 0;
914 		u32 update_idx;
915 		u64 ccs_ofs, ccs_size;
916 		u32 ccs_pt;
917 		u32 pte_flags;
918 
919 		bool usm = xe->info.has_usm;
920 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
921 
922 		src_L0 = xe_migrate_res_sizes(m, &src_it);
923 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
924 
925 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
926 			pass++, src_L0, dst_L0);
927 
928 		src_L0 = min(src_L0, dst_L0);
929 
930 		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
931 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
932 		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
933 					      &src_L0_ofs, &src_L0_pt, 0, 0,
934 					      avail_pts);
935 		if (copy_only_ccs) {
936 			dst_L0_ofs = src_L0_ofs;
937 		} else {
938 			pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
939 			batch_size += pte_update_size(m, pte_flags, dst,
940 						      &dst_it, &src_L0,
941 						      &dst_L0_ofs, &dst_L0_pt,
942 						      0, avail_pts, avail_pts);
943 		}
944 
945 		if (copy_system_ccs) {
946 			xe_assert(xe, type_device);
947 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
948 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
949 						      &ccs_ofs, &ccs_pt, 0,
950 						      2 * avail_pts,
951 						      avail_pts);
952 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
953 		}
954 
955 		/* Add copy commands size here */
956 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
957 			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
958 
959 		bb = xe_bb_new(gt, batch_size, usm);
960 		if (IS_ERR(bb)) {
961 			err = PTR_ERR(bb);
962 			goto err_sync;
963 		}
964 
965 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
966 			xe_res_next(&src_it, src_L0);
967 		else
968 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
969 				 &src_it, src_L0, src);
970 
971 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
972 			xe_res_next(&dst_it, src_L0);
973 		else if (!copy_only_ccs)
974 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
975 				 &dst_it, src_L0, dst);
976 
977 		if (copy_system_ccs)
978 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
979 
980 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
981 		update_idx = bb->len;
982 
983 		if (!copy_only_ccs)
984 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
985 
986 		if (needs_ccs_emit)
987 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
988 							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
989 							  dst_L0_ofs,
990 							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
991 							  src_L0, ccs_ofs, copy_ccs);
992 
993 		job = xe_bb_create_migration_job(m->q, bb,
994 						 xe_migrate_batch_base(m, usm),
995 						 update_idx);
996 		if (IS_ERR(job)) {
997 			err = PTR_ERR(job);
998 			goto err;
999 		}
1000 
1001 		xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
1002 		if (!fence) {
1003 			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
1004 						    DMA_RESV_USAGE_BOOKKEEP);
1005 			if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
1006 				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
1007 							    DMA_RESV_USAGE_BOOKKEEP);
1008 			if (err)
1009 				goto err_job;
1010 		}
1011 
1012 		mutex_lock(&m->job_mutex);
1013 		xe_sched_job_arm(job);
1014 		dma_fence_put(fence);
1015 		fence = dma_fence_get(&job->drm.s_fence->finished);
1016 		xe_sched_job_push(job);
1017 
1018 		dma_fence_put(m->fence);
1019 		m->fence = dma_fence_get(fence);
1020 
1021 		mutex_unlock(&m->job_mutex);
1022 
1023 		xe_bb_free(bb, fence);
1024 		size -= src_L0;
1025 		continue;
1026 
1027 err_job:
1028 		xe_sched_job_put(job);
1029 err:
1030 		xe_bb_free(bb, NULL);
1031 
1032 err_sync:
1033 		/* Sync partial copy if any. FIXME: under job_mutex? */
1034 		if (fence) {
1035 			dma_fence_wait(fence, false);
1036 			dma_fence_put(fence);
1037 		}
1038 
1039 		return ERR_PTR(err);
1040 	}
1041 
1042 	return fence;
1043 }
1044 
1045 /**
1046  * xe_migrate_lrc() - Get the LRC from migrate context.
1047  * @migrate: Migrate context.
1048  *
1049  * Return: Pointer to LRC on success, error on failure
1050  */
1051 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
1052 {
1053 	return migrate->q->lrc[0];
1054 }
1055 
1056 static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
1057 {
1058 	/*
1059 	 * The migrate VM is self-referential so it can modify its own PTEs (see
1060 	 * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
1061 	 * entries for kernel operations (copies, clears, CCS migrate), and
1062 	 * suballocate the rest to user operations (binds/unbinds). With
1063 	 * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
1064 	 * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
1065 	 */
1066 	return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
1067 }
1068 
1069 static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
1070 {
1071 	u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
1072 
1073 	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
1074 		  MI_FLUSH_IMM_DW | flags;
1075 	dw[i++] = lower_32_bits(addr);
1076 	dw[i++] = upper_32_bits(addr);
1077 	dw[i++] = MI_NOOP;
1078 	dw[i++] = MI_NOOP;
1079 
1080 	return i;
1081 }
1082 
1083 /**
1084  * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
1085  * @tile: Tile whose migration context to be used.
1086  * @q : Execution to be used along with migration context.
1087  * @src_bo: The buffer object @src is currently bound to.
1088  * @read_write : Creates BB commands for CCS read/write.
1089  *
1090  * Creates batch buffer instructions to copy CCS metadata from CCS pool to
1091  * memory and vice versa.
1092  *
1093  * This function should only be called for IGPU.
1094  *
1095  * Return: 0 if successful, negative error code on failure.
1096  */
1097 int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
1098 			   struct xe_bo *src_bo,
1099 			   enum xe_sriov_vf_ccs_rw_ctxs read_write)
1100 
1101 {
1102 	bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
1103 	bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
1104 	struct ttm_resource *src = src_bo->ttm.resource;
1105 	struct xe_migrate *m = tile->migrate;
1106 	struct xe_gt *gt = tile->primary_gt;
1107 	u32 batch_size, batch_size_allocated;
1108 	struct xe_device *xe = gt_to_xe(gt);
1109 	struct xe_res_cursor src_it, ccs_it;
1110 	struct xe_sriov_vf_ccs_ctx *ctx;
1111 	struct xe_sa_manager *bb_pool;
1112 	u64 size = xe_bo_size(src_bo);
1113 	struct xe_bb *bb = NULL;
1114 	u64 src_L0, src_L0_ofs;
1115 	u32 src_L0_pt;
1116 	int err;
1117 
1118 	ctx = &xe->sriov.vf.ccs.contexts[read_write];
1119 
1120 	xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
1121 
1122 	xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
1123 			PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
1124 			&ccs_it);
1125 
1126 	/* Calculate Batch buffer size */
1127 	batch_size = 0;
1128 	while (size) {
1129 		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
1130 		u64 ccs_ofs, ccs_size;
1131 		u32 ccs_pt;
1132 
1133 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1134 
1135 		src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
1136 
1137 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1138 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1139 					      avail_pts);
1140 
1141 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1142 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1143 					      &ccs_pt, 0, avail_pts, avail_pts);
1144 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1145 
1146 		/* Add copy commands size here */
1147 		batch_size += EMIT_COPY_CCS_DW;
1148 
1149 		size -= src_L0;
1150 	}
1151 
1152 	bb = xe_bb_alloc(gt);
1153 	if (IS_ERR(bb))
1154 		return PTR_ERR(bb);
1155 
1156 	bb_pool = ctx->mem.ccs_bb_pool;
1157 	scoped_guard(mutex, xe_sa_bo_swap_guard(bb_pool)) {
1158 		xe_sa_bo_swap_shadow(bb_pool);
1159 
1160 		err = xe_bb_init(bb, bb_pool, batch_size);
1161 		if (err) {
1162 			xe_gt_err(gt, "BB allocation failed.\n");
1163 			xe_bb_free(bb, NULL);
1164 			return err;
1165 		}
1166 
1167 		batch_size_allocated = batch_size;
1168 		size = xe_bo_size(src_bo);
1169 		batch_size = 0;
1170 
1171 		/*
1172 		 * Emit PTE and copy commands here.
1173 		 * The CCS copy command can only support limited size. If the size to be
1174 		 * copied is more than the limit, divide copy into chunks. So, calculate
1175 		 * sizes here again before copy command is emitted.
1176 		 */
1177 
1178 		while (size) {
1179 			batch_size += 10; /* Flush + ggtt addr + 2 NOP */
1180 			u32 flush_flags = 0;
1181 			u64 ccs_ofs, ccs_size;
1182 			u32 ccs_pt;
1183 
1184 			u32 avail_pts = max_mem_transfer_per_pass(xe) /
1185 					LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1186 
1187 			src_L0 = xe_migrate_res_sizes(m, &src_it);
1188 
1189 			batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1190 						      &src_L0_ofs, &src_L0_pt, 0, 0,
1191 						      avail_pts);
1192 
1193 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
1194 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1195 						      &ccs_pt, 0, avail_pts, avail_pts);
1196 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1197 			batch_size += EMIT_COPY_CCS_DW;
1198 
1199 			emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
1200 
1201 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
1202 
1203 			bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1204 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
1205 							  src_L0_ofs, dst_is_pltt,
1206 							  src_L0, ccs_ofs, true);
1207 			bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1208 
1209 			size -= src_L0;
1210 		}
1211 
1212 		xe_assert(xe, (batch_size_allocated == bb->len));
1213 		src_bo->bb_ccs[read_write] = bb;
1214 
1215 		xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1216 		xe_sa_bo_sync_shadow(bb->bo);
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 /**
1223  * xe_migrate_ccs_rw_copy_clear() - Clear the CCS read/write batch buffer
1224  * content.
1225  * @src_bo: The buffer object @src is currently bound to.
1226  * @read_write : Creates BB commands for CCS read/write.
1227  *
1228  * Directly clearing the BB lacks atomicity and can lead to undefined
1229  * behavior if the vCPU is halted mid-operation during the clearing
1230  * process. To avoid this issue, we use a shadow buffer object approach.
1231  *
1232  * First swap the SA BO address with the shadow BO, perform the clearing
1233  * operation on the BB, update the shadow BO in the ring buffer, then
1234  * sync the shadow and the actual buffer to maintain consistency.
1235  *
1236  * Returns: None.
1237  */
1238 void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo,
1239 				  enum xe_sriov_vf_ccs_rw_ctxs read_write)
1240 {
1241 	struct xe_bb *bb = src_bo->bb_ccs[read_write];
1242 	struct xe_device *xe = xe_bo_device(src_bo);
1243 	struct xe_sriov_vf_ccs_ctx *ctx;
1244 	struct xe_sa_manager *bb_pool;
1245 	u32 *cs;
1246 
1247 	xe_assert(xe, IS_SRIOV_VF(xe));
1248 
1249 	ctx = &xe->sriov.vf.ccs.contexts[read_write];
1250 	bb_pool = ctx->mem.ccs_bb_pool;
1251 
1252 	guard(mutex) (xe_sa_bo_swap_guard(bb_pool));
1253 	xe_sa_bo_swap_shadow(bb_pool);
1254 
1255 	cs = xe_sa_bo_cpu_addr(bb->bo);
1256 	memset(cs, MI_NOOP, bb->len * sizeof(u32));
1257 	xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1258 
1259 	xe_sa_bo_sync_shadow(bb->bo);
1260 
1261 	xe_bb_free(bb, NULL);
1262 	src_bo->bb_ccs[read_write] = NULL;
1263 }
1264 
1265 /**
1266  * xe_migrate_exec_queue() - Get the execution queue from migrate context.
1267  * @migrate: Migrate context.
1268  *
1269  * Return: Pointer to execution queue on success, error on failure
1270  */
1271 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
1272 {
1273 	return migrate->q;
1274 }
1275 
1276 /**
1277  * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
1278  * @vram_bo: The VRAM buffer object.
1279  * @vram_offset: The VRAM offset.
1280  * @sysmem_bo: The sysmem buffer object.
1281  * @sysmem_offset: The sysmem offset.
1282  * @size: The size of VRAM chunk to copy.
1283  * @dir: The direction of the copy operation.
1284  *
1285  * Copies a portion of a buffer object between VRAM and system memory.
1286  * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
1287  * copying to system memory.
1288  *
1289  * Return: Pointer to a dma_fence representing the last copy batch, or
1290  * an error pointer on failure. If there is a failure, any copy operation
1291  * started by the function call has been synced.
1292  */
1293 struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
1294 					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
1295 					     u64 size, enum xe_migrate_copy_dir dir)
1296 {
1297 	struct xe_device *xe = xe_bo_device(vram_bo);
1298 	struct xe_tile *tile = vram_bo->tile;
1299 	struct xe_gt *gt = tile->primary_gt;
1300 	struct xe_migrate *m = tile->migrate;
1301 	struct dma_fence *fence = NULL;
1302 	struct ttm_resource *vram = vram_bo->ttm.resource;
1303 	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
1304 	struct xe_res_cursor vram_it, sysmem_it;
1305 	u64 vram_L0_ofs, sysmem_L0_ofs;
1306 	u32 vram_L0_pt, sysmem_L0_pt;
1307 	u64 vram_L0, sysmem_L0;
1308 	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
1309 	bool use_comp_pat = to_sysmem &&
1310 		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
1311 	int pass = 0;
1312 	int err;
1313 
1314 	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
1315 	xe_assert(xe, xe_bo_is_vram(vram_bo));
1316 	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
1317 	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
1318 	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
1319 
1320 	xe_res_first(vram, vram_offset, size, &vram_it);
1321 	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
1322 
1323 	while (size) {
1324 		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
1325 		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
1326 		struct xe_sched_job *job;
1327 		struct xe_bb *bb;
1328 		u32 update_idx;
1329 		bool usm = xe->info.has_usm;
1330 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1331 
1332 		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
1333 		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
1334 
1335 		xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
1336 
1337 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
1338 		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
1339 					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
1340 
1341 		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
1342 					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
1343 		batch_size += EMIT_COPY_DW;
1344 
1345 		bb = xe_bb_new(gt, batch_size, usm);
1346 		if (IS_ERR(bb)) {
1347 			err = PTR_ERR(bb);
1348 			return ERR_PTR(err);
1349 		}
1350 
1351 		if (xe_migrate_allow_identity(vram_L0, &vram_it))
1352 			xe_res_next(&vram_it, vram_L0);
1353 		else
1354 			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
1355 
1356 		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
1357 
1358 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1359 		update_idx = bb->len;
1360 
1361 		if (to_sysmem)
1362 			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
1363 		else
1364 			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
1365 
1366 		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
1367 						 update_idx);
1368 		if (IS_ERR(job)) {
1369 			xe_bb_free(bb, NULL);
1370 			err = PTR_ERR(job);
1371 			return ERR_PTR(err);
1372 		}
1373 
1374 		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1375 
1376 		xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
1377 						     DMA_RESV_USAGE_BOOKKEEP));
1378 		xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
1379 						     DMA_RESV_USAGE_BOOKKEEP));
1380 
1381 		scoped_guard(mutex, &m->job_mutex) {
1382 			xe_sched_job_arm(job);
1383 			dma_fence_put(fence);
1384 			fence = dma_fence_get(&job->drm.s_fence->finished);
1385 			xe_sched_job_push(job);
1386 
1387 			dma_fence_put(m->fence);
1388 			m->fence = dma_fence_get(fence);
1389 		}
1390 
1391 		xe_bb_free(bb, fence);
1392 		size -= vram_L0;
1393 	}
1394 
1395 	return fence;
1396 }
1397 
1398 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1399 				 u32 size, u32 pitch)
1400 {
1401 	struct xe_device *xe = gt_to_xe(gt);
1402 	u32 *cs = bb->cs + bb->len;
1403 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
1404 
1405 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
1406 	*cs++ = pitch - 1;
1407 	*cs++ = (size / pitch) - 1;
1408 	*cs++ = pitch - 1;
1409 	*cs++ = lower_32_bits(src_ofs);
1410 	*cs++ = upper_32_bits(src_ofs);
1411 	if (GRAPHICS_VERx100(xe) >= 2000)
1412 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1413 	else
1414 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1415 
1416 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1417 
1418 	bb->len += len;
1419 }
1420 
1421 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1422 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
1423 {
1424 	struct xe_device *xe = gt_to_xe(gt);
1425 	u32 *cs = bb->cs + bb->len;
1426 	u32 len = XY_FAST_COLOR_BLT_DW;
1427 
1428 	if (GRAPHICS_VERx100(xe) < 1250)
1429 		len = 11;
1430 
1431 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
1432 		(len - 2);
1433 	if (GRAPHICS_VERx100(xe) >= 2000)
1434 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
1435 			(pitch - 1);
1436 	else
1437 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
1438 			(pitch - 1);
1439 	*cs++ = 0;
1440 	*cs++ = (size / pitch) << 16 | pitch / 4;
1441 	*cs++ = lower_32_bits(src_ofs);
1442 	*cs++ = upper_32_bits(src_ofs);
1443 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
1444 	*cs++ = 0;
1445 	*cs++ = 0;
1446 	*cs++ = 0;
1447 	*cs++ = 0;
1448 
1449 	if (len > 11) {
1450 		*cs++ = 0;
1451 		*cs++ = 0;
1452 		*cs++ = 0;
1453 		*cs++ = 0;
1454 		*cs++ = 0;
1455 	}
1456 
1457 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1458 
1459 	bb->len += len;
1460 }
1461 
1462 static bool has_service_copy_support(struct xe_gt *gt)
1463 {
1464 	/*
1465 	 * What we care about is whether the architecture was designed with
1466 	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
1467 	 * instructions) so check the architectural engine list rather than the
1468 	 * actual list since these instructions are usable on BCS0 even if
1469 	 * all of the actual service copy engines (BCS1-BCS8) have been fused
1470 	 * off.
1471 	 */
1472 	return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1473 					      XE_HW_ENGINE_BCS1);
1474 }
1475 
1476 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1477 {
1478 	if (has_service_copy_support(gt))
1479 		return PVC_MEM_SET_CMD_LEN_DW;
1480 	else
1481 		return XY_FAST_COLOR_BLT_DW;
1482 }
1483 
1484 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1485 		       u32 size, u32 pitch, bool is_vram)
1486 {
1487 	if (has_service_copy_support(gt))
1488 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1489 	else
1490 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1491 				     is_vram);
1492 }
1493 
1494 /**
1495  * xe_migrate_clear() - Copy content of TTM resources.
1496  * @m: The migration context.
1497  * @bo: The buffer object @dst is currently bound to.
1498  * @dst: The dst TTM resource to be cleared.
1499  * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1500  *
1501  * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1502  * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1503  * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1504  * TODO: Eliminate the @bo argument.
1505  *
1506  * Return: Pointer to a dma_fence representing the last clear batch, or
1507  * an error pointer on failure. If there is a failure, any clear operation
1508  * started by the function call has been synced.
1509  */
1510 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1511 				   struct xe_bo *bo,
1512 				   struct ttm_resource *dst,
1513 				   u32 clear_flags)
1514 {
1515 	bool clear_vram = mem_type_is_vram(dst->mem_type);
1516 	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1517 	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1518 	struct xe_gt *gt = m->tile->primary_gt;
1519 	struct xe_device *xe = gt_to_xe(gt);
1520 	bool clear_only_system_ccs = false;
1521 	struct dma_fence *fence = NULL;
1522 	u64 size = xe_bo_size(bo);
1523 	struct xe_res_cursor src_it;
1524 	struct ttm_resource *src = dst;
1525 	int err;
1526 
1527 	if (WARN_ON(!clear_bo_data && !clear_ccs))
1528 		return NULL;
1529 
1530 	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1531 		clear_only_system_ccs = true;
1532 
1533 	if (!clear_vram)
1534 		xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
1535 	else
1536 		xe_res_first(src, 0, xe_bo_size(bo), &src_it);
1537 
1538 	while (size) {
1539 		u64 clear_L0_ofs;
1540 		u32 clear_L0_pt;
1541 		u32 flush_flags = 0;
1542 		u64 clear_L0;
1543 		struct xe_sched_job *job;
1544 		struct xe_bb *bb;
1545 		u32 batch_size, update_idx;
1546 		u32 pte_flags;
1547 
1548 		bool usm = xe->info.has_usm;
1549 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1550 
1551 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1552 
1553 		/* Calculate final sizes and batch size.. */
1554 		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1555 		batch_size = 1 +
1556 			pte_update_size(m, pte_flags, src, &src_it,
1557 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1558 					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1559 					avail_pts);
1560 
1561 		if (xe_migrate_needs_ccs_emit(xe))
1562 			batch_size += EMIT_COPY_CCS_DW;
1563 
1564 		/* Clear commands */
1565 
1566 		if (WARN_ON_ONCE(!clear_L0))
1567 			break;
1568 
1569 		bb = xe_bb_new(gt, batch_size, usm);
1570 		if (IS_ERR(bb)) {
1571 			err = PTR_ERR(bb);
1572 			goto err_sync;
1573 		}
1574 
1575 		size -= clear_L0;
1576 		/* Preemption is enabled again by the ring ops. */
1577 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
1578 			xe_res_next(&src_it, clear_L0);
1579 		} else {
1580 			emit_pte(m, bb, clear_L0_pt, clear_vram,
1581 				 clear_only_system_ccs, &src_it, clear_L0, dst);
1582 			flush_flags |= MI_INVALIDATE_TLB;
1583 		}
1584 
1585 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1586 		update_idx = bb->len;
1587 
1588 		if (clear_bo_data)
1589 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1590 
1591 		if (xe_migrate_needs_ccs_emit(xe)) {
1592 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1593 				      m->cleared_mem_ofs, false, clear_L0);
1594 			flush_flags |= MI_FLUSH_DW_CCS;
1595 		}
1596 
1597 		job = xe_bb_create_migration_job(m->q, bb,
1598 						 xe_migrate_batch_base(m, usm),
1599 						 update_idx);
1600 		if (IS_ERR(job)) {
1601 			err = PTR_ERR(job);
1602 			goto err;
1603 		}
1604 
1605 		xe_sched_job_add_migrate_flush(job, flush_flags);
1606 		if (!fence) {
1607 			/*
1608 			 * There can't be anything userspace related at this
1609 			 * point, so we just need to respect any potential move
1610 			 * fences, which are always tracked as
1611 			 * DMA_RESV_USAGE_KERNEL.
1612 			 */
1613 			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1614 						    DMA_RESV_USAGE_KERNEL);
1615 			if (err)
1616 				goto err_job;
1617 		}
1618 
1619 		mutex_lock(&m->job_mutex);
1620 		xe_sched_job_arm(job);
1621 		dma_fence_put(fence);
1622 		fence = dma_fence_get(&job->drm.s_fence->finished);
1623 		xe_sched_job_push(job);
1624 
1625 		dma_fence_put(m->fence);
1626 		m->fence = dma_fence_get(fence);
1627 
1628 		mutex_unlock(&m->job_mutex);
1629 
1630 		xe_bb_free(bb, fence);
1631 		continue;
1632 
1633 err_job:
1634 		xe_sched_job_put(job);
1635 err:
1636 		xe_bb_free(bb, NULL);
1637 err_sync:
1638 		/* Sync partial copies if any. FIXME: job_mutex? */
1639 		if (fence) {
1640 			dma_fence_wait(fence, false);
1641 			dma_fence_put(fence);
1642 		}
1643 
1644 		return ERR_PTR(err);
1645 	}
1646 
1647 	if (clear_ccs)
1648 		bo->ccs_cleared = true;
1649 
1650 	return fence;
1651 }
1652 
1653 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1654 			  const struct xe_vm_pgtable_update_op *pt_op,
1655 			  const struct xe_vm_pgtable_update *update,
1656 			  struct xe_migrate_pt_update *pt_update)
1657 {
1658 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1659 	u32 chunk;
1660 	u32 ofs = update->ofs, size = update->qwords;
1661 
1662 	/*
1663 	 * If we have 512 entries (max), we would populate it ourselves,
1664 	 * and update the PDE above it to the new pointer.
1665 	 * The only time this can only happen if we have to update the top
1666 	 * PDE. This requires a BO that is almost vm->size big.
1667 	 *
1668 	 * This shouldn't be possible in practice.. might change when 16K
1669 	 * pages are used. Hence the assert.
1670 	 */
1671 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1672 	if (!ppgtt_ofs)
1673 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1674 						xe_bo_addr(update->pt_bo, 0,
1675 							   XE_PAGE_SIZE), false);
1676 
1677 	do {
1678 		u64 addr = ppgtt_ofs + ofs * 8;
1679 
1680 		chunk = min(size, MAX_PTE_PER_SDI);
1681 
1682 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1683 		if (!(bb->len & 1))
1684 			bb->cs[bb->len++] = MI_NOOP;
1685 
1686 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1687 		bb->cs[bb->len++] = lower_32_bits(addr);
1688 		bb->cs[bb->len++] = upper_32_bits(addr);
1689 		if (pt_op->bind)
1690 			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1691 				      ofs, chunk, update);
1692 		else
1693 			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1694 				   ofs, chunk, update);
1695 
1696 		bb->len += chunk * 2;
1697 		ofs += chunk;
1698 		size -= chunk;
1699 	} while (size);
1700 }
1701 
1702 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1703 {
1704 	return xe_vm_get(m->q->vm);
1705 }
1706 
1707 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1708 struct migrate_test_params {
1709 	struct xe_test_priv base;
1710 	bool force_gpu;
1711 };
1712 
1713 #define to_migrate_test_params(_priv) \
1714 	container_of(_priv, struct migrate_test_params, base)
1715 #endif
1716 
1717 static struct dma_fence *
1718 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1719 			       struct xe_migrate_pt_update *pt_update)
1720 {
1721 	XE_TEST_DECLARE(struct migrate_test_params *test =
1722 			to_migrate_test_params
1723 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1724 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1725 	struct xe_vm *vm = pt_update->vops->vm;
1726 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1727 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1728 	int err;
1729 	u32 i, j;
1730 
1731 	if (XE_TEST_ONLY(test && test->force_gpu))
1732 		return ERR_PTR(-ETIME);
1733 
1734 	if (ops->pre_commit) {
1735 		pt_update->job = NULL;
1736 		err = ops->pre_commit(pt_update);
1737 		if (err)
1738 			return ERR_PTR(err);
1739 	}
1740 
1741 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1742 		const struct xe_vm_pgtable_update_op *pt_op =
1743 			&pt_update_ops->ops[i];
1744 
1745 		for (j = 0; j < pt_op->num_entries; j++) {
1746 			const struct xe_vm_pgtable_update *update =
1747 				&pt_op->entries[j];
1748 
1749 			if (pt_op->bind)
1750 				ops->populate(pt_update, m->tile,
1751 					      &update->pt_bo->vmap, NULL,
1752 					      update->ofs, update->qwords,
1753 					      update);
1754 			else
1755 				ops->clear(pt_update, m->tile,
1756 					   &update->pt_bo->vmap, NULL,
1757 					   update->ofs, update->qwords, update);
1758 		}
1759 	}
1760 
1761 	trace_xe_vm_cpu_bind(vm);
1762 	xe_device_wmb(vm->xe);
1763 
1764 	return dma_fence_get_stub();
1765 }
1766 
1767 static struct dma_fence *
1768 __xe_migrate_update_pgtables(struct xe_migrate *m,
1769 			     struct xe_migrate_pt_update *pt_update,
1770 			     struct xe_vm_pgtable_update_ops *pt_update_ops)
1771 {
1772 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1773 	struct xe_tile *tile = m->tile;
1774 	struct xe_gt *gt = tile->primary_gt;
1775 	struct xe_device *xe = tile_to_xe(tile);
1776 	struct xe_sched_job *job;
1777 	struct dma_fence *fence;
1778 	struct drm_suballoc *sa_bo = NULL;
1779 	struct xe_bb *bb;
1780 	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1781 	u32 num_updates = 0, current_update = 0;
1782 	u64 addr;
1783 	int err = 0;
1784 	bool is_migrate = pt_update_ops->q == m->q;
1785 	bool usm = is_migrate && xe->info.has_usm;
1786 
1787 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1788 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1789 		struct xe_vm_pgtable_update *updates = pt_op->entries;
1790 
1791 		num_updates += pt_op->num_entries;
1792 		for (j = 0; j < pt_op->num_entries; ++j) {
1793 			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1794 						    MAX_PTE_PER_SDI);
1795 
1796 			/* align noop + MI_STORE_DATA_IMM cmd prefix */
1797 			batch_size += 4 * num_cmds + updates[j].qwords * 2;
1798 		}
1799 	}
1800 
1801 	/* fixed + PTE entries */
1802 	if (IS_DGFX(xe))
1803 		batch_size += 2;
1804 	else
1805 		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1806 			num_updates * 2;
1807 
1808 	bb = xe_bb_new(gt, batch_size, usm);
1809 	if (IS_ERR(bb))
1810 		return ERR_CAST(bb);
1811 
1812 	/* For sysmem PTE's, need to map them in our hole.. */
1813 	if (!IS_DGFX(xe)) {
1814 		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1815 		u32 ptes, ofs;
1816 
1817 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1818 		if (!is_migrate) {
1819 			u32 num_units = DIV_ROUND_UP(num_updates,
1820 						     NUM_VMUSA_WRITES_PER_UNIT);
1821 
1822 			if (num_units > m->vm_update_sa.size) {
1823 				err = -ENOBUFS;
1824 				goto err_bb;
1825 			}
1826 			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1827 						 GFP_KERNEL, true, 0);
1828 			if (IS_ERR(sa_bo)) {
1829 				err = PTR_ERR(sa_bo);
1830 				goto err_bb;
1831 			}
1832 
1833 			ppgtt_ofs = NUM_KERNEL_PDE +
1834 				(drm_suballoc_soffset(sa_bo) /
1835 				 NUM_VMUSA_UNIT_PER_PAGE);
1836 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1837 				    NUM_VMUSA_UNIT_PER_PAGE) *
1838 				VM_SA_UPDATE_UNIT_SIZE;
1839 		}
1840 
1841 		/* Map our PT's to gtt */
1842 		i = 0;
1843 		j = 0;
1844 		ptes = num_updates;
1845 		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1846 		while (ptes) {
1847 			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1848 			u32 idx = 0;
1849 
1850 			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1851 				MI_SDI_NUM_QW(chunk);
1852 			bb->cs[bb->len++] = ofs;
1853 			bb->cs[bb->len++] = 0; /* upper_32_bits */
1854 
1855 			for (; i < pt_update_ops->num_ops; ++i) {
1856 				struct xe_vm_pgtable_update_op *pt_op =
1857 					&pt_update_ops->ops[i];
1858 				struct xe_vm_pgtable_update *updates = pt_op->entries;
1859 
1860 				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1861 					struct xe_vm *vm = pt_update->vops->vm;
1862 					struct xe_bo *pt_bo = updates[j].pt_bo;
1863 
1864 					if (idx == chunk)
1865 						goto next_cmd;
1866 
1867 					xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
1868 
1869 					/* Map a PT at most once */
1870 					if (pt_bo->update_index < 0)
1871 						pt_bo->update_index = current_update;
1872 
1873 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1874 									 pat_index, 0);
1875 					bb->cs[bb->len++] = lower_32_bits(addr);
1876 					bb->cs[bb->len++] = upper_32_bits(addr);
1877 				}
1878 
1879 				j = 0;
1880 			}
1881 
1882 next_cmd:
1883 			ptes -= chunk;
1884 			ofs += chunk * sizeof(u64);
1885 		}
1886 
1887 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1888 		update_idx = bb->len;
1889 
1890 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1891 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1892 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1893 			struct xe_vm_pgtable_update_op *pt_op =
1894 				&pt_update_ops->ops[i];
1895 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1896 
1897 			for (j = 0; j < pt_op->num_entries; ++j) {
1898 				struct xe_bo *pt_bo = updates[j].pt_bo;
1899 
1900 				write_pgtable(tile, bb, addr +
1901 					      pt_bo->update_index * XE_PAGE_SIZE,
1902 					      pt_op, &updates[j], pt_update);
1903 			}
1904 		}
1905 	} else {
1906 		/* phys pages, no preamble required */
1907 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1908 		update_idx = bb->len;
1909 
1910 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1911 			struct xe_vm_pgtable_update_op *pt_op =
1912 				&pt_update_ops->ops[i];
1913 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1914 
1915 			for (j = 0; j < pt_op->num_entries; ++j)
1916 				write_pgtable(tile, bb, 0, pt_op, &updates[j],
1917 					      pt_update);
1918 		}
1919 	}
1920 
1921 	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1922 					 xe_migrate_batch_base(m, usm),
1923 					 update_idx);
1924 	if (IS_ERR(job)) {
1925 		err = PTR_ERR(job);
1926 		goto err_sa;
1927 	}
1928 
1929 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1930 
1931 	if (ops->pre_commit) {
1932 		pt_update->job = job;
1933 		err = ops->pre_commit(pt_update);
1934 		if (err)
1935 			goto err_job;
1936 	}
1937 	if (is_migrate)
1938 		mutex_lock(&m->job_mutex);
1939 
1940 	xe_sched_job_arm(job);
1941 	fence = dma_fence_get(&job->drm.s_fence->finished);
1942 	xe_sched_job_push(job);
1943 
1944 	if (is_migrate)
1945 		mutex_unlock(&m->job_mutex);
1946 
1947 	xe_bb_free(bb, fence);
1948 	drm_suballoc_free(sa_bo, fence);
1949 
1950 	return fence;
1951 
1952 err_job:
1953 	xe_sched_job_put(job);
1954 err_sa:
1955 	drm_suballoc_free(sa_bo, NULL);
1956 err_bb:
1957 	xe_bb_free(bb, NULL);
1958 	return ERR_PTR(err);
1959 }
1960 
1961 /**
1962  * xe_migrate_update_pgtables() - Pipelined page-table update
1963  * @m: The migrate context.
1964  * @pt_update: PT update arguments
1965  *
1966  * Perform a pipelined page-table update. The update descriptors are typically
1967  * built under the same lock critical section as a call to this function. If
1968  * using the default engine for the updates, they will be performed in the
1969  * order they grab the job_mutex. If different engines are used, external
1970  * synchronization is needed for overlapping updates to maintain page-table
1971  * consistency. Note that the meaning of "overlapping" is that the updates
1972  * touch the same page-table, which might be a higher-level page-directory.
1973  * If no pipelining is needed, then updates may be performed by the cpu.
1974  *
1975  * Return: A dma_fence that, when signaled, indicates the update completion.
1976  */
1977 struct dma_fence *
1978 xe_migrate_update_pgtables(struct xe_migrate *m,
1979 			   struct xe_migrate_pt_update *pt_update)
1980 
1981 {
1982 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1983 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1984 	struct dma_fence *fence;
1985 
1986 	fence =  xe_migrate_update_pgtables_cpu(m, pt_update);
1987 
1988 	/* -ETIME indicates a job is needed, anything else is legit error */
1989 	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
1990 		return fence;
1991 
1992 	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
1993 }
1994 
1995 /**
1996  * xe_migrate_wait() - Complete all operations using the xe_migrate context
1997  * @m: Migrate context to wait for.
1998  *
1999  * Waits until the GPU no longer uses the migrate context's default engine
2000  * or its page-table objects. FIXME: What about separate page-table update
2001  * engines?
2002  */
2003 void xe_migrate_wait(struct xe_migrate *m)
2004 {
2005 	if (m->fence)
2006 		dma_fence_wait(m->fence, false);
2007 }
2008 
2009 static u32 pte_update_cmd_size(u64 size)
2010 {
2011 	u32 num_dword;
2012 	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
2013 
2014 	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
2015 
2016 	/*
2017 	 * MI_STORE_DATA_IMM command is used to update page table. Each
2018 	 * instruction can update maximumly MAX_PTE_PER_SDI pte entries. To
2019 	 * update n (n <= MAX_PTE_PER_SDI) pte entries, we need:
2020 	 *
2021 	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc)
2022 	 * - 2 dword for the page table's physical location
2023 	 * - 2*n dword for value of pte to fill (each pte entry is 2 dwords)
2024 	 */
2025 	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
2026 	num_dword += entries * 2;
2027 
2028 	return num_dword;
2029 }
2030 
2031 static void build_pt_update_batch_sram(struct xe_migrate *m,
2032 				       struct xe_bb *bb, u32 pt_offset,
2033 				       struct drm_pagemap_addr *sram_addr,
2034 				       u32 size, int level)
2035 {
2036 	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
2037 	u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
2038 	u32 ptes;
2039 	int i = 0;
2040 
2041 	xe_tile_assert(m->tile, PAGE_ALIGNED(size));
2042 
2043 	ptes = DIV_ROUND_UP(size, gpu_page_size);
2044 	while (ptes) {
2045 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
2046 
2047 		if (!level)
2048 			chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
2049 
2050 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
2051 		bb->cs[bb->len++] = pt_offset;
2052 		bb->cs[bb->len++] = 0;
2053 
2054 		pt_offset += chunk * 8;
2055 		ptes -= chunk;
2056 
2057 		while (chunk--) {
2058 			u64 addr = sram_addr[i].addr;
2059 			u64 pte;
2060 
2061 			xe_tile_assert(m->tile, sram_addr[i].proto ==
2062 				       DRM_INTERCONNECT_SYSTEM ||
2063 				       sram_addr[i].proto == XE_INTERCONNECT_P2P);
2064 			xe_tile_assert(m->tile, addr);
2065 			xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
2066 
2067 again:
2068 			pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
2069 								addr, pat_index,
2070 								level, false, 0);
2071 			bb->cs[bb->len++] = lower_32_bits(pte);
2072 			bb->cs[bb->len++] = upper_32_bits(pte);
2073 
2074 			if (gpu_page_size < PAGE_SIZE) {
2075 				addr += XE_PAGE_SIZE;
2076 				if (!PAGE_ALIGNED(addr)) {
2077 					chunk--;
2078 					goto again;
2079 				}
2080 				i++;
2081 			} else {
2082 				i += gpu_page_size / PAGE_SIZE;
2083 			}
2084 		}
2085 	}
2086 }
2087 
2088 static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
2089 				    unsigned long size)
2090 {
2091 	u32 large_size = (0x1 << xe_pt_shift(1));
2092 	unsigned long i, incr = large_size / PAGE_SIZE;
2093 
2094 	for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
2095 		if (PAGE_SIZE << sram_addr[i].order != large_size)
2096 			return false;
2097 
2098 	return true;
2099 }
2100 
2101 #define XE_CACHELINE_BYTES	64ull
2102 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
2103 
2104 static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
2105 {
2106 	u32 pitch;
2107 
2108 	if (IS_ALIGNED(len, PAGE_SIZE))
2109 		pitch = PAGE_SIZE;
2110 	else if (IS_ALIGNED(len, SZ_4K))
2111 		pitch = SZ_4K;
2112 	else if (IS_ALIGNED(len, SZ_256))
2113 		pitch = SZ_256;
2114 	else if (IS_ALIGNED(len, 4))
2115 		pitch = 4;
2116 	else
2117 		pitch = 1;
2118 
2119 	xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
2120 	return pitch;
2121 }
2122 
2123 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
2124 					 unsigned long len,
2125 					 unsigned long sram_offset,
2126 					 struct drm_pagemap_addr *sram_addr,
2127 					 u64 vram_addr,
2128 					 struct dma_fence *deps,
2129 					 const enum xe_migrate_copy_dir dir)
2130 {
2131 	struct xe_gt *gt = m->tile->primary_gt;
2132 	struct xe_device *xe = gt_to_xe(gt);
2133 	bool use_usm_batch = xe->info.has_usm;
2134 	struct dma_fence *fence = NULL;
2135 	u32 batch_size = 1;
2136 	u64 src_L0_ofs, dst_L0_ofs;
2137 	struct xe_sched_job *job;
2138 	struct xe_bb *bb;
2139 	u32 update_idx, pt_slot = 0;
2140 	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
2141 	unsigned int pitch = xe_migrate_copy_pitch(xe, len);
2142 	int err;
2143 	unsigned long i, j;
2144 	bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
2145 
2146 	if (!xe->info.has_mem_copy_instr &&
2147 	    drm_WARN_ON(&xe->drm,
2148 			(!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
2149 		return ERR_PTR(-EOPNOTSUPP);
2150 
2151 	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
2152 
2153 	batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
2154 	batch_size += EMIT_COPY_DW;
2155 
2156 	bb = xe_bb_new(gt, batch_size, use_usm_batch);
2157 	if (IS_ERR(bb)) {
2158 		err = PTR_ERR(bb);
2159 		return ERR_PTR(err);
2160 	}
2161 
2162 	/*
2163 	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
2164 	 * the entry is populated by GPU pagemap but subsequent entries within
2165 	 * the range of that order are not populated.
2166 	 * build_pt_update_batch_sram() expects a fully populated array of
2167 	 * struct drm_pagemap_addr. Ensure this is the case even with higher
2168 	 * orders.
2169 	 */
2170 	for (i = 0; !use_pde && i < npages;) {
2171 		unsigned int order = sram_addr[i].order;
2172 
2173 		for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
2174 			if (!sram_addr[i + j].addr)
2175 				sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
2176 
2177 		i += NR_PAGES(order);
2178 	}
2179 
2180 	if (use_pde)
2181 		build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
2182 					   sram_addr, npages << PAGE_SHIFT, 1);
2183 	else
2184 		build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
2185 					   sram_addr, npages << PAGE_SHIFT, 0);
2186 
2187 	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
2188 		if (use_pde)
2189 			src_L0_ofs = m->large_page_copy_ofs + sram_offset;
2190 		else
2191 			src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2192 		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2193 
2194 	} else {
2195 		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2196 		if (use_pde)
2197 			dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
2198 		else
2199 			dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2200 	}
2201 
2202 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
2203 	update_idx = bb->len;
2204 
2205 	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
2206 
2207 	job = xe_bb_create_migration_job(m->q, bb,
2208 					 xe_migrate_batch_base(m, use_usm_batch),
2209 					 update_idx);
2210 	if (IS_ERR(job)) {
2211 		err = PTR_ERR(job);
2212 		goto err;
2213 	}
2214 
2215 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
2216 
2217 	if (deps && !dma_fence_is_signaled(deps)) {
2218 		dma_fence_get(deps);
2219 		err = drm_sched_job_add_dependency(&job->drm, deps);
2220 		if (err)
2221 			dma_fence_wait(deps, false);
2222 		err = 0;
2223 	}
2224 
2225 	mutex_lock(&m->job_mutex);
2226 	xe_sched_job_arm(job);
2227 	fence = dma_fence_get(&job->drm.s_fence->finished);
2228 	xe_sched_job_push(job);
2229 
2230 	dma_fence_put(m->fence);
2231 	m->fence = dma_fence_get(fence);
2232 	mutex_unlock(&m->job_mutex);
2233 
2234 	xe_bb_free(bb, fence);
2235 
2236 	return fence;
2237 
2238 err:
2239 	xe_bb_free(bb, NULL);
2240 
2241 	return ERR_PTR(err);
2242 }
2243 
2244 /**
2245  * xe_migrate_to_vram() - Migrate to VRAM
2246  * @m: The migration context.
2247  * @npages: Number of pages to migrate.
2248  * @src_addr: Array of DMA information (source of migrate)
2249  * @dst_addr: Device physical address of VRAM (destination of migrate)
2250  * @deps: struct dma_fence representing the dependencies that need
2251  * to be signaled before migration.
2252  *
2253  * Copy from an array dma addresses to a VRAM device physical address
2254  *
2255  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2256  * failure
2257  */
2258 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
2259 				     unsigned long npages,
2260 				     struct drm_pagemap_addr *src_addr,
2261 				     u64 dst_addr,
2262 				     struct dma_fence *deps)
2263 {
2264 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
2265 			       deps, XE_MIGRATE_COPY_TO_VRAM);
2266 }
2267 
2268 /**
2269  * xe_migrate_from_vram() - Migrate from VRAM
2270  * @m: The migration context.
2271  * @npages: Number of pages to migrate.
2272  * @src_addr: Device physical address of VRAM (source of migrate)
2273  * @dst_addr: Array of DMA information (destination of migrate)
2274  * @deps: struct dma_fence representing the dependencies that need
2275  * to be signaled before migration.
2276  *
2277  * Copy from a VRAM device physical address to an array dma addresses
2278  *
2279  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2280  * failure
2281  */
2282 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
2283 				       unsigned long npages,
2284 				       u64 src_addr,
2285 				       struct drm_pagemap_addr *dst_addr,
2286 				       struct dma_fence *deps)
2287 {
2288 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
2289 			       deps, XE_MIGRATE_COPY_TO_SRAM);
2290 }
2291 
2292 static void xe_migrate_dma_unmap(struct xe_device *xe,
2293 				 struct drm_pagemap_addr *pagemap_addr,
2294 				 int len, int write)
2295 {
2296 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2297 
2298 	for (i = 0; i < npages; ++i) {
2299 		if (!pagemap_addr[i].addr)
2300 			break;
2301 
2302 		dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
2303 			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2304 	}
2305 	kfree(pagemap_addr);
2306 }
2307 
2308 static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
2309 						   void *buf, int len,
2310 						   int write)
2311 {
2312 	struct drm_pagemap_addr *pagemap_addr;
2313 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2314 
2315 	pagemap_addr = kzalloc_objs(*pagemap_addr, npages);
2316 	if (!pagemap_addr)
2317 		return ERR_PTR(-ENOMEM);
2318 
2319 	for (i = 0; i < npages; ++i) {
2320 		dma_addr_t addr;
2321 		struct page *page;
2322 		enum dma_data_direction dir = write ? DMA_TO_DEVICE :
2323 						      DMA_FROM_DEVICE;
2324 
2325 		if (is_vmalloc_addr(buf))
2326 			page = vmalloc_to_page(buf);
2327 		else
2328 			page = virt_to_page(buf);
2329 
2330 		addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
2331 		if (dma_mapping_error(xe->drm.dev, addr))
2332 			goto err_fault;
2333 
2334 		pagemap_addr[i] =
2335 			drm_pagemap_addr_encode(addr,
2336 						DRM_INTERCONNECT_SYSTEM,
2337 						0, dir);
2338 		buf += PAGE_SIZE;
2339 	}
2340 
2341 	return pagemap_addr;
2342 
2343 err_fault:
2344 	xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
2345 	return ERR_PTR(-EFAULT);
2346 }
2347 
2348 /**
2349  * xe_migrate_access_memory - Access memory of a BO via GPU
2350  *
2351  * @m: The migration context.
2352  * @bo: buffer object
2353  * @offset: access offset into buffer object
2354  * @buf: pointer to caller memory to read into or write from
2355  * @len: length of access
2356  * @write: write access
2357  *
2358  * Access memory of a BO via GPU either reading in or writing from a passed in
2359  * pointer. Pointer is dma mapped for GPU access and GPU commands are issued to
2360  * read to or write from pointer.
2361  *
2362  * Returns:
2363  * 0 if successful, negative error code on failure.
2364  */
2365 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
2366 			     unsigned long offset, void *buf, int len,
2367 			     int write)
2368 {
2369 	struct xe_tile *tile = m->tile;
2370 	struct xe_device *xe = tile_to_xe(tile);
2371 	struct xe_res_cursor cursor;
2372 	struct dma_fence *fence = NULL;
2373 	struct drm_pagemap_addr *pagemap_addr;
2374 	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
2375 	int bytes_left = len, current_page = 0;
2376 	void *orig_buf = buf;
2377 
2378 	xe_bo_assert_held(bo);
2379 
2380 	/* Use bounce buffer for small access and unaligned access */
2381 	if (!xe->info.has_mem_copy_instr &&
2382 	    (!IS_ALIGNED(len, 4) ||
2383 	     !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
2384 	     !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
2385 		int buf_offset = 0;
2386 		void *bounce;
2387 		int err;
2388 
2389 		BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
2390 		bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
2391 		if (!bounce)
2392 			return -ENOMEM;
2393 
2394 		/*
2395 		 * Less than ideal for large unaligned access but this should be
2396 		 * fairly rare, can fixup if this becomes common.
2397 		 */
2398 		do {
2399 			int copy_bytes = min_t(int, bytes_left,
2400 					       XE_CACHELINE_BYTES -
2401 					       (offset & XE_CACHELINE_MASK));
2402 			int ptr_offset = offset & XE_CACHELINE_MASK;
2403 
2404 			err = xe_migrate_access_memory(m, bo,
2405 						       offset &
2406 						       ~XE_CACHELINE_MASK,
2407 						       bounce,
2408 						       XE_CACHELINE_BYTES, 0);
2409 			if (err)
2410 				break;
2411 
2412 			if (write) {
2413 				memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
2414 
2415 				err = xe_migrate_access_memory(m, bo,
2416 							       offset & ~XE_CACHELINE_MASK,
2417 							       bounce,
2418 							       XE_CACHELINE_BYTES, write);
2419 				if (err)
2420 					break;
2421 			} else {
2422 				memcpy(buf + buf_offset, bounce + ptr_offset,
2423 				       copy_bytes);
2424 			}
2425 
2426 			bytes_left -= copy_bytes;
2427 			buf_offset += copy_bytes;
2428 			offset += copy_bytes;
2429 		} while (bytes_left);
2430 
2431 		kfree(bounce);
2432 		return err;
2433 	}
2434 
2435 	pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
2436 	if (IS_ERR(pagemap_addr))
2437 		return PTR_ERR(pagemap_addr);
2438 
2439 	xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
2440 
2441 	do {
2442 		struct dma_fence *__fence;
2443 		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
2444 			cursor.start;
2445 		int current_bytes;
2446 		u32 pitch;
2447 
2448 		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
2449 			current_bytes = min_t(int, bytes_left,
2450 					      MAX_PREEMPTDISABLE_TRANSFER);
2451 		else
2452 			current_bytes = min_t(int, bytes_left, cursor.size);
2453 
2454 		pitch = xe_migrate_copy_pitch(xe, current_bytes);
2455 		if (xe->info.has_mem_copy_instr)
2456 			current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
2457 		else
2458 			current_bytes = min_t(int, current_bytes,
2459 					      round_down(S16_MAX * pitch,
2460 							 XE_CACHELINE_BYTES));
2461 
2462 		__fence = xe_migrate_vram(m, current_bytes,
2463 					  (unsigned long)buf & ~PAGE_MASK,
2464 					  &pagemap_addr[current_page],
2465 					  vram_addr, NULL, write ?
2466 					  XE_MIGRATE_COPY_TO_VRAM :
2467 					  XE_MIGRATE_COPY_TO_SRAM);
2468 		if (IS_ERR(__fence)) {
2469 			if (fence) {
2470 				dma_fence_wait(fence, false);
2471 				dma_fence_put(fence);
2472 			}
2473 			fence = __fence;
2474 			goto out_err;
2475 		}
2476 
2477 		dma_fence_put(fence);
2478 		fence = __fence;
2479 
2480 		buf += current_bytes;
2481 		offset += current_bytes;
2482 		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
2483 		bytes_left -= current_bytes;
2484 		if (bytes_left)
2485 			xe_res_next(&cursor, current_bytes);
2486 	} while (bytes_left);
2487 
2488 	dma_fence_wait(fence, false);
2489 	dma_fence_put(fence);
2490 
2491 out_err:
2492 	xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
2493 	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
2494 }
2495 
2496 /**
2497  * xe_migrate_job_lock() - Lock migrate job lock
2498  * @m: The migration context.
2499  * @q: Queue associated with the operation which requires a lock
2500  *
2501  * Lock the migrate job lock if the queue is a migration queue, otherwise
2502  * assert the VM's dma-resv is held (user queue's have own locking).
2503  */
2504 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
2505 {
2506 	bool is_migrate = q == m->q;
2507 
2508 	if (is_migrate)
2509 		mutex_lock(&m->job_mutex);
2510 	else
2511 		xe_vm_assert_held(q->user_vm);	/* User queues VM's should be locked */
2512 }
2513 
2514 /**
2515  * xe_migrate_job_unlock() - Unlock migrate job lock
2516  * @m: The migration context.
2517  * @q: Queue associated with the operation which requires a lock
2518  *
2519  * Unlock the migrate job lock if the queue is a migration queue, otherwise
2520  * assert the VM's dma-resv is held (user queue's have own locking).
2521  */
2522 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
2523 {
2524 	bool is_migrate = q == m->q;
2525 
2526 	if (is_migrate)
2527 		mutex_unlock(&m->job_mutex);
2528 	else
2529 		xe_vm_assert_held(q->user_vm);	/* User queues VM's should be locked */
2530 }
2531 
2532 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
2533 /**
2534  * xe_migrate_job_lock_assert() - Assert migrate job lock held of queue
2535  * @q: Migrate queue
2536  */
2537 void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
2538 {
2539 	struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
2540 
2541 	xe_gt_assert(q->gt, q == m->q);
2542 	lockdep_assert_held(&m->job_mutex);
2543 }
2544 #endif
2545 
2546 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2547 #include "tests/xe_migrate.c"
2548 #endif
2549