xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 44343e8b250abb2f6bfd615493ca07a7f11f3cc2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/drm_pagemap.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include <generated/xe_wa_oob.h>
17 
18 #include "instructions/xe_gpu_commands.h"
19 #include "instructions/xe_mi_commands.h"
20 #include "regs/xe_gtt_defs.h"
21 #include "tests/xe_test.h"
22 #include "xe_assert.h"
23 #include "xe_bb.h"
24 #include "xe_bo.h"
25 #include "xe_exec_queue.h"
26 #include "xe_ggtt.h"
27 #include "xe_gt.h"
28 #include "xe_hw_engine.h"
29 #include "xe_lrc.h"
30 #include "xe_map.h"
31 #include "xe_mocs.h"
32 #include "xe_pt.h"
33 #include "xe_res_cursor.h"
34 #include "xe_sa.h"
35 #include "xe_sched_job.h"
36 #include "xe_sync.h"
37 #include "xe_trace_bo.h"
38 #include "xe_vm.h"
39 #include "xe_vram.h"
40 
41 /**
42  * struct xe_migrate - migrate context.
43  */
44 struct xe_migrate {
45 	/** @q: Default exec queue used for migration */
46 	struct xe_exec_queue *q;
47 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
48 	struct xe_tile *tile;
49 	/** @job_mutex: Timeline mutex for @q. */
50 	struct mutex job_mutex;
51 	/** @pt_bo: Page-table buffer object. */
52 	struct xe_bo *pt_bo;
53 	/** @batch_base_ofs: VM offset of the migration batch buffer */
54 	u64 batch_base_ofs;
55 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
56 	u64 usm_batch_base_ofs;
57 	/** @cleared_mem_ofs: VM offset of the NULL mapping used as a cleared-memory source. */
58 	u64 cleared_mem_ofs;
59 	/**
60 	 * @fence: dma-fence representing the last migration job batch.
61 	 * Protected by @job_mutex.
62 	 */
63 	struct dma_fence *fence;
64 	/**
65 	 * @vm_update_sa: For integrated, used to suballocate page-tables
66 	 * out of the pt_bo.
67 	 */
68 	struct drm_suballoc_manager vm_update_sa;
69 	/** @min_chunk_size: For dgfx, minimum chunk size for VRAM blits */
70 	u64 min_chunk_size;
71 };
72 
73 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
74 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
75 #define NUM_KERNEL_PDE 15
76 #define NUM_PT_SLOTS 32
77 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
78 #define MAX_NUM_PTE 512
79 #define IDENTITY_OFFSET 256ULL
80 
81 /*
82  * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
83  * legal value accepted.  Since that instruction field is always stored in
84  * (val-2) format, this translates to 0x400 dwords for the true maximum length
85  * of the instruction.  Subtracting the instruction header (1 dword) and
86  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
87  */
88 #define MAX_PTE_PER_SDI 0x1FEU
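/*
 * Worked example of the arithmetic above (illustrative only): the encoded
 * length 0x3FE decodes to 0x3FE + 2 = 0x400 total dwords; subtracting the
 * 1-dword header and 2-dword address leaves 0x3FD payload dwords, i.e.
 * 0x3FD / 2 = 0x1FE whole qwords, so one MI_STORE_DATA_IMM can write at
 * most 510 PTEs.
 */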
89 
90 static void xe_migrate_fini(void *arg)
91 {
92 	struct xe_migrate *m = arg;
93 
94 	xe_vm_lock(m->q->vm, false);
95 	xe_bo_unpin(m->pt_bo);
96 	xe_vm_unlock(m->q->vm);
97 
98 	dma_fence_put(m->fence);
99 	xe_bo_put(m->pt_bo);
100 	drm_suballoc_manager_fini(&m->vm_update_sa);
101 	mutex_destroy(&m->job_mutex);
102 	xe_vm_close_and_put(m->q->vm);
103 	xe_exec_queue_put(m->q);
104 }
105 
106 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
107 {
108 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
109 
110 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
111 	return (slot + 1ULL) << xe_pt_shift(level + 1);
112 }
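/*
 * Illustration (assuming the usual 4KiB-page layout where
 * xe_pt_shift(1) == 21): slot 0 resolves to VA 2MiB, slot 1 to 4MiB and
 * so on, i.e. each PT slot gets its own 2MiB window of the migrate VM,
 * just above the self-mapped first 2MiB.
 */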
113 
114 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
115 {
116 	/*
117 	 * Subtract the DPA base to get the correct offset into the identity
118 	 * table for the migrate offset
119 	 */
120 	u64 identity_offset = IDENTITY_OFFSET;
121 
122 	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
123 		identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
124 							(xe->mem.vram), SZ_1G);
125 
126 	addr -= xe_vram_region_dpa_base(xe->mem.vram);
127 	return addr + (identity_offset << xe_pt_shift(2));
128 }
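/*
 * Illustration: with IDENTITY_OFFSET == 256 and xe_pt_shift(2) == 30, a
 * VRAM page at dpa_base + 4GiB maps to VA 256GiB + 4GiB inside the
 * migrate VM's identity map; the compressed-PAT variant of the map simply
 * starts a further "VRAM size in GiB" entries up.
 */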
129 
130 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
131 					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
132 {
133 	struct xe_vram_region *vram = xe->mem.vram;
134 	resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
135 	u64 pos, ofs, flags;
136 	u64 entry;
137 	/* XXX: Unclear if this should be usable_size? */
138 	u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
139 	u32 level = 2;
140 
141 	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
142 	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
143 					    true, 0);
144 
145 	xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
146 
147 	/*
148 	 * Use 1GB pages when possible; the last chunk always uses 2M
149 	 * pages, as mixing reserved memory (stolen, WOCPM) into a single
150 	 * mapping is not allowed on certain platforms.
151 	 */
152 	for (pos = dpa_base; pos < vram_limit;
153 	     pos += SZ_1G, ofs += 8) {
154 		if (pos + SZ_1G >= vram_limit) {
155 			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
156 			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
157 
158 			flags = vm->pt_ops->pte_encode_addr(xe, 0,
159 							    pat_index,
160 							    level - 1,
161 							    true, 0);
162 
163 			for (ofs = pt_2m_ofs; pos < vram_limit;
164 			     pos += SZ_2M, ofs += 8)
165 				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
166 			break;	/* Ensure the pos == vram_limit assert below holds */
167 		}
168 
169 		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
170 	}
171 
172 	xe_assert(xe, pos == vram_limit);
173 }
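/*
 * Sketch of the result (illustrative, e.g. for 16GiB of VRAM): the first
 * 15GiB are covered by 1GiB PDEs written directly into the level-2 page,
 * while the last (up to) 1GiB is described by 2MiB PTEs in the dedicated
 * PT page at @pt_2m_ofs, keeping stolen/WOCPM ranges out of a huge
 * mapping.
 */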
174 
175 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
176 				 struct xe_vm *vm)
177 {
178 	struct xe_device *xe = tile_to_xe(tile);
179 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
180 	u8 id = tile->id;
181 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
182 #define VRAM_IDENTITY_MAP_COUNT	2
183 	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
184 #undef VRAM_IDENTITY_MAP_COUNT
185 	u32 map_ofs, level, i;
186 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
187 	u64 entry, pt29_ofs;
188 
189 	/* Can't bump NUM_PT_SLOTS too high */
190 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
191 	/* Must be a multiple of 64K to support all platforms */
192 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
193 	/* And one slot reserved for the 4KiB page table updates */
194 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
195 
196 	/* Need to be sure everything fits in the first PT, or create more */
197 	xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
198 
199 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
200 				  num_entries * XE_PAGE_SIZE,
201 				  ttm_bo_type_kernel,
202 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
203 				  XE_BO_FLAG_PAGETABLE);
204 	if (IS_ERR(bo))
205 		return PTR_ERR(bo);
206 
207 	/* PT30 & PT31 reserved for 2M identity map */
208 	pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
209 	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
210 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
211 
212 	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
213 
214 	/* Map the entire BO in our level 0 pt */
215 	for (i = 0, level = 0; i < num_entries; level++) {
216 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
217 						  pat_index, 0);
218 
219 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
220 
221 		if (vm->flags & XE_VM_FLAG_64K)
222 			i += 16;
223 		else
224 			i += 1;
225 	}
226 
227 	if (!IS_DGFX(xe)) {
228 		/* Write out batch too */
229 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
230 		for (i = 0; i < xe_bo_size(batch);
231 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
232 		     XE_PAGE_SIZE) {
233 			entry = vm->pt_ops->pte_encode_bo(batch, i,
234 							  pat_index, 0);
235 
236 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
237 				  entry);
238 			level++;
239 		}
240 		if (xe->info.has_usm) {
241 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
242 
243 			batch = tile->primary_gt->usm.bb_pool->bo;
244 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
245 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
246 
247 			for (i = 0; i < xe_bo_size(batch);
248 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
249 			     XE_PAGE_SIZE) {
250 				entry = vm->pt_ops->pte_encode_bo(batch, i,
251 								  pat_index, 0);
252 
253 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
254 					  entry);
255 				level++;
256 			}
257 		}
258 	} else {
259 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
260 
261 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
262 
263 		if (xe->info.has_usm) {
264 			batch = tile->primary_gt->usm.bb_pool->bo;
265 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
266 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
267 		}
268 	}
269 
270 	for (level = 1; level < num_level; level++) {
271 		u32 flags = 0;
272 
273 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
274 			flags = XE_PDE_64K;
275 
276 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
277 						  XE_PAGE_SIZE);
278 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
279 			  entry | flags);
280 	}
281 
282 	/* Write PDE's that point to our BO. */
283 	for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
284 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
285 
286 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
287 			  (i + 1) * 8, u64, entry);
288 	}
289 
290 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
291 	level = 2;
292 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
293 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
294 		  | XE_PTE_NULL);
295 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
296 
297 	/* Identity map the entire vram at 256GiB offset */
298 	if (IS_DGFX(xe)) {
299 		u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
300 		resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
301 
302 		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
303 					    pat_index, pt30_ofs);
304 		xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
305 
306 		/*
307 		 * Identity map the entire vram for compressed pat_index for xe2+
308 		 * if flat ccs is enabled.
309 		 */
310 		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
311 			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
312 			u64 vram_offset = IDENTITY_OFFSET +
313 				DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
314 			u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
315 
316 			xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
317 							  IDENTITY_OFFSET / 2) * SZ_1G);
318 			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
319 						    comp_pat_index, pt31_ofs);
320 		}
321 	}
322 
323 	/*
324 	 * Example layout created above, with root level = 3:
325 	 * [PT0...PT7]: kernel PT's for copy/clear; 64KiB or 4KiB PTE's
326 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
327 	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
328 	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
329 	 *
330 	 * This makes the lowest part of the VM point to the pagetables.
331 	 * Hence the lowest 2M in the VM points to itself; with a few writes
332 	 * and flushes, other parts of the VM can be used for either copying
333 	 * or clearing.
334 	 *
335 	 * For performance, the kernel reserves PDE's, so about 20 are left
336 	 * for async VM updates.
337 	 *
338 	 * To make it easier to work with, each scratch PT is put in slot
339 	 * (1 + PT #) everywhere; this allows lockless updates to scratch
340 	 * pages by using the different addresses in the VM.
341 	 */
342 #define NUM_VMUSA_UNIT_PER_PAGE	32
343 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
344 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
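	/*
	 * With the values above (illustrative arithmetic): each 4KiB VM-update
	 * page is carved into 32 suballocation units of 128 bytes, so a single
	 * unit holds NUM_VMUSA_WRITES_PER_UNIT == 16 qword PTE writes.
	 */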
345 	drm_suballoc_manager_init(&m->vm_update_sa,
346 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
347 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
348 
349 	m->pt_bo = bo;
350 	return 0;
351 }
352 
353 /*
354  * Including the reserved copy engine is required to avoid deadlocks where
355  * migrate jobs servicing the faults get stuck behind the job that faulted.
356  */
357 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
358 {
359 	u32 logical_mask = 0;
360 	struct xe_hw_engine *hwe;
361 	enum xe_hw_engine_id id;
362 
363 	for_each_hw_engine(hwe, gt, id) {
364 		if (hwe->class != XE_ENGINE_CLASS_COPY)
365 			continue;
366 
367 		if (xe_gt_is_usm_hwe(gt, hwe))
368 			logical_mask |= BIT(hwe->logical_instance);
369 	}
370 
371 	return logical_mask;
372 }
373 
374 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
375 {
376 	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
377 }
378 
379 /**
380  * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
381  * @tile: &xe_tile
382  *
383  * Allocates a &xe_migrate for a given tile.
384  *
385  * Return: &xe_migrate on success, or NULL when out of memory.
386  */
387 struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
388 {
389 	struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
390 
391 	if (m)
392 		m->tile = tile;
393 	return m;
394 }
395 
396 /**
397  * xe_migrate_init() - Initialize a migrate context
398  * @m: The migration context
399  *
400  * Return: 0 if successful, negative error code on failure
401  */
402 int xe_migrate_init(struct xe_migrate *m)
403 {
404 	struct xe_tile *tile = m->tile;
405 	struct xe_gt *primary_gt = tile->primary_gt;
406 	struct xe_device *xe = tile_to_xe(tile);
407 	struct xe_vm *vm;
408 	int err;
409 
410 	/* Special layout, prepared below. */
411 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
412 			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);
413 	if (IS_ERR(vm))
414 		return PTR_ERR(vm);
415 
416 	xe_vm_lock(vm, false);
417 	err = xe_migrate_prepare_vm(tile, m, vm);
418 	xe_vm_unlock(vm);
419 	if (err)
420 		goto err_out;
421 
422 	if (xe->info.has_usm) {
423 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
424 							   XE_ENGINE_CLASS_COPY,
425 							   primary_gt->usm.reserved_bcs_instance,
426 							   false);
427 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
428 
429 		if (!hwe || !logical_mask) {
430 			err = -EINVAL;
431 			goto err_out;
432 		}
433 
434 		/*
435 		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
436 		 * PVC, may want to revisit if performance is needed.
437 		 */
438 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
439 					    EXEC_QUEUE_FLAG_KERNEL |
440 					    EXEC_QUEUE_FLAG_PERMANENT |
441 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY |
442 					    EXEC_QUEUE_FLAG_MIGRATE, 0);
443 	} else {
444 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
445 						  XE_ENGINE_CLASS_COPY,
446 						  EXEC_QUEUE_FLAG_KERNEL |
447 						  EXEC_QUEUE_FLAG_PERMANENT |
448 						  EXEC_QUEUE_FLAG_MIGRATE, 0);
449 	}
450 	if (IS_ERR(m->q)) {
451 		err = PTR_ERR(m->q);
452 		goto err_out;
453 	}
454 
455 	mutex_init(&m->job_mutex);
456 	fs_reclaim_acquire(GFP_KERNEL);
457 	might_lock(&m->job_mutex);
458 	fs_reclaim_release(GFP_KERNEL);
459 
460 	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
461 	if (err)
462 		return err;
463 
464 	if (IS_DGFX(xe)) {
465 		if (xe_migrate_needs_ccs_emit(xe))
466 			/* min chunk size corresponds to 4K of CCS Metadata */
467 			m->min_chunk_size = SZ_4K * SZ_64K /
468 				xe_device_ccs_bytes(xe, SZ_64K);
469 		else
470 			/* Somewhat arbitrary to avoid a huge amount of blits */
471 			m->min_chunk_size = SZ_64K;
472 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
473 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
474 			(unsigned long long)m->min_chunk_size);
475 	}
476 
477 	return err;
478 
479 err_out:
480 	xe_vm_close_and_put(vm);
481 	return err;
482 
483 }
484 
485 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
486 {
487 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
488 		return MAX_CCS_LIMITED_TRANSFER;
489 
490 	return MAX_PREEMPTDISABLE_TRANSFER;
491 }
492 
493 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
494 {
495 	struct xe_device *xe = tile_to_xe(m->tile);
496 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
497 
498 	if (mem_type_is_vram(cur->mem_type)) {
499 		/*
500 		 * VRAM we want to blit in chunks with sizes aligned to
501 		 * min_chunk_size so that the offset to the CCS metadata stays
502 		 * page-aligned. If it's the last chunk it may be smaller.
503 		 *
504 		 * Another constraint is that we need to limit the blit to
505 		 * the VRAM block size, unless size is smaller than
506 		 * min_chunk_size.
507 		 */
508 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
509 
510 		size = min_t(u64, size, chunk);
511 		if (size > m->min_chunk_size)
512 			size = round_down(size, m->min_chunk_size);
513 	}
514 
515 	return size;
516 }
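/*
 * Example with hypothetical numbers: for min_chunk_size == 64KiB, a 5MiB
 * remainder whose current VRAM block spans 3MiB + 8KiB yields
 * min(8MiB, 5MiB) = 5MiB, clipped to the 3MiB + 8KiB block and then
 * rounded down to 3MiB, keeping the CCS metadata offset page-aligned.
 */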
517 
518 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
519 {
520 	/* If the chunk is not fragmented, allow identity map. */
521 	return cur->size >= size;
522 }
523 
524 #define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
525 #define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)
526 
527 static u32 pte_update_size(struct xe_migrate *m,
528 			   u32 flags,
529 			   struct ttm_resource *res,
530 			   struct xe_res_cursor *cur,
531 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
532 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
533 {
534 	u32 cmds = 0;
535 	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
536 	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
537 
538 	*L0_pt = pt_ofs;
539 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
540 		/* Offset into identity map. */
541 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
542 					      cur->start + vram_region_gpu_offset(res),
543 					      is_comp_pte);
544 		cmds += cmd_size;
545 	} else {
546 		/* Clip L0 to available size */
547 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
548 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
549 
550 		*L0 = size;
551 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
552 
553 		/* MI_STORE_DATA_IMM */
554 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
555 
556 		/* PTE qwords */
557 		cmds += num_4k_pages * 2;
558 
559 		/* Each chunk has a single blit command */
560 		cmds += cmd_size;
561 	}
562 
563 	return cmds;
564 }
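/*
 * Example for the non-identity path (illustrative): a full 2MiB chunk
 * needs 512 4KiB PTEs, i.e. DIV_ROUND_UP(512, MAX_PTE_PER_SDI) == 2
 * MI_STORE_DATA_IMM prefixes of 3 dwords each plus 512 * 2 dwords of PTE
 * payload and one blit command: 6 + 1024 + cmd_size dwords in total.
 */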
565 
566 static void emit_pte(struct xe_migrate *m,
567 		     struct xe_bb *bb, u32 at_pt,
568 		     bool is_vram, bool is_comp_pte,
569 		     struct xe_res_cursor *cur,
570 		     u32 size, struct ttm_resource *res)
571 {
572 	struct xe_device *xe = tile_to_xe(m->tile);
573 	struct xe_vm *vm = m->q->vm;
574 	u16 pat_index;
575 	u32 ptes;
576 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
577 	u64 cur_ofs;
578 
579 	/* Indirect access needs the compression-enabled, uncached PAT index */
580 	if (GRAPHICS_VERx100(xe) >= 2000)
581 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
582 					  xe->pat.idx[XE_CACHE_WB];
583 	else
584 		pat_index = xe->pat.idx[XE_CACHE_WB];
585 
586 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
587 
588 	while (ptes) {
589 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
590 
591 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
592 		bb->cs[bb->len++] = ofs;
593 		bb->cs[bb->len++] = 0;
594 
595 		cur_ofs = ofs;
596 		ofs += chunk * 8;
597 		ptes -= chunk;
598 
599 		while (chunk--) {
600 			u64 addr, flags = 0;
601 			bool devmem = false;
602 
603 			addr = xe_res_dma(cur) & PAGE_MASK;
604 			if (is_vram) {
605 				if (vm->flags & XE_VM_FLAG_64K) {
606 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
607 
608 					xe_assert(xe, (va & (SZ_64K - 1)) ==
609 						  (addr & (SZ_64K - 1)));
610 
611 					flags |= XE_PTE_PS64;
612 				}
613 
614 				addr += vram_region_gpu_offset(res);
615 				devmem = true;
616 			}
617 
618 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
619 							   addr, pat_index,
620 							   0, devmem, flags);
621 			bb->cs[bb->len++] = lower_32_bits(addr);
622 			bb->cs[bb->len++] = upper_32_bits(addr);
623 
624 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
625 			cur_ofs += 8;
626 		}
627 	}
628 }
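/*
 * The stream emitted per chunk above is thus (sketch, not a verbatim
 * dump):
 *
 *   MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk)
 *   <pt offset low> <0>
 *   <PTE qword 0> ... <PTE qword chunk-1>
 *
 * with each PTE qword produced by pte_encode_addr() for the next page of
 * the resource cursor.
 */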
629 
630 #define EMIT_COPY_CCS_DW 5
631 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
632 			  u64 dst_ofs, bool dst_is_indirect,
633 			  u64 src_ofs, bool src_is_indirect,
634 			  u32 size)
635 {
636 	struct xe_device *xe = gt_to_xe(gt);
637 	u32 *cs = bb->cs + bb->len;
638 	u32 num_ccs_blks;
639 	u32 num_pages;
640 	u32 ccs_copy_size;
641 	u32 mocs;
642 
643 	if (GRAPHICS_VERx100(xe) >= 2000) {
644 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
645 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
646 
647 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
648 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
649 
650 	} else {
651 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
652 					    NUM_CCS_BYTES_PER_BLOCK);
653 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
654 
655 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
656 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
657 	}
658 
659 	*cs++ = XY_CTRL_SURF_COPY_BLT |
660 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
661 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
662 		ccs_copy_size;
663 	*cs++ = lower_32_bits(src_ofs);
664 	*cs++ = upper_32_bits(src_ofs) | mocs;
665 	*cs++ = lower_32_bits(dst_ofs);
666 	*cs++ = upper_32_bits(dst_ofs) | mocs;
667 
668 	bb->len = cs - bb->cs;
669 }
670 
671 #define EMIT_COPY_DW 10
672 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
673 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
674 		      unsigned int pitch)
675 {
676 	struct xe_device *xe = gt_to_xe(gt);
677 	u32 mocs = 0;
678 	u32 tile_y = 0;
679 
680 	xe_gt_assert(gt, !(pitch & 3));
681 	xe_gt_assert(gt, size / pitch <= S16_MAX);
682 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
683 	xe_gt_assert(gt, pitch <= U16_MAX);
684 
685 	if (GRAPHICS_VER(xe) >= 20)
686 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
687 
688 	if (GRAPHICS_VERx100(xe) >= 1250)
689 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
690 
691 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
692 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
693 	bb->cs[bb->len++] = 0;
694 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
695 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
696 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
697 	bb->cs[bb->len++] = 0;
698 	bb->cs[bb->len++] = pitch | mocs;
699 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
700 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
701 }
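/*
 * XY_FAST_COPY_BLT treats the copy as a (size / pitch)-row, 32bpp
 * rectangle; e.g. a 1MiB chunk with pitch == XE_PAGE_SIZE becomes a
 * 256 x 4096-byte blit, which is why the asserts above bound
 * size / pitch and pitch / 4 to S16_MAX.
 */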
702 
703 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
704 {
705 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
706 }
707 
708 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
709 			       struct xe_bb *bb,
710 			       u64 src_ofs, bool src_is_indirect,
711 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
712 			       u64 ccs_ofs, bool copy_ccs)
713 {
714 	struct xe_gt *gt = m->tile->primary_gt;
715 	u32 flush_flags = 0;
716 
717 	if (!copy_ccs && dst_is_indirect) {
718 		/*
719 		 * If the src is already in vram, then it should already
720 		 * have been cleared by us, or has been populated by the
721 		 * user. Make sure we copy the CCS aux state as-is.
722 		 *
723 		 * Otherwise if the bo doesn't have any CCS metadata attached,
724 		 * we still need to clear it for security reasons.
725 		 */
726 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
727 
728 		emit_copy_ccs(gt, bb,
729 			      dst_ofs, true,
730 			      ccs_src_ofs, src_is_indirect, dst_size);
731 
732 		flush_flags = MI_FLUSH_DW_CCS;
733 	} else if (copy_ccs) {
734 		if (!src_is_indirect)
735 			src_ofs = ccs_ofs;
736 		else if (!dst_is_indirect)
737 			dst_ofs = ccs_ofs;
738 
739 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
740 
741 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
742 			      src_is_indirect, dst_size);
743 		if (dst_is_indirect)
744 			flush_flags = MI_FLUSH_DW_CCS;
745 	}
746 
747 	return flush_flags;
748 }
749 
750 /**
751  * xe_migrate_copy() - Copy content of TTM resources.
752  * @m: The migration context.
753  * @src_bo: The buffer object @src is currently bound to.
754  * @dst_bo: If copying between resources created for the same bo, set this to
755  * the same value as @src_bo. If copying between buffer objects, set it to
756  * the buffer object @dst is currently bound to.
757  * @src: The source TTM resource.
758  * @dst: The dst TTM resource.
759  * @copy_only_ccs: If true, copy only CCS metadata
760  *
761  * Copies the contents of @src to @dst: On flat CCS devices,
762  * the CCS metadata is copied as well if needed, or if not present,
763  * the CCS metadata of @dst is cleared for security reasons.
764  *
765  * Return: Pointer to a dma_fence representing the last copy batch, or
766  * an error pointer on failure. If there is a failure, any copy operation
767  * started by the function call has been synced.
768  */
769 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
770 				  struct xe_bo *src_bo,
771 				  struct xe_bo *dst_bo,
772 				  struct ttm_resource *src,
773 				  struct ttm_resource *dst,
774 				  bool copy_only_ccs)
775 {
776 	struct xe_gt *gt = m->tile->primary_gt;
777 	struct xe_device *xe = gt_to_xe(gt);
778 	struct dma_fence *fence = NULL;
779 	u64 size = xe_bo_size(src_bo);
780 	struct xe_res_cursor src_it, dst_it, ccs_it;
781 	u64 src_L0_ofs, dst_L0_ofs;
782 	u32 src_L0_pt, dst_L0_pt;
783 	u64 src_L0, dst_L0;
784 	int pass = 0;
785 	int err;
786 	bool src_is_pltt = src->mem_type == XE_PL_TT;
787 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
788 	bool src_is_vram = mem_type_is_vram(src->mem_type);
789 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
790 	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
791 	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
792 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
793 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
794 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
795 	bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
796 		GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
797 
798 	/* Copying CCS between two different BOs is not supported yet. */
799 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
800 		return ERR_PTR(-EINVAL);
801 
802 	if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
803 		return ERR_PTR(-EINVAL);
804 
805 	if (!src_is_vram)
806 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
807 	else
808 		xe_res_first(src, 0, size, &src_it);
809 	if (!dst_is_vram)
810 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
811 	else
812 		xe_res_first(dst, 0, size, &dst_it);
813 
814 	if (copy_system_ccs)
815 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
816 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
817 				&ccs_it);
818 
819 	while (size) {
820 		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
821 		struct xe_sched_job *job;
822 		struct xe_bb *bb;
823 		u32 flush_flags = 0;
824 		u32 update_idx;
825 		u64 ccs_ofs, ccs_size;
826 		u32 ccs_pt;
827 		u32 pte_flags;
828 
829 		bool usm = xe->info.has_usm;
830 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
831 
832 		src_L0 = xe_migrate_res_sizes(m, &src_it);
833 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
834 
835 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
836 			pass++, src_L0, dst_L0);
837 
838 		src_L0 = min(src_L0, dst_L0);
839 
840 		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
841 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
842 		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
843 					      &src_L0_ofs, &src_L0_pt, 0, 0,
844 					      avail_pts);
845 
846 		pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
847 		batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
848 					      &dst_L0_ofs, &dst_L0_pt, 0,
849 					      avail_pts, avail_pts);
850 
851 		if (copy_system_ccs) {
852 			xe_assert(xe, type_device);
853 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
854 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
855 						      &ccs_ofs, &ccs_pt, 0,
856 						      2 * avail_pts,
857 						      avail_pts);
858 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
859 		}
860 
861 		/* Add copy commands size here */
862 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
863 			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
864 
865 		bb = xe_bb_new(gt, batch_size, usm);
866 		if (IS_ERR(bb)) {
867 			err = PTR_ERR(bb);
868 			goto err_sync;
869 		}
870 
871 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
872 			xe_res_next(&src_it, src_L0);
873 		else
874 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
875 				 &src_it, src_L0, src);
876 
877 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
878 			xe_res_next(&dst_it, src_L0);
879 		else
880 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
881 				 &dst_it, src_L0, dst);
882 
883 		if (copy_system_ccs)
884 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
885 
886 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
887 		update_idx = bb->len;
888 
889 		if (!copy_only_ccs)
890 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
891 
892 		if (needs_ccs_emit)
893 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
894 							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
895 							  dst_L0_ofs,
896 							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
897 							  src_L0, ccs_ofs, copy_ccs);
898 
899 		job = xe_bb_create_migration_job(m->q, bb,
900 						 xe_migrate_batch_base(m, usm),
901 						 update_idx);
902 		if (IS_ERR(job)) {
903 			err = PTR_ERR(job);
904 			goto err;
905 		}
906 
907 		xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
908 		if (!fence) {
909 			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
910 						    DMA_RESV_USAGE_BOOKKEEP);
911 			if (!err && src_bo != dst_bo)
912 				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
913 							    DMA_RESV_USAGE_BOOKKEEP);
914 			if (err)
915 				goto err_job;
916 		}
917 
918 		mutex_lock(&m->job_mutex);
919 		xe_sched_job_arm(job);
920 		dma_fence_put(fence);
921 		fence = dma_fence_get(&job->drm.s_fence->finished);
922 		xe_sched_job_push(job);
923 
924 		dma_fence_put(m->fence);
925 		m->fence = dma_fence_get(fence);
926 
927 		mutex_unlock(&m->job_mutex);
928 
929 		xe_bb_free(bb, fence);
930 		size -= src_L0;
931 		continue;
932 
933 err_job:
934 		xe_sched_job_put(job);
935 err:
936 		xe_bb_free(bb, NULL);
937 
938 err_sync:
939 		/* Sync partial copy if any. FIXME: under job_mutex? */
940 		if (fence) {
941 			dma_fence_wait(fence, false);
942 			dma_fence_put(fence);
943 		}
944 
945 		return ERR_PTR(err);
946 	}
947 
948 	return fence;
949 }
950 
951 /**
952  * xe_migrate_lrc() - Get the LRC from migrate context.
953  * @migrate: Migrate context.
954  *
955  * Return: Pointer to LRC on success, error on failure
956  */
957 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
958 {
959 	return migrate->q->lrc[0];
960 }
961 
962 static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i,
963 				 u32 flags)
964 {
965 	struct xe_lrc *lrc = xe_exec_queue_lrc(q);
966 	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
967 		  MI_FLUSH_IMM_DW | flags;
968 	dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)) |
969 		  MI_FLUSH_DW_USE_GTT;
970 	dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc));
971 	dw[i++] = MI_NOOP;
972 	dw[i++] = MI_NOOP;
973 
974 	return i;
975 }
976 
977 /**
978  * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
979  * @tile: Tile whose migration context is to be used.
980  * @q: Exec queue to be used along with the migration context.
981  * @src_bo: The source buffer object.
982  * @read_write: Whether to create BB commands for a CCS read or write.
983  *
984  * Creates batch buffer instructions to copy CCS metadata from CCS pool to
985  * memory and vice versa.
986  *
987  * This function should only be called for IGPU.
988  *
989  * Return: 0 if successful, negative error code on failure.
990  */
991 int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
992 			   struct xe_bo *src_bo,
993 			   enum xe_sriov_vf_ccs_rw_ctxs read_write)
994 
995 {
996 	bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
997 	bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
998 	struct ttm_resource *src = src_bo->ttm.resource;
999 	struct xe_migrate *m = tile->migrate;
1000 	struct xe_gt *gt = tile->primary_gt;
1001 	u32 batch_size, batch_size_allocated;
1002 	struct xe_device *xe = gt_to_xe(gt);
1003 	struct xe_res_cursor src_it, ccs_it;
1004 	u64 size = xe_bo_size(src_bo);
1005 	struct xe_bb *bb = NULL;
1006 	u64 src_L0, src_L0_ofs;
1007 	u32 src_L0_pt;
1008 	int err;
1009 
1010 	xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
1011 
1012 	xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
1013 			PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
1014 			&ccs_it);
1015 
1016 	/* Calculate Batch buffer size */
1017 	batch_size = 0;
1018 	while (size) {
1019 		batch_size += 10; /* 2 x (flush + GGTT addr + 2 NOPs) */
1020 		u64 ccs_ofs, ccs_size;
1021 		u32 ccs_pt;
1022 
1023 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1024 
1025 		src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
1026 
1027 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1028 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1029 					      avail_pts);
1030 
1031 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1032 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1033 					      &ccs_pt, 0, avail_pts, avail_pts);
1034 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1035 
1036 		/* Add copy commands size here */
1037 		batch_size += EMIT_COPY_CCS_DW;
1038 
1039 		size -= src_L0;
1040 	}
1041 
1042 	bb = xe_bb_ccs_new(gt, batch_size, read_write);
1043 	if (IS_ERR(bb)) {
1044 		drm_err(&xe->drm, "BB allocation failed.\n");
1045 		err = PTR_ERR(bb);
1046 		goto err_ret;
1047 	}
1048 
1049 	batch_size_allocated = batch_size;
1050 	size = xe_bo_size(src_bo);
1051 	batch_size = 0;
1052 
1053 	/*
1054 	 * Emit PTE and copy commands here.
1055 	 * The CCS copy command can only support a limited size. If the size to
1056 	 * be copied is more than the limit, divide the copy into chunks, so
1057 	 * calculate the sizes here again before the copy commands are emitted.
1058 	 */
1059 	while (size) {
1060 		batch_size += 10; /* 2 x (flush + GGTT addr + 2 NOPs) */
1061 		u32 flush_flags = 0;
1062 		u64 ccs_ofs, ccs_size;
1063 		u32 ccs_pt;
1064 
1065 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1066 
1067 		src_L0 = xe_migrate_res_sizes(m, &src_it);
1068 
1069 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1070 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1071 					      avail_pts);
1072 
1073 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1074 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1075 					      &ccs_pt, 0, avail_pts, avail_pts);
1076 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1077 		batch_size += EMIT_COPY_CCS_DW;
1078 
1079 		emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
1080 
1081 		emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
1082 
1083 		bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
1084 		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
1085 						  src_L0_ofs, dst_is_pltt,
1086 						  src_L0, ccs_ofs, true);
1087 		bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
1088 
1089 		size -= src_L0;
1090 	}
1091 
1092 	xe_assert(xe, (batch_size_allocated == bb->len));
1093 	src_bo->bb_ccs[read_write] = bb;
1094 
1095 	return 0;
1096 
1097 err_ret:
1098 	return err;
1099 }
1100 
1101 /**
1102  * xe_migrate_exec_queue() - Get the exec queue from the migrate context.
1103  * @migrate: Migrate context.
1104  *
1105  * Return: Pointer to execution queue on success, error on failure
1106  */
1107 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
1108 {
1109 	return migrate->q;
1110 }
1111 
1112 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1113 				 u32 size, u32 pitch)
1114 {
1115 	struct xe_device *xe = gt_to_xe(gt);
1116 	u32 *cs = bb->cs + bb->len;
1117 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
1118 
1119 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
1120 	*cs++ = pitch - 1;
1121 	*cs++ = (size / pitch) - 1;
1122 	*cs++ = pitch - 1;
1123 	*cs++ = lower_32_bits(src_ofs);
1124 	*cs++ = upper_32_bits(src_ofs);
1125 	if (GRAPHICS_VERx100(xe) >= 2000)
1126 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1127 	else
1128 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1129 
1130 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1131 
1132 	bb->len += len;
1133 }
1134 
1135 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1136 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
1137 {
1138 	struct xe_device *xe = gt_to_xe(gt);
1139 	u32 *cs = bb->cs + bb->len;
1140 	u32 len = XY_FAST_COLOR_BLT_DW;
1141 
1142 	if (GRAPHICS_VERx100(xe) < 1250)
1143 		len = 11;
1144 
1145 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
1146 		(len - 2);
1147 	if (GRAPHICS_VERx100(xe) >= 2000)
1148 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
1149 			(pitch - 1);
1150 	else
1151 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
1152 			(pitch - 1);
1153 	*cs++ = 0;
1154 	*cs++ = (size / pitch) << 16 | pitch / 4;
1155 	*cs++ = lower_32_bits(src_ofs);
1156 	*cs++ = upper_32_bits(src_ofs);
1157 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
1158 	*cs++ = 0;
1159 	*cs++ = 0;
1160 	*cs++ = 0;
1161 	*cs++ = 0;
1162 
1163 	if (len > 11) {
1164 		*cs++ = 0;
1165 		*cs++ = 0;
1166 		*cs++ = 0;
1167 		*cs++ = 0;
1168 		*cs++ = 0;
1169 	}
1170 
1171 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1172 
1173 	bb->len += len;
1174 }
1175 
1176 static bool has_service_copy_support(struct xe_gt *gt)
1177 {
1178 	/*
1179 	 * What we care about is whether the architecture was designed with
1180 	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
1181 	 * instructions) so check the architectural engine list rather than the
1182 	 * actual list since these instructions are usable on BCS0 even if
1183 	 * all of the actual service copy engines (BCS1-BCS8) have been fused
1184 	 * off.
1185 	 */
1186 	return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1187 					      XE_HW_ENGINE_BCS1);
1188 }
1189 
1190 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1191 {
1192 	if (has_service_copy_support(gt))
1193 		return PVC_MEM_SET_CMD_LEN_DW;
1194 	else
1195 		return XY_FAST_COLOR_BLT_DW;
1196 }
1197 
1198 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1199 		       u32 size, u32 pitch, bool is_vram)
1200 {
1201 	if (has_service_copy_support(gt))
1202 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1203 	else
1204 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1205 				     is_vram);
1206 }
1207 
1208 /**
1209  * xe_migrate_clear() - Clear content of a TTM resource.
1210  * @m: The migration context.
1211  * @bo: The buffer object @dst is currently bound to.
1212  * @dst: The dst TTM resource to be cleared.
1213  * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1214  *
1215  * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1216  * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1217  * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1218  * TODO: Eliminate the @bo argument.
1219  *
1220  * Return: Pointer to a dma_fence representing the last clear batch, or
1221  * an error pointer on failure. If there is a failure, any clear operation
1222  * started by the function call has been synced.
1223  */
1224 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1225 				   struct xe_bo *bo,
1226 				   struct ttm_resource *dst,
1227 				   u32 clear_flags)
1228 {
1229 	bool clear_vram = mem_type_is_vram(dst->mem_type);
1230 	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1231 	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1232 	struct xe_gt *gt = m->tile->primary_gt;
1233 	struct xe_device *xe = gt_to_xe(gt);
1234 	bool clear_only_system_ccs = false;
1235 	struct dma_fence *fence = NULL;
1236 	u64 size = xe_bo_size(bo);
1237 	struct xe_res_cursor src_it;
1238 	struct ttm_resource *src = dst;
1239 	int err;
1240 
1241 	if (WARN_ON(!clear_bo_data && !clear_ccs))
1242 		return NULL;
1243 
1244 	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1245 		clear_only_system_ccs = true;
1246 
1247 	if (!clear_vram)
1248 		xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
1249 	else
1250 		xe_res_first(src, 0, xe_bo_size(bo), &src_it);
1251 
1252 	while (size) {
1253 		u64 clear_L0_ofs;
1254 		u32 clear_L0_pt;
1255 		u32 flush_flags = 0;
1256 		u64 clear_L0;
1257 		struct xe_sched_job *job;
1258 		struct xe_bb *bb;
1259 		u32 batch_size, update_idx;
1260 		u32 pte_flags;
1261 
1262 		bool usm = xe->info.has_usm;
1263 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1264 
1265 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1266 
1267 		/* Calculate final sizes and batch size. */
1268 		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1269 		batch_size = 2 +
1270 			pte_update_size(m, pte_flags, src, &src_it,
1271 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1272 					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1273 					avail_pts);
1274 
1275 		if (xe_migrate_needs_ccs_emit(xe))
1276 			batch_size += EMIT_COPY_CCS_DW;
1277 
1278 		/* Clear commands */
1279 
1280 		if (WARN_ON_ONCE(!clear_L0))
1281 			break;
1282 
1283 		bb = xe_bb_new(gt, batch_size, usm);
1284 		if (IS_ERR(bb)) {
1285 			err = PTR_ERR(bb);
1286 			goto err_sync;
1287 		}
1288 
1289 		size -= clear_L0;
1290 		/* Preemption is enabled again by the ring ops. */
1291 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
1292 			xe_res_next(&src_it, clear_L0);
1293 		} else {
1294 			emit_pte(m, bb, clear_L0_pt, clear_vram,
1295 				 clear_only_system_ccs, &src_it, clear_L0, dst);
1296 			flush_flags |= MI_INVALIDATE_TLB;
1297 		}
1298 
1299 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1300 		update_idx = bb->len;
1301 
1302 		if (clear_bo_data)
1303 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1304 
1305 		if (xe_migrate_needs_ccs_emit(xe)) {
1306 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1307 				      m->cleared_mem_ofs, false, clear_L0);
1308 			flush_flags |= MI_FLUSH_DW_CCS;
1309 		}
1310 
1311 		job = xe_bb_create_migration_job(m->q, bb,
1312 						 xe_migrate_batch_base(m, usm),
1313 						 update_idx);
1314 		if (IS_ERR(job)) {
1315 			err = PTR_ERR(job);
1316 			goto err;
1317 		}
1318 
1319 		xe_sched_job_add_migrate_flush(job, flush_flags);
1320 		if (!fence) {
1321 			/*
1322 			 * There can't be anything userspace related at this
1323 			 * point, so we just need to respect any potential move
1324 			 * fences, which are always tracked as
1325 			 * DMA_RESV_USAGE_KERNEL.
1326 			 */
1327 			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1328 						    DMA_RESV_USAGE_KERNEL);
1329 			if (err)
1330 				goto err_job;
1331 		}
1332 
1333 		mutex_lock(&m->job_mutex);
1334 		xe_sched_job_arm(job);
1335 		dma_fence_put(fence);
1336 		fence = dma_fence_get(&job->drm.s_fence->finished);
1337 		xe_sched_job_push(job);
1338 
1339 		dma_fence_put(m->fence);
1340 		m->fence = dma_fence_get(fence);
1341 
1342 		mutex_unlock(&m->job_mutex);
1343 
1344 		xe_bb_free(bb, fence);
1345 		continue;
1346 
1347 err_job:
1348 		xe_sched_job_put(job);
1349 err:
1350 		xe_bb_free(bb, NULL);
1351 err_sync:
1352 		/* Sync partial copies if any. FIXME: job_mutex? */
1353 		if (fence) {
1354 			dma_fence_wait(fence, false);
1355 			dma_fence_put(fence);
1356 		}
1357 
1358 		return ERR_PTR(err);
1359 	}
1360 
1361 	if (clear_ccs)
1362 		bo->ccs_cleared = true;
1363 
1364 	return fence;
1365 }
1366 
1367 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1368 			  const struct xe_vm_pgtable_update_op *pt_op,
1369 			  const struct xe_vm_pgtable_update *update,
1370 			  struct xe_migrate_pt_update *pt_update)
1371 {
1372 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1373 	u32 chunk;
1374 	u32 ofs = update->ofs, size = update->qwords;
1375 
1376 	/*
1377 	 * If we have 512 entries (max), we would populate it ourselves,
1378 	 * and update the PDE above it to the new pointer.
1379 	 * The only time this can happen is if we have to update the top
1380 	 * PDE. This requires a BO that is almost vm->size big.
1381 	 *
1382 	 * This shouldn't be possible in practice; it might change when 16K
1383 	 * pages are used. Hence the assert.
1384 	 */
1385 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1386 	if (!ppgtt_ofs)
1387 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1388 						xe_bo_addr(update->pt_bo, 0,
1389 							   XE_PAGE_SIZE), false);
1390 
1391 	do {
1392 		u64 addr = ppgtt_ofs + ofs * 8;
1393 
1394 		chunk = min(size, MAX_PTE_PER_SDI);
1395 
1396 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1397 		if (!(bb->len & 1))
1398 			bb->cs[bb->len++] = MI_NOOP;
1399 
1400 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1401 		bb->cs[bb->len++] = lower_32_bits(addr);
1402 		bb->cs[bb->len++] = upper_32_bits(addr);
1403 		if (pt_op->bind)
1404 			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1405 				      ofs, chunk, update);
1406 		else
1407 			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1408 				   ofs, chunk, update);
1409 
1410 		bb->len += chunk * 2;
1411 		ofs += chunk;
1412 		size -= chunk;
1413 	} while (size);
1414 }
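/*
 * Example (illustrative): a 512-qword update is emitted as chunks of 510
 * and 2 qwords; the optional MI_NOOP in front of each chunk keeps the
 * qword payload that populate()/clear() write via memset64() 8-byte
 * aligned within bb->cs.
 */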
1415 
1416 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1417 {
1418 	return xe_vm_get(m->q->vm);
1419 }
1420 
1421 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1422 struct migrate_test_params {
1423 	struct xe_test_priv base;
1424 	bool force_gpu;
1425 };
1426 
1427 #define to_migrate_test_params(_priv) \
1428 	container_of(_priv, struct migrate_test_params, base)
1429 #endif
1430 
1431 static struct dma_fence *
1432 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1433 			       struct xe_migrate_pt_update *pt_update)
1434 {
1435 	XE_TEST_DECLARE(struct migrate_test_params *test =
1436 			to_migrate_test_params
1437 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1438 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1439 	struct xe_vm *vm = pt_update->vops->vm;
1440 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1441 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1442 	int err;
1443 	u32 i, j;
1444 
1445 	if (XE_TEST_ONLY(test && test->force_gpu))
1446 		return ERR_PTR(-ETIME);
1447 
1448 	if (ops->pre_commit) {
1449 		pt_update->job = NULL;
1450 		err = ops->pre_commit(pt_update);
1451 		if (err)
1452 			return ERR_PTR(err);
1453 	}
1454 
1455 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1456 		const struct xe_vm_pgtable_update_op *pt_op =
1457 			&pt_update_ops->ops[i];
1458 
1459 		for (j = 0; j < pt_op->num_entries; j++) {
1460 			const struct xe_vm_pgtable_update *update =
1461 				&pt_op->entries[j];
1462 
1463 			if (pt_op->bind)
1464 				ops->populate(pt_update, m->tile,
1465 					      &update->pt_bo->vmap, NULL,
1466 					      update->ofs, update->qwords,
1467 					      update);
1468 			else
1469 				ops->clear(pt_update, m->tile,
1470 					   &update->pt_bo->vmap, NULL,
1471 					   update->ofs, update->qwords, update);
1472 		}
1473 	}
1474 
1475 	trace_xe_vm_cpu_bind(vm);
1476 	xe_device_wmb(vm->xe);
1477 
1478 	return dma_fence_get_stub();
1479 }
1480 
1481 static struct dma_fence *
1482 __xe_migrate_update_pgtables(struct xe_migrate *m,
1483 			     struct xe_migrate_pt_update *pt_update,
1484 			     struct xe_vm_pgtable_update_ops *pt_update_ops)
1485 {
1486 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1487 	struct xe_tile *tile = m->tile;
1488 	struct xe_gt *gt = tile->primary_gt;
1489 	struct xe_device *xe = tile_to_xe(tile);
1490 	struct xe_sched_job *job;
1491 	struct dma_fence *fence;
1492 	struct drm_suballoc *sa_bo = NULL;
1493 	struct xe_bb *bb;
1494 	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1495 	u32 num_updates = 0, current_update = 0;
1496 	u64 addr;
1497 	int err = 0;
1498 	bool is_migrate = pt_update_ops->q == m->q;
1499 	bool usm = is_migrate && xe->info.has_usm;
1500 
1501 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1502 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1503 		struct xe_vm_pgtable_update *updates = pt_op->entries;
1504 
1505 		num_updates += pt_op->num_entries;
1506 		for (j = 0; j < pt_op->num_entries; ++j) {
1507 			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1508 						    MAX_PTE_PER_SDI);
1509 
1510 			/* align noop + MI_STORE_DATA_IMM cmd prefix */
1511 			batch_size += 4 * num_cmds + updates[j].qwords * 2;
1512 		}
1513 	}
1514 
1515 	/* fixed + PTE entries */
1516 	if (IS_DGFX(xe))
1517 		batch_size += 2;
1518 	else
1519 		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1520 			num_updates * 2;
1521 
1522 	bb = xe_bb_new(gt, batch_size, usm);
1523 	if (IS_ERR(bb))
1524 		return ERR_CAST(bb);
1525 
1526 	/* For sysmem PTE's, we need to map them in our hole. */
1527 	if (!IS_DGFX(xe)) {
1528 		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1529 		u32 ptes, ofs;
1530 
1531 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1532 		if (!is_migrate) {
1533 			u32 num_units = DIV_ROUND_UP(num_updates,
1534 						     NUM_VMUSA_WRITES_PER_UNIT);
1535 
1536 			if (num_units > m->vm_update_sa.size) {
1537 				err = -ENOBUFS;
1538 				goto err_bb;
1539 			}
1540 			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1541 						 GFP_KERNEL, true, 0);
1542 			if (IS_ERR(sa_bo)) {
1543 				err = PTR_ERR(sa_bo);
1544 				goto err_bb;
1545 			}
1546 
1547 			ppgtt_ofs = NUM_KERNEL_PDE +
1548 				(drm_suballoc_soffset(sa_bo) /
1549 				 NUM_VMUSA_UNIT_PER_PAGE);
1550 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1551 				    NUM_VMUSA_UNIT_PER_PAGE) *
1552 				VM_SA_UPDATE_UNIT_SIZE;
1553 		}
1554 
1555 		/* Map our PT's to gtt */
1556 		i = 0;
1557 		j = 0;
1558 		ptes = num_updates;
1559 		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1560 		while (ptes) {
1561 			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1562 			u32 idx = 0;
1563 
1564 			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1565 				MI_SDI_NUM_QW(chunk);
1566 			bb->cs[bb->len++] = ofs;
1567 			bb->cs[bb->len++] = 0; /* upper_32_bits */
1568 
1569 			for (; i < pt_update_ops->num_ops; ++i) {
1570 				struct xe_vm_pgtable_update_op *pt_op =
1571 					&pt_update_ops->ops[i];
1572 				struct xe_vm_pgtable_update *updates = pt_op->entries;
1573 
1574 				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1575 					struct xe_vm *vm = pt_update->vops->vm;
1576 					struct xe_bo *pt_bo = updates[j].pt_bo;
1577 
1578 					if (idx == chunk)
1579 						goto next_cmd;
1580 
1581 					xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
1582 
1583 					/* Map a PT at most once */
1584 					if (pt_bo->update_index < 0)
1585 						pt_bo->update_index = current_update;
1586 
1587 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1588 									 pat_index, 0);
1589 					bb->cs[bb->len++] = lower_32_bits(addr);
1590 					bb->cs[bb->len++] = upper_32_bits(addr);
1591 				}
1592 
1593 				j = 0;
1594 			}
1595 
1596 next_cmd:
1597 			ptes -= chunk;
1598 			ofs += chunk * sizeof(u64);
1599 		}
1600 
1601 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1602 		update_idx = bb->len;
1603 
1604 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1605 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1606 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1607 			struct xe_vm_pgtable_update_op *pt_op =
1608 				&pt_update_ops->ops[i];
1609 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1610 
1611 			for (j = 0; j < pt_op->num_entries; ++j) {
1612 				struct xe_bo *pt_bo = updates[j].pt_bo;
1613 
1614 				write_pgtable(tile, bb, addr +
1615 					      pt_bo->update_index * XE_PAGE_SIZE,
1616 					      pt_op, &updates[j], pt_update);
1617 			}
1618 		}
1619 	} else {
1620 		/* phys pages, no preamble required */
1621 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1622 		update_idx = bb->len;
1623 
1624 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1625 			struct xe_vm_pgtable_update_op *pt_op =
1626 				&pt_update_ops->ops[i];
1627 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1628 
1629 			for (j = 0; j < pt_op->num_entries; ++j)
1630 				write_pgtable(tile, bb, 0, pt_op, &updates[j],
1631 					      pt_update);
1632 		}
1633 	}
1634 
1635 	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1636 					 xe_migrate_batch_base(m, usm),
1637 					 update_idx);
1638 	if (IS_ERR(job)) {
1639 		err = PTR_ERR(job);
1640 		goto err_sa;
1641 	}
1642 
1643 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1644 
1645 	if (ops->pre_commit) {
1646 		pt_update->job = job;
1647 		err = ops->pre_commit(pt_update);
1648 		if (err)
1649 			goto err_job;
1650 	}
1651 	if (is_migrate)
1652 		mutex_lock(&m->job_mutex);
1653 
1654 	xe_sched_job_arm(job);
1655 	fence = dma_fence_get(&job->drm.s_fence->finished);
1656 	xe_sched_job_push(job);
1657 
1658 	if (is_migrate)
1659 		mutex_unlock(&m->job_mutex);
1660 
1661 	xe_bb_free(bb, fence);
1662 	drm_suballoc_free(sa_bo, fence);
1663 
1664 	return fence;
1665 
1666 err_job:
1667 	xe_sched_job_put(job);
1668 err_sa:
1669 	drm_suballoc_free(sa_bo, NULL);
1670 err_bb:
1671 	xe_bb_free(bb, NULL);
1672 	return ERR_PTR(err);
1673 }
1674 
1675 /**
1676  * xe_migrate_update_pgtables() - Pipelined page-table update
1677  * @m: The migrate context.
1678  * @pt_update: PT update arguments
1679  *
1680  * Perform a pipelined page-table update. The update descriptors are typically
1681  * built under the same lock critical section as a call to this function. If
1682  * using the default engine for the updates, they will be performed in the
1683  * order they grab the job_mutex. If different engines are used, external
1684  * synchronization is needed for overlapping updates to maintain page-table
1685  * consistency. Note that the meaning of "overlapping" is that the updates
1686  * touch the same page-table, which might be a higher-level page-directory.
1687  * If no pipelining is needed, then updates may be performed by the cpu.
1688  *
1689  * Return: A dma_fence that, when signaled, indicates the update completion.
1690  */
1691 struct dma_fence *
1692 xe_migrate_update_pgtables(struct xe_migrate *m,
1693 			   struct xe_migrate_pt_update *pt_update)
1694 
1695 {
1696 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1697 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1698 	struct dma_fence *fence;
1699 
1700 	fence =  xe_migrate_update_pgtables_cpu(m, pt_update);
1701 
1702 	/* -ETIME indicates a job is needed, anything else is a legitimate error */
1703 	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
1704 		return fence;
1705 
1706 	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
1707 }
1708 
1709 /**
1710  * xe_migrate_wait() - Complete all operations using the xe_migrate context
1711  * @m: Migrate context to wait for.
1712  *
1713  * Waits until the GPU no longer uses the migrate context's default engine
1714  * or its page-table objects. FIXME: What about separate page-table update
1715  * engines?
1716  */
1717 void xe_migrate_wait(struct xe_migrate *m)
1718 {
1719 	if (m->fence)
1720 		dma_fence_wait(m->fence, false);
1721 }
1722 
1723 static u32 pte_update_cmd_size(u64 size)
1724 {
1725 	u32 num_dword;
1726 	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
1727 
1728 	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
1729 
1730 	/*
1731 	 * The MI_STORE_DATA_IMM command is used to update the page table. Each
1732 	 * instruction can update at most MAX_PTE_PER_SDI PTE entries. To
1733 	 * update n (n <= MAX_PTE_PER_SDI) PTE entries, we need:
1734 	 *
1735 	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
1736 	 * - 2 dwords for the page table's physical location
1737 	 * - 2*n dwords for the PTE values (each PTE entry is 2 dwords)
1738 	 */
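	/*
	 * For example (a sketch of the arithmetic, assuming XE_PAGE_SIZE is
	 * SZ_4K): an 8M chunk (MAX_PREEMPTDISABLE_TRANSFER) covers 2048 PTEs
	 * and needs DIV_U64_ROUND_UP(2048, 510) = 5 commands, i.e.
	 * 5 * (1 + 2) + 2048 * 2 = 4111 dwords in total.
	 */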
1739 	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
1740 	num_dword += entries * 2;
1741 
1742 	return num_dword;
1743 }
1744 
1745 static void build_pt_update_batch_sram(struct xe_migrate *m,
1746 				       struct xe_bb *bb, u32 pt_offset,
1747 				       struct drm_pagemap_addr *sram_addr,
1748 				       u32 size)
1749 {
1750 	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
1751 	u32 ptes;
1752 	int i = 0;
1753 
1754 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
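	/*
	 * Emit one MI_STORE_DATA_IMM per chunk of up to MAX_PTE_PER_SDI
	 * qword PTEs, advancing the destination offset between chunks.
	 */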
1755 	while (ptes) {
1756 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1757 
1758 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1759 		bb->cs[bb->len++] = pt_offset;
1760 		bb->cs[bb->len++] = 0;
1761 
1762 		pt_offset += chunk * 8;
1763 		ptes -= chunk;
1764 
1765 		while (chunk--) {
1766 			u64 addr = sram_addr[i].addr & PAGE_MASK;
1767 
1768 			xe_tile_assert(m->tile, sram_addr[i].proto ==
1769 				       DRM_INTERCONNECT_SYSTEM);
1770 			xe_tile_assert(m->tile, addr);
1771 			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
1772 								 addr, pat_index,
1773 								 0, false, 0);
1774 			bb->cs[bb->len++] = lower_32_bits(addr);
1775 			bb->cs[bb->len++] = upper_32_bits(addr);
1776 
1777 			i++;
1778 		}
1779 	}
1780 }
1781 
1782 enum xe_migrate_copy_dir {
1783 	XE_MIGRATE_COPY_TO_VRAM,
1784 	XE_MIGRATE_COPY_TO_SRAM,
1785 };
1786 
1787 #define XE_CACHELINE_BYTES	64ull
1788 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
1789 
1790 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
1791 					 unsigned long len,
1792 					 unsigned long sram_offset,
1793 					 struct drm_pagemap_addr *sram_addr,
1794 					 u64 vram_addr,
1795 					 const enum xe_migrate_copy_dir dir)
1796 {
1797 	struct xe_gt *gt = m->tile->primary_gt;
1798 	struct xe_device *xe = gt_to_xe(gt);
1799 	bool use_usm_batch = xe->info.has_usm;
1800 	struct dma_fence *fence = NULL;
1801 	u32 batch_size = 2;
1802 	u64 src_L0_ofs, dst_L0_ofs;
1803 	struct xe_sched_job *job;
1804 	struct xe_bb *bb;
1805 	u32 update_idx, pt_slot = 0;
1806 	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
1807 	unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
1808 		PAGE_SIZE : 4;
1809 	int err;
1810 	unsigned long i, j;
1811 
1812 	if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
1813 			(sram_offset | vram_addr) & XE_CACHELINE_MASK))
1814 		return ERR_PTR(-EOPNOTSUPP);
1815 
1816 	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
1817 
1818 	batch_size += pte_update_cmd_size(len);
1819 	batch_size += EMIT_COPY_DW;
1820 
1821 	bb = xe_bb_new(gt, batch_size, use_usm_batch);
1822 	if (IS_ERR(bb)) {
1823 		err = PTR_ERR(bb);
1824 		return ERR_PTR(err);
1825 	}
1826 
1827 	/*
1828 	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
1829 	 * the entry is populated by the GPU pagemap, but subsequent entries within
1830 	 * the range of that order are not populated.
1831 	 * build_pt_update_batch_sram() expects a fully populated array of
1832 	 * struct drm_pagemap_addr. Ensure this is the case even with higher
1833 	 * orders.
1834 	 */
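	/*
	 * For instance, a (hypothetical) order-2 entry at index i covers
	 * NR_PAGES(2) = 4 pages, so entries i + 1..i + 3 are filled in below
	 * with addr + PAGE_SIZE, addr + 2 * PAGE_SIZE and addr + 3 * PAGE_SIZE.
	 */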
1835 	for (i = 0; i < npages;) {
1836 		unsigned int order = sram_addr[i].order;
1837 
1838 		for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
1839 			if (!sram_addr[i + j].addr)
1840 				sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
1841 
1842 		i += NR_PAGES(order);
1843 	}
1844 
1845 	build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
1846 				   sram_addr, len + sram_offset);
1847 
1848 	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
1849 		src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
1850 		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
1851 
1852 	} else {
1853 		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
1854 		dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
1855 	}
1856 
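	/*
	 * End the page-table update portion of the batch here; the copy below,
	 * starting at update_idx, is executed as the second part of the
	 * migration job.
	 */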
1857 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1858 	update_idx = bb->len;
1859 
1860 	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
1861 
1862 	job = xe_bb_create_migration_job(m->q, bb,
1863 					 xe_migrate_batch_base(m, use_usm_batch),
1864 					 update_idx);
1865 	if (IS_ERR(job)) {
1866 		err = PTR_ERR(job);
1867 		goto err;
1868 	}
1869 
1870 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1871 
1872 	mutex_lock(&m->job_mutex);
1873 	xe_sched_job_arm(job);
1874 	fence = dma_fence_get(&job->drm.s_fence->finished);
1875 	xe_sched_job_push(job);
1876 
1877 	dma_fence_put(m->fence);
1878 	m->fence = dma_fence_get(fence);
1879 	mutex_unlock(&m->job_mutex);
1880 
1881 	xe_bb_free(bb, fence);
1882 
1883 	return fence;
1884 
1885 err:
1886 	xe_bb_free(bb, NULL);
1887 
1888 	return ERR_PTR(err);
1889 }
1890 
1891 /**
1892  * xe_migrate_to_vram() - Migrate to VRAM
1893  * @m: The migration context.
1894  * @npages: Number of pages to migrate.
1895  * @src_addr: Array of DMA information (source of migrate)
1896  * @dst_addr: Device physical address of VRAM (destination of migrate)
1897  *
1898  * Copy from an array of DMA addresses to a VRAM device physical address.
1899  *
1900  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
1901  * failure
1902  */
1903 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
1904 				     unsigned long npages,
1905 				     struct drm_pagemap_addr *src_addr,
1906 				     u64 dst_addr)
1907 {
1908 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
1909 			       XE_MIGRATE_COPY_TO_VRAM);
1910 }
1911 
1912 /**
1913  * xe_migrate_from_vram() - Migrate from VRAM
1914  * @m: The migration context.
1915  * @npages: Number of pages to migrate.
1916  * @src_addr: Device physical address of VRAM (source of migrate)
1917  * @dst_addr: Array of DMA information (destination of migrate)
1918  *
1919  * Copy from a VRAM device physical address to an array of DMA addresses.
1920  *
1921  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
1922  * failure
1923  */
1924 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
1925 				       unsigned long npages,
1926 				       u64 src_addr,
1927 				       struct drm_pagemap_addr *dst_addr)
1928 {
1929 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
1930 			       XE_MIGRATE_COPY_TO_SRAM);
1931 }
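
/*
 * Illustrative usage sketch (not part of the driver): a caller that has
 * already DMA-mapped npages of system memory into a struct drm_pagemap_addr
 * array and knows the destination VRAM device physical address could kick off
 * and wait for the copy roughly as follows; "tile", "pagemap_addr" and
 * "vram_addr" are hypothetical:
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_migrate_to_vram(tile->migrate, npages, pagemap_addr,
 *				   vram_addr);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */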
1932 
1933 static void xe_migrate_dma_unmap(struct xe_device *xe,
1934 				 struct drm_pagemap_addr *pagemap_addr,
1935 				 int len, int write)
1936 {
1937 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
1938 
1939 	for (i = 0; i < npages; ++i) {
1940 		if (!pagemap_addr[i].addr)
1941 			break;
1942 
1943 		dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
1944 			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1945 	}
1946 	kfree(pagemap_addr);
1947 }
1948 
1949 static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
1950 						   void *buf, int len,
1951 						   int write)
1952 {
1953 	struct drm_pagemap_addr *pagemap_addr;
1954 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
1955 
1956 	pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL);
1957 	if (!pagemap_addr)
1958 		return ERR_PTR(-ENOMEM);
1959 
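	/* DMA-map the buffer one page at a time, handling vmalloc and lowmem addresses */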
1960 	for (i = 0; i < npages; ++i) {
1961 		dma_addr_t addr;
1962 		struct page *page;
1963 		enum dma_data_direction dir = write ? DMA_TO_DEVICE :
1964 						      DMA_FROM_DEVICE;
1965 
1966 		if (is_vmalloc_addr(buf))
1967 			page = vmalloc_to_page(buf);
1968 		else
1969 			page = virt_to_page(buf);
1970 
1971 		addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
1972 		if (dma_mapping_error(xe->drm.dev, addr))
1973 			goto err_fault;
1974 
1975 		pagemap_addr[i] =
1976 			drm_pagemap_addr_encode(addr,
1977 						DRM_INTERCONNECT_SYSTEM,
1978 						0, dir);
1979 		buf += PAGE_SIZE;
1980 	}
1981 
1982 	return pagemap_addr;
1983 
1984 err_fault:
1985 	xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
1986 	return ERR_PTR(-EFAULT);
1987 }
1988 
1989 /**
1990  * xe_migrate_access_memory - Access memory of a BO via GPU
1991  *
1992  * @m: The migration context.
1993  * @bo: buffer object
1994  * @offset: access offset into buffer object
1995  * @buf: pointer to caller memory to read into or write from
1996  * @len: length of access
1997  * @write: write access
1998  *
1999  * Access memory of a BO via the GPU, either reading into or writing from a
2000  * passed-in pointer. The pointer is DMA-mapped for GPU access, and GPU commands
2001  * are issued to copy between it and the BO's memory.
2002  *
2003  * Return:
2004  * 0 if successful, negative error code on failure.
2005  */
2006 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
2007 			     unsigned long offset, void *buf, int len,
2008 			     int write)
2009 {
2010 	struct xe_tile *tile = m->tile;
2011 	struct xe_device *xe = tile_to_xe(tile);
2012 	struct xe_res_cursor cursor;
2013 	struct dma_fence *fence = NULL;
2014 	struct drm_pagemap_addr *pagemap_addr;
2015 	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
2016 	int bytes_left = len, current_page = 0;
2017 	void *orig_buf = buf;
2018 
2019 	xe_bo_assert_held(bo);
2020 
2021 	/* Use bounce buffer for small access and unaligned access */
2022 	if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
2023 	    !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
2024 		int buf_offset = 0;
2025 		void *bounce;
2026 		int err;
2027 
2028 		BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
2029 		bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
2030 		if (!bounce)
2031 			return -ENOMEM;
2032 
2033 		/*
2034 		 * Less than ideal for large unaligned accesses, but those should be
2035 		 * fairly rare; this can be fixed up if it becomes common.
2036 		 */
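		/*
		 * For example, a hypothetical 10-byte access at offset 60:
		 * the first iteration reads back the aligned cacheline at
		 * offset 0 and handles bytes 60..63 (copy_bytes = 4); for a
		 * write it patches them into the bounce buffer and writes the
		 * cacheline back. The second iteration does the same for the
		 * remaining 6 bytes in the cacheline at offset 64.
		 */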
2037 		do {
2038 			int copy_bytes = min_t(int, bytes_left,
2039 					       XE_CACHELINE_BYTES -
2040 					       (offset & XE_CACHELINE_MASK));
2041 			int ptr_offset = offset & XE_CACHELINE_MASK;
2042 
2043 			err = xe_migrate_access_memory(m, bo,
2044 						       offset &
2045 						       ~XE_CACHELINE_MASK,
2046 						       bounce,
2047 						       XE_CACHELINE_BYTES, 0);
2048 			if (err)
2049 				break;
2050 
2051 			if (write) {
2052 				memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
2053 
2054 				err = xe_migrate_access_memory(m, bo,
2055 							       offset & ~XE_CACHELINE_MASK,
2056 							       bounce,
2057 							       XE_CACHELINE_BYTES, write);
2058 				if (err)
2059 					break;
2060 			} else {
2061 				memcpy(buf + buf_offset, bounce + ptr_offset,
2062 				       copy_bytes);
2063 			}
2064 
2065 			bytes_left -= copy_bytes;
2066 			buf_offset += copy_bytes;
2067 			offset += copy_bytes;
2068 		} while (bytes_left);
2069 
2070 		kfree(bounce);
2071 		return err;
2072 	}
2073 
2074 	pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
2075 	if (IS_ERR(pagemap_addr))
2076 		return PTR_ERR(pagemap_addr);
2077 
2078 	xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
2079 
2080 	do {
2081 		struct dma_fence *__fence;
2082 		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
2083 			cursor.start;
2084 		int current_bytes;
2085 
2086 		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
2087 			current_bytes = min_t(int, bytes_left,
2088 					      MAX_PREEMPTDISABLE_TRANSFER);
2089 		else
2090 			current_bytes = min_t(int, bytes_left, cursor.size);
2091 
2092 		if (current_bytes & ~PAGE_MASK) {
2093 			int pitch = 4;
2094 
2095 			current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
2096 		}
2097 
2098 		__fence = xe_migrate_vram(m, current_bytes,
2099 					  (unsigned long)buf & ~PAGE_MASK,
2100 					  &pagemap_addr[current_page],
2101 					  vram_addr, write ?
2102 					  XE_MIGRATE_COPY_TO_VRAM :
2103 					  XE_MIGRATE_COPY_TO_SRAM);
2104 		if (IS_ERR(__fence)) {
2105 			if (fence) {
2106 				dma_fence_wait(fence, false);
2107 				dma_fence_put(fence);
2108 			}
2109 			fence = __fence;
2110 			goto out_err;
2111 		}
2112 
2113 		dma_fence_put(fence);
2114 		fence = __fence;
2115 
2116 		buf += current_bytes;
2117 		offset += current_bytes;
2118 		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
2119 		bytes_left -= current_bytes;
2120 		if (bytes_left)
2121 			xe_res_next(&cursor, current_bytes);
2122 	} while (bytes_left);
2123 
2124 	dma_fence_wait(fence, false);
2125 	dma_fence_put(fence);
2126 
2127 out_err:
2128 	xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
2129 	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
2130 }
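
/*
 * Minimal usage sketch (illustrative only, not part of the driver): a caller
 * could lock the BO and read its first 64 bytes into a local buffer roughly
 * like this; "tile" and "data" are hypothetical:
 *
 *	u8 data[64];
 *	int err;
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_migrate_access_memory(tile->migrate, bo, 0, data,
 *				       sizeof(data), 0);
 *	xe_bo_unlock(bo);
 */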
2131 
2132 /**
2133  * xe_migrate_job_lock() - Lock migrate job lock
2134  * @m: The migration context.
2135  * @q: Queue associated with the operation which requires a lock
2136  *
2137  * Lock the migrate job lock if the queue is a migration queue, otherwise
2138  * assert the VM's dma-resv is held (user queues have their own locking).
2139  */
2140 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
2141 {
2142 	bool is_migrate = q == m->q;
2143 
2144 	if (is_migrate)
2145 		mutex_lock(&m->job_mutex);
2146 	else
2147 		xe_vm_assert_held(q->vm);	/* A user queue's VM should be locked */
2148 }
2149 
2150 /**
2151  * xe_migrate_job_unlock() - Unlock migrate job lock
2152  * @m: The migration context.
2153  * @q: Queue associated with the operation which requires a lock
2154  *
2155  * Unlock the migrate job lock if the queue is a migration queue, otherwise
2156  * assert the VM's dma-resv is held (user queues have their own locking).
2157  */
2158 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
2159 {
2160 	bool is_migrate = q == m->q;
2161 
2162 	if (is_migrate)
2163 		mutex_unlock(&m->job_mutex);
2164 	else
2165 		xe_vm_assert_held(q->vm);	/* A user queue's VM should be locked */
2166 }
2167 
2168 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2169 #include "tests/xe_migrate.c"
2170 #endif
2171