xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <drm/xe_drm.h>
14 
15 #include <generated/xe_wa_oob.h>
16 
17 #include "instructions/xe_mi_commands.h"
18 #include "regs/xe_gpu_commands.h"
19 #include "regs/xe_gtt_defs.h"
20 #include "tests/xe_test.h"
21 #include "xe_assert.h"
22 #include "xe_bb.h"
23 #include "xe_bo.h"
24 #include "xe_exec_queue.h"
25 #include "xe_ggtt.h"
26 #include "xe_gt.h"
27 #include "xe_hw_engine.h"
28 #include "xe_lrc.h"
29 #include "xe_map.h"
30 #include "xe_mocs.h"
31 #include "xe_pt.h"
32 #include "xe_res_cursor.h"
33 #include "xe_sched_job.h"
34 #include "xe_sync.h"
35 #include "xe_trace.h"
36 #include "xe_vm.h"
37 #include "xe_wa.h"
38 
39 /**
40  * struct xe_migrate - migrate context.
41  */
42 struct xe_migrate {
43 	/** @q: Default exec queue used for migration */
44 	struct xe_exec_queue *q;
45 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
46 	struct xe_tile *tile;
47 	/** @job_mutex: Timeline mutex for @eng. */
48 	struct mutex job_mutex;
49 	/** @pt_bo: Page-table buffer object. */
50 	struct xe_bo *pt_bo;
51 	/** @batch_base_ofs: VM offset of the migration batch buffer */
52 	u64 batch_base_ofs;
53 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
54 	u64 usm_batch_base_ofs;
55 	/** @cleared_mem_ofs: VM offset of @cleared_bo. */
56 	u64 cleared_mem_ofs;
57 	/**
58 	 * @fence: dma-fence representing the last migration job batch.
59 	 * Protected by @job_mutex.
60 	 */
61 	struct dma_fence *fence;
62 	/**
63 	 * @vm_update_sa: For integrated, used to suballocate page-tables
64 	 * out of the pt_bo.
65 	 */
66 	struct drm_suballoc_manager vm_update_sa;
67 	/** @min_chunk_size: For dgfx, minimum chunk size in bytes */
68 	u64 min_chunk_size;
69 };
70 
71 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
72 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
73 #define NUM_KERNEL_PDE 17
74 #define NUM_PT_SLOTS 32
75 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
76 #define MAX_NUM_PTE 512
77 
78 /*
79  * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
80  * legal value accepted.  Since that instruction field is always stored in
81  * (val-2) format, this translates to 0x400 dwords for the true maximum length
82  * of the instruction.  Subtracting the instruction header (1 dword) and
83  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
84  */
85 #define MAX_PTE_PER_SDI 0x1FE
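
/*
 * Spelled out (illustrative recap of the comment above, no new facts):
 *
 *	max "length" field value:  0x3FE   (stored as val - 2)
 *	true instruction length:   0x400   dwords
 *	- instruction header:          1   dword
 *	- destination address:         2   dwords
 *	= payload:                 0x3FD   dwords == 0x1FE qwords
 *	                                   (one trailing dword is unusable)
 */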
86 
87 /**
88  * xe_tile_migrate_engine() - Get this tile's migrate engine.
89  * @tile: The tile.
90  *
91  * Returns the default migrate engine of this tile.
92  * TODO: Perhaps this function is slightly misplaced, and even unneeded?
93  *
94  * Return: The default migrate engine
95  */
96 struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
97 {
98 	return tile->migrate->q;
99 }
100 
101 static void xe_migrate_fini(struct drm_device *dev, void *arg)
102 {
103 	struct xe_migrate *m = arg;
104 
105 	xe_vm_lock(m->q->vm, false);
106 	xe_bo_unpin(m->pt_bo);
107 	xe_vm_unlock(m->q->vm);
108 
109 	dma_fence_put(m->fence);
110 	xe_bo_put(m->pt_bo);
111 	drm_suballoc_manager_fini(&m->vm_update_sa);
112 	mutex_destroy(&m->job_mutex);
113 	xe_vm_close_and_put(m->q->vm);
114 	xe_exec_queue_put(m->q);
115 }
116 
117 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
118 {
119 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
120 
121 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
122 	return (slot + 1ULL) << xe_pt_shift(level + 1);
123 }
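
/*
 * Worked example (illustrative, assuming 4 KiB GPU pages with 9-bit
 * page-table levels, so xe_pt_shift(1) == 21): slot 0 yields GPU VA
 * 1ULL << 21 == 2 MiB and slot 1 yields 4 MiB, i.e. each PT slot owns
 * one 2 MiB level-0 window, with window 0 of the address space kept for
 * the PT bo and batch-buffer mapping mentioned above.
 */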
124 
125 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
126 {
127 	/*
128 	 * Subtract the DPA base to get the correct offset into the
129 	 * identity table for the migrate offset.
130 	 */
131 	addr -= xe->mem.vram.dpa_base;
132 	return addr + (256ULL << xe_pt_shift(2));
133 }
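
/*
 * Worked example (illustrative, assuming xe_pt_shift(2) == 30): the
 * identity map starts at 256ULL << 30 == 256 GiB into the migrate VM,
 * so a VRAM page at DPA dpa_base + 0x1000 maps to GPU VA
 * 256 GiB + 0x1000. This matches the "identity map the entire vram at
 * 256GiB offset" setup in xe_migrate_prepare_vm() below.
 */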
134 
135 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
136 				 struct xe_vm *vm)
137 {
138 	struct xe_device *xe = tile_to_xe(tile);
139 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
140 	u8 id = tile->id;
141 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
142 	u32 map_ofs, level, i;
143 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
144 	u64 entry;
145 
146 	/* Can't bump NUM_PT_SLOTS too high */
147 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
148 	/* Must be a multiple of 64K to support all platforms */
149 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
150 	/* And one slot reserved for the 4KiB page table updates */
151 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
152 
153 	/* Need to be sure everything fits in the first PT, or create more */
154 	xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
155 
156 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
157 				  num_entries * XE_PAGE_SIZE,
158 				  ttm_bo_type_kernel,
159 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
160 				  XE_BO_FLAG_PINNED);
161 	if (IS_ERR(bo))
162 		return PTR_ERR(bo);
163 
164 	entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
165 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
166 
167 	map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
168 
169 	/* Map the entire BO in our level 0 pt */
170 	for (i = 0, level = 0; i < num_entries; level++) {
171 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
172 						  pat_index, 0);
173 
174 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
175 
176 		if (vm->flags & XE_VM_FLAG_64K)
177 			i += 16;
178 		else
179 			i += 1;
180 	}
181 
182 	if (!IS_DGFX(xe)) {
183 		/* Write out batch too */
184 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
185 		for (i = 0; i < batch->size;
186 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
187 		     XE_PAGE_SIZE) {
188 			entry = vm->pt_ops->pte_encode_bo(batch, i,
189 							  pat_index, 0);
190 
191 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
192 				  entry);
193 			level++;
194 		}
195 		if (xe->info.has_usm) {
196 			xe_tile_assert(tile, batch->size == SZ_1M);
197 
198 			batch = tile->primary_gt->usm.bb_pool->bo;
199 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
200 			xe_tile_assert(tile, batch->size == SZ_512K);
201 
202 			for (i = 0; i < batch->size;
203 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
204 			     XE_PAGE_SIZE) {
205 				entry = vm->pt_ops->pte_encode_bo(batch, i,
206 								  pat_index, 0);
207 
208 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
209 					  entry);
210 				level++;
211 			}
212 		}
213 	} else {
214 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
215 
216 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
217 
218 		if (xe->info.has_usm) {
219 			batch = tile->primary_gt->usm.bb_pool->bo;
220 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
221 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
222 		}
223 	}
224 
225 	for (level = 1; level < num_level; level++) {
226 		u32 flags = 0;
227 
228 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
229 			flags = XE_PDE_64K;
230 
231 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
232 						  XE_PAGE_SIZE, pat_index);
233 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
234 			  entry | flags);
235 	}
236 
237 	/* Write PDEs that point to our BO. */
238 	for (i = 0; i < num_entries - num_level; i++) {
239 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
240 						  pat_index);
241 
242 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
243 			  (i + 1) * 8, u64, entry);
244 	}
245 
246 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
247 	level = 2;
248 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
249 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
250 		  | XE_PTE_NULL);
251 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
252 
253 	/* Identity map the entire vram at 256GiB offset */
254 	if (IS_DGFX(xe)) {
255 		u64 pos, ofs, flags;
256 
257 		level = 2;
258 		ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
259 		flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
260 						    true, 0);
261 
262 		/*
263 		 * Use 1GB pages; it shouldn't matter that the physical amount
264 		 * of vram is less, as long as we don't access it.
265 		 */
266 		for (pos = xe->mem.vram.dpa_base;
267 		     pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
268 		     pos += SZ_1G, ofs += 8)
269 			xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
270 	}
271 
272 	/*
273 	 * Example layout created above, with root level = 3:
274 	 * [PT0...PT7]: kernel PTs for copy/clear; 64 KiB or 4 KiB PTEs
275 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTEs
276 	 * [PT9...PT28]: Userspace PTs for VM_BIND, 4 KiB PTEs
277 	 * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
278 	 *
279 	 * This makes the lowest part of the VM point to the pagetables.
280 	 * Hence the lowest 2M of the vm points to itself; with a few writes
281 	 * and flushes, other parts of the VM can be used for either copying
282 	 * or clearing.
283 	 *
284 	 * For performance, the kernel reserves PDEs, so about 20 are left
285 	 * for async VM updates.
286 	 *
287 	 * To make it easier to work with, each scratch PT is put in slot
288 	 * (1 + PT #) everywhere; this allows lockless updates to scratch
289 	 * pages by using the different addresses in the VM.
290 	 */
291 #define NUM_VMUSA_UNIT_PER_PAGE	32
292 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
293 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
294 	drm_suballoc_manager_init(&m->vm_update_sa,
295 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
296 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
297 
298 	m->pt_bo = bo;
299 	return 0;
300 }
301 
302 /*
303  * Due to workaround 16017236439, odd instance hardware copy engines are
304  * faster than even instance ones.
305  * This function returns the mask of all fast copy engines plus the
306  * reserved copy engine, to be used as the logical mask for the migrate
307  * engine. Including the reserved copy engine avoids deadlocks caused by
308  * migrate jobs servicing faults getting stuck behind the job that faulted.
309  */
310 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
311 {
312 	u32 logical_mask = 0;
313 	struct xe_hw_engine *hwe;
314 	enum xe_hw_engine_id id;
315 
316 	for_each_hw_engine(hwe, gt, id) {
317 		if (hwe->class != XE_ENGINE_CLASS_COPY)
318 			continue;
319 
320 		if (!XE_WA(gt, 16017236439) ||
321 		    xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
322 			logical_mask |= BIT(hwe->logical_instance);
323 	}
324 
325 	return logical_mask;
326 }
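
/*
 * Worked example (illustrative, hypothetical fusing): on a part
 * exposing BCS0, BCS1 and BCS2 where BCS0 is the reserved USM engine,
 * the mask takes BCS0 (reserved) and BCS1 (odd instance), but with the
 * workaround active it skips BCS2 (even instance, not reserved).
 */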
327 
328 /**
329  * xe_migrate_init() - Initialize a migrate context
330  * @tile: Back-pointer to the tile we're initializing for.
331  *
332  * Return: Pointer to a migrate context on success. Error pointer on error.
333  */
334 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
335 {
336 	struct xe_device *xe = tile_to_xe(tile);
337 	struct xe_gt *primary_gt = tile->primary_gt;
338 	struct xe_migrate *m;
339 	struct xe_vm *vm;
340 	int err;
341 
342 	m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
343 	if (!m)
344 		return ERR_PTR(-ENOMEM);
345 
346 	m->tile = tile;
347 
348 	/* Special layout, prepared below.. */
349 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
350 			  XE_VM_FLAG_SET_TILE_ID(tile));
351 	if (IS_ERR(vm))
352 		return ERR_CAST(vm);
353 
354 	xe_vm_lock(vm, false);
355 	err = xe_migrate_prepare_vm(tile, m, vm);
356 	xe_vm_unlock(vm);
357 	if (err) {
358 		xe_vm_close_and_put(vm);
359 		return ERR_PTR(err);
360 	}
361 
362 	if (xe->info.has_usm) {
363 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
364 							   XE_ENGINE_CLASS_COPY,
365 							   primary_gt->usm.reserved_bcs_instance,
366 							   false);
367 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
368 
369 		if (!hwe || !logical_mask)
370 			return ERR_PTR(-EINVAL);
371 
372 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
373 					    EXEC_QUEUE_FLAG_KERNEL |
374 					    EXEC_QUEUE_FLAG_PERMANENT |
375 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
376 	} else {
377 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
378 						  XE_ENGINE_CLASS_COPY,
379 						  EXEC_QUEUE_FLAG_KERNEL |
380 						  EXEC_QUEUE_FLAG_PERMANENT);
381 	}
382 	if (IS_ERR(m->q)) {
383 		xe_vm_close_and_put(vm);
384 		return ERR_CAST(m->q);
385 	}
386 
387 	mutex_init(&m->job_mutex);
388 
389 	err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
390 	if (err)
391 		return ERR_PTR(err);
392 
393 	if (IS_DGFX(xe)) {
394 		if (xe_device_has_flat_ccs(xe))
395 			/* min chunk size corresponds to 4K of CCS Metadata */
396 			m->min_chunk_size = SZ_4K * SZ_64K /
397 				xe_device_ccs_bytes(xe, SZ_64K);
398 		else
399 			/* Somewhat arbitrary to avoid a huge amount of blits */
400 			m->min_chunk_size = SZ_64K;
401 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
402 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
403 			(unsigned long long)m->min_chunk_size);
404 	}
405 
406 	return m;
407 }
408 
409 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
410 {
411 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
412 		return MAX_CCS_LIMITED_TRANSFER;
413 
414 	return MAX_PREEMPTDISABLE_TRANSFER;
415 }
416 
417 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
418 {
419 	struct xe_device *xe = tile_to_xe(m->tile);
420 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
421 
422 	if (mem_type_is_vram(cur->mem_type)) {
423 		/*
424 		 * For VRAM, we want to blit in chunks with sizes aligned to
425 		 * min_chunk_size so that the offset to the CCS metadata stays
426 		 * page-aligned. If it's the last chunk it may be smaller.
427 		 *
428 		 * Another constraint is that we need to limit the blit to
429 		 * the VRAM block size, unless size is smaller than
430 		 * min_chunk_size.
431 		 */
432 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
433 
434 		size = min_t(u64, size, chunk);
435 		if (size > m->min_chunk_size)
436 			size = round_down(size, m->min_chunk_size);
437 	}
438 
439 	return size;
440 }
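
/*
 * Worked example (illustrative): assume m->min_chunk_size == SZ_64K and
 * a contiguous 200 KiB VRAM block with 200 KiB remaining. Then
 * size == chunk == 200 KiB, rounded down to 192 KiB so that the next
 * chunk's offset to the CCS metadata stays aligned; the 8 KiB tail is
 * emitted as its own final, smaller chunk on the next call.
 */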
441 
442 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
443 {
444 	/* If the chunk is not fragmented, allow identity map. */
445 	return cur->size >= size;
446 }
447 
448 static u32 pte_update_size(struct xe_migrate *m,
449 			   bool is_vram,
450 			   struct ttm_resource *res,
451 			   struct xe_res_cursor *cur,
452 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
453 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
454 {
455 	u32 cmds = 0;
456 
457 	*L0_pt = pt_ofs;
458 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
459 		/* Offset into identity map. */
460 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
461 					      cur->start + vram_region_gpu_offset(res));
462 		cmds += cmd_size;
463 	} else {
464 		/* Clip L0 to available size */
465 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
466 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
467 
468 		*L0 = size;
469 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
470 
471 		/* MI_STORE_DATA_IMM */
472 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
473 
474 		/* PTE qwords */
475 		cmds += num_4k_pages * 2;
476 
477 		/* Each chunk has a single blit command */
478 		cmds += cmd_size;
479 	}
480 
481 	return cmds;
482 }
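
/*
 * Worked example (illustrative): a full 2 MiB non-identity chunk needs
 * 512 4 KiB PTEs. With MAX_PTE_PER_SDI == 0x1FE (510) that costs two
 * MI_STORE_DATA_IMM headers (2 * 3 == 6 dwords) plus 512 * 2 == 1024
 * dwords of PTE payload, plus @cmd_size dwords for the blit itself.
 */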
483 
484 static void emit_pte(struct xe_migrate *m,
485 		     struct xe_bb *bb, u32 at_pt,
486 		     bool is_vram, bool is_comp_pte,
487 		     struct xe_res_cursor *cur,
488 		     u32 size, struct ttm_resource *res)
489 {
490 	struct xe_device *xe = tile_to_xe(m->tile);
491 	struct xe_vm *vm = m->q->vm;
492 	u16 pat_index;
493 	u32 ptes;
494 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
495 	u64 cur_ofs;
496 
497 	/* Indirect access needs a compression-enabled, uncached PAT index */
498 	if (GRAPHICS_VERx100(xe) >= 2000)
499 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
500 					  xe->pat.idx[XE_CACHE_WB];
501 	else
502 		pat_index = xe->pat.idx[XE_CACHE_WB];
503 
504 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
505 
506 	while (ptes) {
507 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
508 
509 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
510 		bb->cs[bb->len++] = ofs;
511 		bb->cs[bb->len++] = 0;
512 
513 		cur_ofs = ofs;
514 		ofs += chunk * 8;
515 		ptes -= chunk;
516 
517 		while (chunk--) {
518 			u64 addr, flags = 0;
519 			bool devmem = false;
520 
521 			addr = xe_res_dma(cur) & PAGE_MASK;
522 			if (is_vram) {
523 				if (vm->flags & XE_VM_FLAG_64K) {
524 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
525 
526 					xe_assert(xe, (va & (SZ_64K - 1)) ==
527 						  (addr & (SZ_64K - 1)));
528 
529 					flags |= XE_PTE_PS64;
530 				}
531 
532 				addr += vram_region_gpu_offset(res);
533 				devmem = true;
534 			}
535 
536 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
537 							   addr, pat_index,
538 							   0, devmem, flags);
539 			bb->cs[bb->len++] = lower_32_bits(addr);
540 			bb->cs[bb->len++] = upper_32_bits(addr);
541 
542 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
543 			cur_ofs += 8;
544 		}
545 	}
546 }
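
/*
 * Shape of the stream emitted above (illustrative recap), per chunk:
 *
 *	MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk)
 *	ofs				PT write offset, low 32 bits
 *	0				upper bits; PTs live in the low 2 MiB
 *	lower_32_bits(PTE[0])
 *	upper_32_bits(PTE[0])
 *	...				chunk PTE qwords in total
 */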
547 
548 #define EMIT_COPY_CCS_DW 5
549 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
550 			  u64 dst_ofs, bool dst_is_indirect,
551 			  u64 src_ofs, bool src_is_indirect,
552 			  u32 size)
553 {
554 	struct xe_device *xe = gt_to_xe(gt);
555 	u32 *cs = bb->cs + bb->len;
556 	u32 num_ccs_blks;
557 	u32 num_pages;
558 	u32 ccs_copy_size;
559 	u32 mocs;
560 
561 	if (GRAPHICS_VERx100(xe) >= 2000) {
562 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
563 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
564 
565 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
566 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
567 
568 	} else {
569 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
570 					    NUM_CCS_BYTES_PER_BLOCK);
571 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
572 
573 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
574 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
575 	}
576 
577 	*cs++ = XY_CTRL_SURF_COPY_BLT |
578 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
579 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
580 		ccs_copy_size;
581 	*cs++ = lower_32_bits(src_ofs);
582 	*cs++ = upper_32_bits(src_ofs) | mocs;
583 	*cs++ = lower_32_bits(dst_ofs);
584 	*cs++ = upper_32_bits(dst_ofs) | mocs;
585 
586 	bb->len = cs - bb->cs;
587 }
588 
589 #define EMIT_COPY_DW 10
590 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
591 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
592 		      unsigned int pitch)
593 {
594 	struct xe_device *xe = gt_to_xe(gt);
595 	u32 mocs = 0;
596 	u32 tile_y = 0;
597 
598 	xe_gt_assert(gt, size / pitch <= S16_MAX);
599 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
600 	xe_gt_assert(gt, pitch <= U16_MAX);
601 
602 	if (GRAPHICS_VER(xe) >= 20)
603 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
604 
605 	if (GRAPHICS_VERx100(xe) >= 1250)
606 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
607 
608 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
609 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
610 	bb->cs[bb->len++] = 0;
611 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
612 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
613 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
614 	bb->cs[bb->len++] = 0;
615 	bb->cs[bb->len++] = pitch | mocs;
616 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
617 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
618 }
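
/*
 * Dword layout of the XY_FAST_COPY_BLT emitted above (illustrative
 * recap of the code, not a bspec quote):
 *
 *	0:   XY_FAST_COPY_BLT_CMD | (10 - 2)
 *	1:   depth | pitch | tiling | mocs
 *	2:   dst (X1, Y1) == (0, 0)
 *	3:   dst (X2, Y2) == (pitch / 4, size / pitch)
 *	4-5: dst address
 *	6:   src (X1, Y1) == (0, 0)
 *	7:   src pitch | mocs
 *	8-9: src address
 */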
619 
620 static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
621 			enum dma_resv_usage usage)
622 {
623 	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
624 }
625 
626 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
627 {
628 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
629 }
630 
631 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
632 			       struct xe_bb *bb,
633 			       u64 src_ofs, bool src_is_indirect,
634 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
635 			       u64 ccs_ofs, bool copy_ccs)
636 {
637 	struct xe_gt *gt = m->tile->primary_gt;
638 	u32 flush_flags = 0;
639 
640 	if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
641 		/*
642 		 * If the src is already in vram, then it should already
643 		 * have been cleared by us, or has been populated by the
644 		 * user. Make sure we copy the CCS aux state as-is.
645 		 *
646 		 * Otherwise if the bo doesn't have any CCS metadata attached,
647 		 * we still need to clear it for security reasons.
648 		 */
649 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
650 
651 		emit_copy_ccs(gt, bb,
652 			      dst_ofs, true,
653 			      ccs_src_ofs, src_is_indirect, dst_size);
654 
655 		flush_flags = MI_FLUSH_DW_CCS;
656 	} else if (copy_ccs) {
657 		if (!src_is_indirect)
658 			src_ofs = ccs_ofs;
659 		else if (!dst_is_indirect)
660 			dst_ofs = ccs_ofs;
661 
662 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
663 
664 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
665 			      src_is_indirect, dst_size);
666 		if (dst_is_indirect)
667 			flush_flags = MI_FLUSH_DW_CCS;
668 	}
669 
670 	return flush_flags;
671 }
672 
673 /**
674  * xe_migrate_copy() - Copy content of TTM resources.
675  * @m: The migration context.
676  * @src_bo: The buffer object @src is currently bound to.
677  * @dst_bo: If copying between resources created for the same bo, set this to
678  * the same value as @src_bo. If copying between buffer objects, set it to
679  * the buffer object @dst is currently bound to.
680  * @src: The source TTM resource.
681  * @dst: The dst TTM resource.
682  * @copy_only_ccs: If true, copy only CCS metadata
683  *
684  * Copies the contents of @src to @dst: On flat CCS devices,
685  * the CCS metadata is copied as well if needed, or if not present,
686  * the CCS metadata of @dst is cleared for security reasons.
687  *
688  * Return: Pointer to a dma_fence representing the last copy batch, or
689  * an error pointer on failure. If there is a failure, any copy operation
690  * started by the function call has been synced.
691  */
692 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
693 				  struct xe_bo *src_bo,
694 				  struct xe_bo *dst_bo,
695 				  struct ttm_resource *src,
696 				  struct ttm_resource *dst,
697 				  bool copy_only_ccs)
698 {
699 	struct xe_gt *gt = m->tile->primary_gt;
700 	struct xe_device *xe = gt_to_xe(gt);
701 	struct dma_fence *fence = NULL;
702 	u64 size = src_bo->size;
703 	struct xe_res_cursor src_it, dst_it, ccs_it;
704 	u64 src_L0_ofs, dst_L0_ofs;
705 	u32 src_L0_pt, dst_L0_pt;
706 	u64 src_L0, dst_L0;
707 	int pass = 0;
708 	int err;
709 	bool src_is_pltt = src->mem_type == XE_PL_TT;
710 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
711 	bool src_is_vram = mem_type_is_vram(src->mem_type);
712 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
713 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
714 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
715 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
716 
717 	/* Copying CCS between two different BOs is not supported yet. */
718 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
719 		return ERR_PTR(-EINVAL);
720 
721 	if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
722 		return ERR_PTR(-EINVAL);
723 
724 	if (!src_is_vram)
725 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
726 	else
727 		xe_res_first(src, 0, size, &src_it);
728 	if (!dst_is_vram)
729 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
730 	else
731 		xe_res_first(dst, 0, size, &dst_it);
732 
733 	if (copy_system_ccs)
734 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
735 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
736 				&ccs_it);
737 
738 	while (size) {
739 		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
740 		struct xe_sched_job *job;
741 		struct xe_bb *bb;
742 		u32 flush_flags;
743 		u32 update_idx;
744 		u64 ccs_ofs, ccs_size;
745 		u32 ccs_pt;
746 
747 		bool usm = xe->info.has_usm;
748 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
749 
750 		src_L0 = xe_migrate_res_sizes(m, &src_it);
751 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
752 
753 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
754 			pass++, src_L0, dst_L0);
755 
756 		src_L0 = min(src_L0, dst_L0);
757 
758 		batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
759 					      &src_L0_ofs, &src_L0_pt, 0, 0,
760 					      avail_pts);
761 
762 		batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
763 					      &dst_L0_ofs, &dst_L0_pt, 0,
764 					      avail_pts, avail_pts);
765 
766 		if (copy_system_ccs) {
767 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
768 			batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
769 						      &ccs_ofs, &ccs_pt, 0,
770 						      2 * avail_pts,
771 						      avail_pts);
772 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
773 		}
774 
775 		/* Add copy commands size here */
776 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
777 			((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
778 
779 		bb = xe_bb_new(gt, batch_size, usm);
780 		if (IS_ERR(bb)) {
781 			err = PTR_ERR(bb);
782 			goto err_sync;
783 		}
784 
785 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
786 			xe_res_next(&src_it, src_L0);
787 		else
788 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
789 				 &src_it, src_L0, src);
790 
791 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
792 			xe_res_next(&dst_it, src_L0);
793 		else
794 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
795 				 &dst_it, src_L0, dst);
796 
797 		if (copy_system_ccs)
798 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
799 
800 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
801 		update_idx = bb->len;
802 
803 		if (!copy_only_ccs)
804 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
805 
806 		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
807 						  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
808 						  dst_L0_ofs,
809 						  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
810 						  src_L0, ccs_ofs, copy_ccs);
811 
812 		mutex_lock(&m->job_mutex);
813 		job = xe_bb_create_migration_job(m->q, bb,
814 						 xe_migrate_batch_base(m, usm),
815 						 update_idx);
816 		if (IS_ERR(job)) {
817 			err = PTR_ERR(job);
818 			goto err;
819 		}
820 
821 		xe_sched_job_add_migrate_flush(job, flush_flags);
822 		if (!fence) {
823 			err = job_add_deps(job, src_bo->ttm.base.resv,
824 					   DMA_RESV_USAGE_BOOKKEEP);
825 			if (!err && src_bo != dst_bo)
826 				err = job_add_deps(job, dst_bo->ttm.base.resv,
827 						   DMA_RESV_USAGE_BOOKKEEP);
828 			if (err)
829 				goto err_job;
830 		}
831 
832 		xe_sched_job_arm(job);
833 		dma_fence_put(fence);
834 		fence = dma_fence_get(&job->drm.s_fence->finished);
835 		xe_sched_job_push(job);
836 
837 		dma_fence_put(m->fence);
838 		m->fence = dma_fence_get(fence);
839 
840 		mutex_unlock(&m->job_mutex);
841 
842 		xe_bb_free(bb, fence);
843 		size -= src_L0;
844 		continue;
845 
846 err_job:
847 		xe_sched_job_put(job);
848 err:
849 		mutex_unlock(&m->job_mutex);
850 		xe_bb_free(bb, NULL);
851 
852 err_sync:
853 		/* Sync partial copy if any. FIXME: under job_mutex? */
854 		if (fence) {
855 			dma_fence_wait(fence, false);
856 			dma_fence_put(fence);
857 		}
858 
859 		return ERR_PTR(err);
860 	}
861 
862 	return fence;
863 }
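
/*
 * Hedged usage sketch (hypothetical caller, not part of this file): a
 * TTM move path holding the bo's reservation might do something like
 * the following, handing the returned fence to
 * ttm_bo_move_accel_cleanup():
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	fence = xe_migrate_copy(m, bo, bo, bo->ttm.resource, new_mem,
 *				false);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = ttm_bo_move_accel_cleanup(&bo->ttm, fence, evict, true,
 *					new_mem);
 *	dma_fence_put(fence);
 */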
864 
865 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
866 				 u32 size, u32 pitch)
867 {
868 	struct xe_device *xe = gt_to_xe(gt);
869 	u32 *cs = bb->cs + bb->len;
870 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
871 
872 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
873 	*cs++ = pitch - 1;
874 	*cs++ = (size / pitch) - 1;
875 	*cs++ = pitch - 1;
876 	*cs++ = lower_32_bits(src_ofs);
877 	*cs++ = upper_32_bits(src_ofs);
878 	if (GRAPHICS_VERx100(xe) >= 2000)
879 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
880 	else
881 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
882 
883 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
884 
885 	bb->len += len;
886 }
887 
888 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
889 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
890 {
891 	struct xe_device *xe = gt_to_xe(gt);
892 	u32 *cs = bb->cs + bb->len;
893 	u32 len = XY_FAST_COLOR_BLT_DW;
894 
895 	if (GRAPHICS_VERx100(xe) < 1250)
896 		len = 11;
897 
898 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
899 		(len - 2);
900 	if (GRAPHICS_VERx100(xe) >= 2000)
901 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
902 			(pitch - 1);
903 	else
904 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
905 			(pitch - 1);
906 	*cs++ = 0;
907 	*cs++ = (size / pitch) << 16 | pitch / 4;
908 	*cs++ = lower_32_bits(src_ofs);
909 	*cs++ = upper_32_bits(src_ofs);
910 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
911 	*cs++ = 0;
912 	*cs++ = 0;
913 	*cs++ = 0;
914 	*cs++ = 0;
915 
916 	if (len > 11) {
917 		*cs++ = 0;
918 		*cs++ = 0;
919 		*cs++ = 0;
920 		*cs++ = 0;
921 		*cs++ = 0;
922 	}
923 
924 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
925 
926 	bb->len += len;
927 }
928 
929 static bool has_service_copy_support(struct xe_gt *gt)
930 {
931 	/*
932 	 * What we care about is whether the architecture was designed with
933 	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
934 	 * instructions) so check the architectural engine list rather than the
935 	 * actual list since these instructions are usable on BCS0 even if
936 	 * all of the actual service copy engines (BCS1-BCS8) have been fused
937 	 * off.
938 	 */
939 	return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
940 						XE_HW_ENGINE_BCS1);
941 }
942 
943 static u32 emit_clear_cmd_len(struct xe_gt *gt)
944 {
945 	if (has_service_copy_support(gt))
946 		return PVC_MEM_SET_CMD_LEN_DW;
947 	else
948 		return XY_FAST_COLOR_BLT_DW;
949 }
950 
951 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
952 		       u32 size, u32 pitch, bool is_vram)
953 {
954 	if (has_service_copy_support(gt))
955 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
956 	else
957 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
958 				     is_vram);
959 }
960 
961  * xe_migrate_clear() - Clear the content of a TTM resource.
962  * xe_migrate_clear() - Copy content of TTM resources.
963  * @m: The migration context.
964  * @bo: The buffer object @dst is currently bound to.
965  * @dst: The dst TTM resource to be cleared.
966  *
967  * Clear the contents of @dst to zero. On flat CCS devices,
968  * the CCS metadata is cleared to zero as well on VRAM destinations.
969  * TODO: Eliminate the @bo argument.
970  *
971  * Return: Pointer to a dma_fence representing the last clear batch, or
972  * an error pointer on failure. If there is a failure, any clear operation
973  * started by the function call has been synced.
974  */
975 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
976 				   struct xe_bo *bo,
977 				   struct ttm_resource *dst)
978 {
979 	bool clear_vram = mem_type_is_vram(dst->mem_type);
980 	struct xe_gt *gt = m->tile->primary_gt;
981 	struct xe_device *xe = gt_to_xe(gt);
982 	bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
983 	struct dma_fence *fence = NULL;
984 	u64 size = bo->size;
985 	struct xe_res_cursor src_it;
986 	struct ttm_resource *src = dst;
987 	int err;
988 
989 	if (!clear_vram)
990 		xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
991 	else
992 		xe_res_first(src, 0, bo->size, &src_it);
993 
994 	while (size) {
995 		u64 clear_L0_ofs;
996 		u32 clear_L0_pt;
997 		u32 flush_flags = 0;
998 		u64 clear_L0;
999 		struct xe_sched_job *job;
1000 		struct xe_bb *bb;
1001 		u32 batch_size, update_idx;
1002 
1003 		bool usm = xe->info.has_usm;
1004 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1005 
1006 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1007 
1008 		/* Calculate final sizes and batch size.. */
1009 		batch_size = 2 +
1010 			pte_update_size(m, clear_vram, src, &src_it,
1011 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1012 					clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
1013 					avail_pts);
1014 
1015 		if (xe_device_has_flat_ccs(xe))
1016 			batch_size += EMIT_COPY_CCS_DW;
1017 
1018 		/* Clear commands */
1019 
1020 		if (WARN_ON_ONCE(!clear_L0))
1021 			break;
1022 
1023 		bb = xe_bb_new(gt, batch_size, usm);
1024 		if (IS_ERR(bb)) {
1025 			err = PTR_ERR(bb);
1026 			goto err_sync;
1027 		}
1028 
1029 		size -= clear_L0;
1030 		/* Preemption is enabled again by the ring ops. */
1031 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
1032 			xe_res_next(&src_it, clear_L0);
1033 		else
1034 			emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
1035 				 &src_it, clear_L0, dst);
1036 
1037 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1038 		update_idx = bb->len;
1039 
1040 		if (!clear_system_ccs)
1041 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1042 
1043 		if (xe_device_has_flat_ccs(xe)) {
1044 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1045 				      m->cleared_mem_ofs, false, clear_L0);
1046 			flush_flags = MI_FLUSH_DW_CCS;
1047 		}
1048 
1049 		mutex_lock(&m->job_mutex);
1050 		job = xe_bb_create_migration_job(m->q, bb,
1051 						 xe_migrate_batch_base(m, usm),
1052 						 update_idx);
1053 		if (IS_ERR(job)) {
1054 			err = PTR_ERR(job);
1055 			goto err;
1056 		}
1057 
1058 		xe_sched_job_add_migrate_flush(job, flush_flags);
1059 		if (!fence) {
1060 			/*
1061 			 * There can't be anything userspace related at this
1062 			 * point, so we just need to respect any potential move
1063 			 * fences, which are always tracked as
1064 			 * DMA_RESV_USAGE_KERNEL.
1065 			 */
1066 			err = job_add_deps(job, bo->ttm.base.resv,
1067 					   DMA_RESV_USAGE_KERNEL);
1068 			if (err)
1069 				goto err_job;
1070 		}
1071 
1072 		xe_sched_job_arm(job);
1073 		dma_fence_put(fence);
1074 		fence = dma_fence_get(&job->drm.s_fence->finished);
1075 		xe_sched_job_push(job);
1076 
1077 		dma_fence_put(m->fence);
1078 		m->fence = dma_fence_get(fence);
1079 
1080 		mutex_unlock(&m->job_mutex);
1081 
1082 		xe_bb_free(bb, fence);
1083 		continue;
1084 
1085 err_job:
1086 		xe_sched_job_put(job);
1087 err:
1088 		mutex_unlock(&m->job_mutex);
1089 		xe_bb_free(bb, NULL);
1090 err_sync:
1091 		/* Sync partial clears if any. FIXME: under job_mutex? */
1092 		if (fence) {
1093 			dma_fence_wait(fence, false);
1094 			dma_fence_put(fence);
1095 		}
1096 
1097 		return ERR_PTR(err);
1098 	}
1099 
1100 	if (clear_system_ccs)
1101 		bo->ccs_cleared = true;
1102 
1103 	return fence;
1104 }
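
/*
 * Hedged usage sketch (hypothetical, mirroring a clear-on-create path):
 * after allocating a VRAM bo, a caller could zero it with
 *
 *	struct dma_fence *fence = xe_migrate_clear(m, bo, bo->ttm.resource);
 *
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 *
 * or install the fence in the bo's dma-resv instead of waiting.
 */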
1105 
1106 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1107 			  const struct xe_vm_pgtable_update *update,
1108 			  struct xe_migrate_pt_update *pt_update)
1109 {
1110 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1111 	u32 chunk;
1112 	u32 ofs = update->ofs, size = update->qwords;
1113 
1114 	/*
1115 	 * If we have 512 entries (max), we would populate it ourselves,
1116 	 * and update the PDE above it to the new pointer.
1117 	 * The only time this can happen is if we have to update the top
1118 	 * PDE. This requires a BO that is almost vm->size big.
1119 	 *
1120 	 * This shouldn't be possible in practice.. might change when 16K
1121 	 * pages are used. Hence the assert.
1122 	 */
1123 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1124 	if (!ppgtt_ofs)
1125 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1126 						xe_bo_addr(update->pt_bo, 0,
1127 							   XE_PAGE_SIZE));
1128 
1129 	do {
1130 		u64 addr = ppgtt_ofs + ofs * 8;
1131 
1132 		chunk = min(size, MAX_PTE_PER_SDI);
1133 
1134 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1135 		if (!(bb->len & 1))
1136 			bb->cs[bb->len++] = MI_NOOP;
1137 
1138 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1139 		bb->cs[bb->len++] = lower_32_bits(addr);
1140 		bb->cs[bb->len++] = upper_32_bits(addr);
1141 		ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
1142 			      update);
1143 
1144 		bb->len += chunk * 2;
1145 		ofs += chunk;
1146 		size -= chunk;
1147 	} while (size);
1148 }
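
/*
 * Worked example (illustrative): a 512-qword update is emitted as two
 * MI_STORE_DATA_IMM commands of 0x1FE (510) and 2 qwords. The MI_NOOP
 * padding keeps each command's payload on a qword boundary in the
 * batch so that ops->populate() may fill it with memset64().
 */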
1149 
1150 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1151 {
1152 	return xe_vm_get(m->q->vm);
1153 }
1154 
1155 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1156 struct migrate_test_params {
1157 	struct xe_test_priv base;
1158 	bool force_gpu;
1159 };
1160 
1161 #define to_migrate_test_params(_priv) \
1162 	container_of(_priv, struct migrate_test_params, base)
1163 #endif
1164 
1165 static struct dma_fence *
1166 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1167 			       struct xe_vm *vm, struct xe_bo *bo,
1168 			       const struct  xe_vm_pgtable_update *updates,
1169 			       u32 num_updates, bool wait_vm,
1170 			       struct xe_migrate_pt_update *pt_update)
1171 {
1172 	XE_TEST_DECLARE(struct migrate_test_params *test =
1173 			to_migrate_test_params
1174 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1175 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1176 	struct dma_fence *fence;
1177 	int err;
1178 	u32 i;
1179 
1180 	if (XE_TEST_ONLY(test && test->force_gpu))
1181 		return ERR_PTR(-ETIME);
1182 
1183 	if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
1184 					  DMA_RESV_USAGE_KERNEL))
1185 		return ERR_PTR(-ETIME);
1186 
1187 	if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
1188 					       DMA_RESV_USAGE_BOOKKEEP))
1189 		return ERR_PTR(-ETIME);
1190 
1191 	if (ops->pre_commit) {
1192 		pt_update->job = NULL;
1193 		err = ops->pre_commit(pt_update);
1194 		if (err)
1195 			return ERR_PTR(err);
1196 	}
1197 	for (i = 0; i < num_updates; i++) {
1198 		const struct xe_vm_pgtable_update *update = &updates[i];
1199 
1200 		ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
1201 			      update->ofs, update->qwords, update);
1202 	}
1203 
1204 	if (vm) {
1205 		trace_xe_vm_cpu_bind(vm);
1206 		xe_device_wmb(vm->xe);
1207 	}
1208 
1209 	fence = dma_fence_get_stub();
1210 
1211 	return fence;
1212 }
1213 
1214 static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
1215 			struct xe_sync_entry *syncs, u32 num_syncs)
1216 {
1217 	struct dma_fence *fence;
1218 	int i;
1219 
1220 	for (i = 0; i < num_syncs; i++) {
1221 		fence = syncs[i].fence;
1222 
1223 		if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1224 				       &fence->flags))
1225 			return false;
1226 	}
1227 	if (q) {
1228 		fence = xe_exec_queue_last_fence_get(q, vm);
1229 		if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1230 			dma_fence_put(fence);
1231 			return false;
1232 		}
1233 		dma_fence_put(fence);
1234 	}
1235 
1236 	return true;
1237 }
1238 
1239 /**
1240  * xe_migrate_update_pgtables() - Pipelined page-table update
1241  * @m: The migrate context.
1242  * @vm: The vm we'll be updating.
1243  * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
1244  * @q: The exec queue to be used for the update or NULL if the default
1245  * migration engine is to be used.
1246  * @updates: An array of update descriptors.
1247  * @num_updates: Number of descriptors in @updates.
1248  * @syncs: Array of xe_sync_entry to await before updating. Note that waits
1249  * will block the engine timeline.
1250  * @num_syncs: Number of entries in @syncs.
1251  * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
1252  * pointers to callback functions and, if subclassed, private arguments to
1253  * those.
1254  *
1255  * Perform a pipelined page-table update. The update descriptors are typically
1256  * built under the same lock critical section as a call to this function. If
1257  * using the default engine for the updates, they will be performed in the
1258  * order they grab the job_mutex. If different engines are used, external
1259  * synchronization is needed for overlapping updates to maintain page-table
1260  * consistency. Note that the meaning of "overlapping" is that the updates
1261  * touch the same page-table, which might be a higher-level page-directory.
1262  * If no pipelining is needed, then updates may be performed by the cpu.
1263  *
1264  * Return: A dma_fence that, when signaled, indicates the update completion.
1265  */
1266 struct dma_fence *
1267 xe_migrate_update_pgtables(struct xe_migrate *m,
1268 			   struct xe_vm *vm,
1269 			   struct xe_bo *bo,
1270 			   struct xe_exec_queue *q,
1271 			   const struct xe_vm_pgtable_update *updates,
1272 			   u32 num_updates,
1273 			   struct xe_sync_entry *syncs, u32 num_syncs,
1274 			   struct xe_migrate_pt_update *pt_update)
1275 {
1276 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1277 	struct xe_tile *tile = m->tile;
1278 	struct xe_gt *gt = tile->primary_gt;
1279 	struct xe_device *xe = tile_to_xe(tile);
1280 	struct xe_sched_job *job;
1281 	struct dma_fence *fence;
1282 	struct drm_suballoc *sa_bo = NULL;
1283 	struct xe_vma *vma = pt_update->vma;
1284 	struct xe_bb *bb;
1285 	u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
1286 	u64 addr;
1287 	int err = 0;
1288 	bool usm = !q && xe->info.has_usm;
1289 	bool first_munmap_rebind = vma &&
1290 		vma->gpuva.flags & XE_VMA_FIRST_REBIND;
1291 	struct xe_exec_queue *q_override = !q ? m->q : q;
1292 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1293 
1294 	/* Use the CPU if there are no in syncs and the engine is idle */
1295 	if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
1296 		fence =  xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
1297 							num_updates,
1298 							first_munmap_rebind,
1299 							pt_update);
1300 		if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
1301 			return fence;
1302 	}
1303 
1304 	/* fixed + PTE entries */
1305 	if (IS_DGFX(xe))
1306 		batch_size = 2;
1307 	else
1308 		batch_size = 6 + num_updates * 2;
1309 
1310 	for (i = 0; i < num_updates; i++) {
1311 		u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
1312 
1313 		/* align noop + MI_STORE_DATA_IMM cmd prefix */
1314 		batch_size += 4 * num_cmds + updates[i].qwords * 2;
1315 	}
1316 
1317 	/*
1318 	 * XXX: Create temp bo to copy from, if batch_size becomes too big?
1319 	 *
1320 	 * Worst case: Sum(2 * (each lower level page size) + (top level page size))
1321 	 * Should be reasonably bound..
1322 	 */
1323 	xe_tile_assert(tile, batch_size < SZ_128K);
1324 
1325 	bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
1326 	if (IS_ERR(bb))
1327 		return ERR_CAST(bb);
1328 
1329 	/* For sysmem PTEs, we need to map them in our hole. */
1330 	if (!IS_DGFX(xe)) {
1331 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1332 		if (q) {
1333 			xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
1334 
1335 			sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
1336 						 GFP_KERNEL, true, 0);
1337 			if (IS_ERR(sa_bo)) {
1338 				err = PTR_ERR(sa_bo);
1339 				goto err;
1340 			}
1341 
1342 			ppgtt_ofs = NUM_KERNEL_PDE +
1343 				(drm_suballoc_soffset(sa_bo) /
1344 				 NUM_VMUSA_UNIT_PER_PAGE);
1345 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1346 				    NUM_VMUSA_UNIT_PER_PAGE) *
1347 				VM_SA_UPDATE_UNIT_SIZE;
1348 		}
1349 
1350 		/* Map our PTs to gtt */
1351 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
1352 		bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1353 		bb->cs[bb->len++] = 0; /* upper_32_bits */
1354 
1355 		for (i = 0; i < num_updates; i++) {
1356 			struct xe_bo *pt_bo = updates[i].pt_bo;
1357 
1358 			xe_tile_assert(tile, pt_bo->size == SZ_4K);
1359 
1360 			addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
1361 			bb->cs[bb->len++] = lower_32_bits(addr);
1362 			bb->cs[bb->len++] = upper_32_bits(addr);
1363 		}
1364 
1365 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1366 		update_idx = bb->len;
1367 
1368 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1369 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1370 		for (i = 0; i < num_updates; i++)
1371 			write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
1372 				      &updates[i], pt_update);
1373 	} else {
1374 		/* phys pages, no preamble required */
1375 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1376 		update_idx = bb->len;
1377 
1378 		for (i = 0; i < num_updates; i++)
1379 			write_pgtable(tile, bb, 0, &updates[i], pt_update);
1380 	}
1381 
1382 	if (!q)
1383 		mutex_lock(&m->job_mutex);
1384 
1385 	job = xe_bb_create_migration_job(q ?: m->q, bb,
1386 					 xe_migrate_batch_base(m, usm),
1387 					 update_idx);
1388 	if (IS_ERR(job)) {
1389 		err = PTR_ERR(job);
1390 		goto err_bb;
1391 	}
1392 
1393 	/* Wait on BO move */
1394 	if (bo) {
1395 		err = job_add_deps(job, bo->ttm.base.resv,
1396 				   DMA_RESV_USAGE_KERNEL);
1397 		if (err)
1398 			goto err_job;
1399 	}
1400 
1401 	/*
1402 	 * Munmap style VM unbind, need to wait for all jobs to be complete /
1403 	 * trigger preempts before moving forward
1404 	 */
1405 	if (first_munmap_rebind) {
1406 		err = job_add_deps(job, xe_vm_resv(vm),
1407 				   DMA_RESV_USAGE_BOOKKEEP);
1408 		if (err)
1409 			goto err_job;
1410 	}
1411 
1412 	err = xe_sched_job_last_fence_add_dep(job, vm);
1413 	for (i = 0; !err && i < num_syncs; i++)
1414 		err = xe_sync_entry_add_deps(&syncs[i], job);
1415 
1416 	if (err)
1417 		goto err_job;
1418 
1419 	if (ops->pre_commit) {
1420 		pt_update->job = job;
1421 		err = ops->pre_commit(pt_update);
1422 		if (err)
1423 			goto err_job;
1424 	}
1425 	xe_sched_job_arm(job);
1426 	fence = dma_fence_get(&job->drm.s_fence->finished);
1427 	xe_sched_job_push(job);
1428 
1429 	if (!q)
1430 		mutex_unlock(&m->job_mutex);
1431 
1432 	xe_bb_free(bb, fence);
1433 	drm_suballoc_free(sa_bo, fence);
1434 
1435 	return fence;
1436 
1437 err_job:
1438 	xe_sched_job_put(job);
1439 err_bb:
1440 	if (!q)
1441 		mutex_unlock(&m->job_mutex);
1442 	xe_bb_free(bb, NULL);
1443 err:
1444 	drm_suballoc_free(sa_bo, NULL);
1445 	return ERR_PTR(err);
1446 }
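
/*
 * Minimal sketch of a struct xe_migrate_pt_update user (hypothetical,
 * for illustration only): the ->populate() callback must handle both
 * the CPU path (@map set, write through xe_map_wr()) and the GPU path
 * (@map NULL, write into the MI_STORE_DATA_IMM payload at @ptr):
 *
 *	static void example_populate(struct xe_migrate_pt_update *pt_update,
 *				     struct xe_tile *tile,
 *				     struct iosys_map *map, void *ptr,
 *				     u32 qword_ofs, u32 num_qwords,
 *				     const struct xe_vm_pgtable_update *update)
 *	{
 *		u64 empty = 0;
 *		u32 i;
 *
 *		if (map)
 *			for (i = 0; i < num_qwords; i++)
 *				xe_map_wr(tile_to_xe(tile), map,
 *					  (qword_ofs + i) * sizeof(u64),
 *					  u64, empty);
 *		else
 *			memset64(ptr, empty, num_qwords);
 *	}
 */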
1447 
1448 /**
1449  * xe_migrate_wait() - Complete all operations using the xe_migrate context
1450  * @m: Migrate context to wait for.
1451  *
1452  * Waits until the GPU no longer uses the migrate context's default engine
1453  * or its page-table objects. FIXME: What about separate page-table update
1454  * engines?
1455  */
1456 void xe_migrate_wait(struct xe_migrate *m)
1457 {
1458 	if (m->fence)
1459 		dma_fence_wait(m->fence, false);
1460 }
1461 
1462 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1463 #include "tests/xe_migrate.c"
1464 #endif
1465