xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 47c3ea3359d14ffa4ff94511ae905978d86bb5dd)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/drm_pagemap.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include <generated/xe_wa_oob.h>
17 
18 #include "instructions/xe_gpu_commands.h"
19 #include "instructions/xe_mi_commands.h"
20 #include "regs/xe_gtt_defs.h"
21 #include "tests/xe_test.h"
22 #include "xe_assert.h"
23 #include "xe_bb.h"
24 #include "xe_bo.h"
25 #include "xe_exec_queue.h"
26 #include "xe_ggtt.h"
27 #include "xe_gt.h"
28 #include "xe_hw_engine.h"
29 #include "xe_lrc.h"
30 #include "xe_map.h"
31 #include "xe_mocs.h"
32 #include "xe_pt.h"
33 #include "xe_res_cursor.h"
34 #include "xe_sa.h"
35 #include "xe_sched_job.h"
36 #include "xe_sync.h"
37 #include "xe_trace_bo.h"
38 #include "xe_validation.h"
39 #include "xe_vm.h"
40 #include "xe_vram.h"
41 
42 /**
43  * struct xe_migrate - migrate context.
44  */
45 struct xe_migrate {
46 	/** @q: Default exec queue used for migration */
47 	struct xe_exec_queue *q;
48 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
49 	struct xe_tile *tile;
50 	/** @job_mutex: Timeline mutex for @q. */
51 	struct mutex job_mutex;
52 	/** @pt_bo: Page-table buffer object. */
53 	struct xe_bo *pt_bo;
54 	/** @batch_base_ofs: VM offset of the migration batch buffer */
55 	u64 batch_base_ofs;
56 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
57 	u64 usm_batch_base_ofs;
58 	/** @cleared_mem_ofs: VM offset of the NULL mapping used as cleared memory. */
59 	u64 cleared_mem_ofs;
60 	/** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
61 	u64 large_page_copy_ofs;
62 	/**
63 	 * @large_page_copy_pdes: BO offset to write out 2M pages (PDEs) used for
64 	 * large copies
65 	 */
66 	u64 large_page_copy_pdes;
67 	/**
68 	 * @fence: dma-fence representing the last migration job batch.
69 	 * Protected by @job_mutex.
70 	 */
71 	struct dma_fence *fence;
72 	/**
73 	 * @vm_update_sa: For integrated, used to suballocate page-tables
74 	 * out of the pt_bo.
75 	 */
76 	struct drm_suballoc_manager vm_update_sa;
77 	/** @min_chunk_size: For dgfx, minimum chunk size */
78 	u64 min_chunk_size;
79 };
80 
81 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
82 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
83 #define NUM_KERNEL_PDE 15
84 #define NUM_PT_SLOTS 32
85 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
86 #define MAX_NUM_PTE 512
87 #define IDENTITY_OFFSET 256ULL
88 
89 /*
90  * Although MI_STORE_DATA_IMM's "length" field is 10 bits wide, 0x3FE is the largest
91  * legal value accepted.  Since that instruction field is always stored in
92  * (val-2) format, this translates to 0x400 dwords for the true maximum length
93  * of the instruction.  Subtracting the instruction header (1 dword) and
94  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
95  */
96 #define MAX_PTE_PER_SDI 0x1FEU
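
/*
 * Illustrative arithmetic (assuming XE_PAGE_SIZE == SZ_4K): emitting the
 * PTEs for a fully-used 2M chunk means 512 qword entries, i.e.
 * DIV_ROUND_UP(512, MAX_PTE_PER_SDI) == 2 MI_STORE_DATA_IMM commands.
 */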
97 
98 static void xe_migrate_fini(void *arg)
99 {
100 	struct xe_migrate *m = arg;
101 
102 	xe_vm_lock(m->q->vm, false);
103 	xe_bo_unpin(m->pt_bo);
104 	xe_vm_unlock(m->q->vm);
105 
106 	dma_fence_put(m->fence);
107 	xe_bo_put(m->pt_bo);
108 	drm_suballoc_manager_fini(&m->vm_update_sa);
109 	mutex_destroy(&m->job_mutex);
110 	xe_vm_close_and_put(m->q->vm);
111 	xe_exec_queue_put(m->q);
112 }
113 
114 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
115 {
116 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
117 
118 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
119 	return (slot + 1ULL) << xe_pt_shift(level + 1);
120 }
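
/*
 * Worked example (assuming 4K base pages, where xe_pt_shift(1) == 21):
 * xe_migrate_vm_addr(0, 0) == 1ULL << 21, so PT slot 0 is mapped at VM
 * address 2M, slot 1 at 4M, and so on in 2M strides.
 */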
121 
122 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
123 {
124 	/*
125 	 * Subtract the DPA base to get the correct offset into the identity
126 	 * table for the migrate offset
127 	 */
128 	u64 identity_offset = IDENTITY_OFFSET;
129 
130 	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
131 		identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
132 							(xe->mem.vram), SZ_1G);
133 
134 	addr -= xe_vram_region_dpa_base(xe->mem.vram);
135 	return addr + (identity_offset << xe_pt_shift(2));
136 }
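
/*
 * Worked example (assuming xe_pt_shift(2) == 30, i.e. 1G entries): for an
 * uncompressed PTE with addr == dpa_base, this returns
 * IDENTITY_OFFSET << 30, i.e. the 256G VM address where the identity map
 * starts.
 */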
137 
138 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
139 					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
140 {
141 	struct xe_vram_region *vram = xe->mem.vram;
142 	resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
143 	u64 pos, ofs, flags;
144 	u64 entry;
145 	/* XXX: Unclear if this should be usable_size? */
146 	u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
147 	u32 level = 2;
148 
149 	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
150 	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
151 					    true, 0);
152 
153 	xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
154 
155 	/*
156 	 * Use 1GB pages when possible; the last chunk always uses 2M
157 	 * pages, as mixing reserved memory (stolen, WOPCM) with a single
158 	 * mapping is not allowed on certain platforms.
159 	 */
160 	for (pos = dpa_base; pos < vram_limit;
161 	     pos += SZ_1G, ofs += 8) {
162 		if (pos + SZ_1G >= vram_limit) {
163 			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
164 			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
165 
166 			flags = vm->pt_ops->pte_encode_addr(xe, 0,
167 							    pat_index,
168 							    level - 1,
169 							    true, 0);
170 
171 			for (ofs = pt_2m_ofs; pos < vram_limit;
172 			     pos += SZ_2M, ofs += 8)
173 				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
174 				break;	/* Ensure the pos == vram_limit assert below holds */
175 		}
176 
177 		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
178 	}
179 
180 	xe_assert(xe, pos == vram_limit);
181 }
182 
183 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
184 				 struct xe_vm *vm, struct drm_exec *exec)
185 {
186 	struct xe_device *xe = tile_to_xe(tile);
187 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
188 	u8 id = tile->id;
189 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
190 #define VRAM_IDENTITY_MAP_COUNT	2
191 	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
192 #undef VRAM_IDENTITY_MAP_COUNT
193 	u32 map_ofs, level, i;
194 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
195 	u64 entry, pt29_ofs;
196 
197 	/* Can't bump NUM_PT_SLOTS too high */
198 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
199 	/* Must be a multiple of 64K to support all platforms */
200 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
201 	/* And one slot reserved for the 4KiB page table updates */
202 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
203 
204 	/* Need to be sure everything fits in the first PT, or create more */
205 	xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
206 
207 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
208 				  num_entries * XE_PAGE_SIZE,
209 				  ttm_bo_type_kernel,
210 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
211 				  XE_BO_FLAG_PAGETABLE, exec);
212 	if (IS_ERR(bo))
213 		return PTR_ERR(bo);
214 
215 	/* PT30 & PT31 reserved for 2M identity map */
216 	pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
217 	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
218 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
219 
220 	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
221 
222 	/* Map the entire BO in our level 0 pt */
223 	for (i = 0, level = 0; i < num_entries; level++) {
224 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
225 						  pat_index, 0);
226 
227 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
228 
229 		if (vm->flags & XE_VM_FLAG_64K)
230 			i += 16;
231 		else
232 			i += 1;
233 	}
234 
235 	if (!IS_DGFX(xe)) {
236 		/* Write out batch too */
237 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
238 		for (i = 0; i < xe_bo_size(batch);
239 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
240 		     XE_PAGE_SIZE) {
241 			entry = vm->pt_ops->pte_encode_bo(batch, i,
242 							  pat_index, 0);
243 
244 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
245 				  entry);
246 			level++;
247 		}
248 		if (xe->info.has_usm) {
249 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
250 
251 			batch = tile->primary_gt->usm.bb_pool->bo;
252 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
253 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
254 
255 			for (i = 0; i < xe_bo_size(batch);
256 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
257 			     XE_PAGE_SIZE) {
258 				entry = vm->pt_ops->pte_encode_bo(batch, i,
259 								  pat_index, 0);
260 
261 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
262 					  entry);
263 				level++;
264 			}
265 		}
266 	} else {
267 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
268 
269 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
270 
271 		if (xe->info.has_usm) {
272 			batch = tile->primary_gt->usm.bb_pool->bo;
273 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
274 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
275 		}
276 	}
277 
278 	for (level = 1; level < num_level; level++) {
279 		u32 flags = 0;
280 
281 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
282 			flags = XE_PDE_64K;
283 
284 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
285 						  XE_PAGE_SIZE);
286 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
287 			  entry | flags);
288 	}
289 
290 	/* Write PDE's that point to our BO. */
291 	for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
292 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
293 
294 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
295 			  (i + 1) * 8, u64, entry);
296 	}
297 
298 	/* Reserve 2M PDEs */
299 	level = 1;
300 	m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
301 	m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
302 		NUM_PT_SLOTS * 8;
303 
304 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
305 	level = 2;
306 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
307 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
308 		  | XE_PTE_NULL);
309 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
310 
311 	/* Identity map the entire vram at 256GiB offset */
312 	if (IS_DGFX(xe)) {
313 		u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
314 		resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
315 
316 		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
317 					    pat_index, pt30_ofs);
318 		xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
319 
320 		/*
321 		 * Identity map the entire vram for compressed pat_index for xe2+
322 		 * if flat ccs is enabled.
323 		 */
324 		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
325 			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
326 			u64 vram_offset = IDENTITY_OFFSET +
327 				DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
328 			u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
329 
330 			xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
331 							  IDENTITY_OFFSET / 2) * SZ_1G);
332 			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
333 						    comp_pat_index, pt31_ofs);
334 		}
335 	}
336 
337 	/*
338 	 * Example layout created above, with root level = 3:
339 	 * [PT0...PT7]: kernel PT's for copy/clear; 64KiB or 4KiB PTE's
340 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
341 	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
342 	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
343 	 *
344 	 * This makes the lowest part of the VM point to the pagetables.
345 	 * Hence the lowest 2M in the VM points to itself. With a few writes
346 	 * and flushes, other parts of the VM can be used for copying or
347 	 * clearing.
348 	 *
349 	 * For performance, the kernel reserves PDE's, so about 20 are left
350 	 * for async VM updates.
351 	 *
352 	 * To make it easier to work with, each scratch PT is put in slot (1 + PT #)
353 	 * everywhere; this allows lockless updates to scratch pages by using
354 	 * the different addresses in the VM.
355 	 */
356 #define NUM_VMUSA_UNIT_PER_PAGE	32
357 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
358 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
359 	drm_suballoc_manager_init(&m->vm_update_sa,
360 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
361 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
362 
363 	m->pt_bo = bo;
364 	return 0;
365 }
366 
367 /*
368  * Including the reserved copy engine is required to avoid deadlocks where
369  * migrate jobs servicing the faults get stuck behind the job that faulted.
370  */
371 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
372 {
373 	u32 logical_mask = 0;
374 	struct xe_hw_engine *hwe;
375 	enum xe_hw_engine_id id;
376 
377 	for_each_hw_engine(hwe, gt, id) {
378 		if (hwe->class != XE_ENGINE_CLASS_COPY)
379 			continue;
380 
381 		if (xe_gt_is_usm_hwe(gt, hwe))
382 			logical_mask |= BIT(hwe->logical_instance);
383 	}
384 
385 	return logical_mask;
386 }
387 
388 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
389 {
390 	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
391 }
392 
393 /**
394  * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
395  * @tile: &xe_tile
396  *
397  * Allocates a &xe_migrate for a given tile.
398  *
399  * Return: &xe_migrate on success, or NULL when out of memory.
400  */
401 struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
402 {
403 	struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
404 
405 	if (m)
406 		m->tile = tile;
407 	return m;
408 }
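
/*
 * A minimal usage sketch (illustrative only; in the driver this pairs with
 * xe_migrate_init() during tile initialization, error handling elided):
 *
 *	struct xe_migrate *m = xe_migrate_alloc(tile);
 *
 *	if (!m)
 *		return -ENOMEM;
 *	err = xe_migrate_init(m);
 */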
409 
410 static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
411 {
412 	struct xe_device *xe = tile_to_xe(tile);
413 	struct xe_validation_ctx ctx;
414 	struct drm_exec exec;
415 	int err = 0;
416 
417 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
418 		err = xe_vm_drm_exec_lock(vm, &exec);
419 		drm_exec_retry_on_contention(&exec);
420 		err = xe_migrate_prepare_vm(tile, m, vm, &exec);
421 		drm_exec_retry_on_contention(&exec);
422 		xe_validation_retry_on_oom(&ctx, &err);
423 	}
424 
425 	return err;
426 }
427 
428 /**
429  * xe_migrate_init() - Initialize a migrate context
430  * @m: The migration context
431  *
432  * Return: 0 if successful, negative error code on failure
433  */
434 int xe_migrate_init(struct xe_migrate *m)
435 {
436 	struct xe_tile *tile = m->tile;
437 	struct xe_gt *primary_gt = tile->primary_gt;
438 	struct xe_device *xe = tile_to_xe(tile);
439 	struct xe_vm *vm;
440 	int err;
441 
442 	/* Special layout, prepared below. */
443 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
444 			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);
445 	if (IS_ERR(vm))
446 		return PTR_ERR(vm);
447 
448 	err = xe_migrate_lock_prepare_vm(tile, m, vm);
449 	if (err)
450 		goto err_out;
451 
452 	if (xe->info.has_usm) {
453 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
454 							   XE_ENGINE_CLASS_COPY,
455 							   primary_gt->usm.reserved_bcs_instance,
456 							   false);
457 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
458 
459 		if (!hwe || !logical_mask) {
460 			err = -EINVAL;
461 			goto err_out;
462 		}
463 
464 		/*
465 		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
466 		 * PVC, may want to revisit if performance is needed.
467 		 */
468 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
469 					    EXEC_QUEUE_FLAG_KERNEL |
470 					    EXEC_QUEUE_FLAG_PERMANENT |
471 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY |
472 					    EXEC_QUEUE_FLAG_MIGRATE, 0);
473 	} else {
474 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
475 						  XE_ENGINE_CLASS_COPY,
476 						  EXEC_QUEUE_FLAG_KERNEL |
477 						  EXEC_QUEUE_FLAG_PERMANENT |
478 						  EXEC_QUEUE_FLAG_MIGRATE, 0);
479 	}
480 	if (IS_ERR(m->q)) {
481 		err = PTR_ERR(m->q);
482 		goto err_out;
483 	}
484 
485 	mutex_init(&m->job_mutex);
486 	fs_reclaim_acquire(GFP_KERNEL);
487 	might_lock(&m->job_mutex);
488 	fs_reclaim_release(GFP_KERNEL);
489 
490 	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
491 	if (err)
492 		return err;
493 
494 	if (IS_DGFX(xe)) {
495 		if (xe_migrate_needs_ccs_emit(xe))
496 			/* min chunk size corresponds to 4K of CCS Metadata */
497 			m->min_chunk_size = SZ_4K * SZ_64K /
498 				xe_device_ccs_bytes(xe, SZ_64K);
499 		else
500 			/* Somewhat arbitrary to avoid a huge amount of blits */
501 			m->min_chunk_size = SZ_64K;
502 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
503 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
504 			(unsigned long long)m->min_chunk_size);
505 	}
506 
507 	return err;
508 
509 err_out:
510 	xe_vm_close_and_put(vm);
511 	return err;
513 }
514 
515 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
516 {
517 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
518 		return MAX_CCS_LIMITED_TRANSFER;
519 
520 	return MAX_PREEMPTDISABLE_TRANSFER;
521 }
522 
523 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
524 {
525 	struct xe_device *xe = tile_to_xe(m->tile);
526 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
527 
528 	if (mem_type_is_vram(cur->mem_type)) {
529 		/*
530 		 * For VRAM, we want to blit in chunks with sizes aligned to
531 		 * min_chunk_size in order for the offset to CCS metadata to be
532 		 * page-aligned. If it's the last chunk it may be smaller.
533 		 *
534 		 * Another constraint is that we need to limit the blit to
535 		 * the VRAM block size, unless size is smaller than
536 		 * min_chunk_size.
537 		 */
538 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
539 
540 		size = min_t(u64, size, chunk);
541 		if (size > m->min_chunk_size)
542 			size = round_down(size, m->min_chunk_size);
543 	}
544 
545 	return size;
546 }
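
/*
 * Illustrative example (assuming an 8M max transfer per pass and
 * min_chunk_size == SZ_64K): with 16M remaining and a 3M contiguous VRAM
 * block (cur->size == 3M), size = min(8M, 16M) = 8M, then
 * min(8M, max(3M, 64K)) = 3M; since 3M > 64K it is rounded down to a 64K
 * multiple, which 3M already is, so a 3M chunk is returned.
 */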
547 
548 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
549 {
550 	/* If the chunk is not fragmented, allow identity map. */
551 	return cur->size >= size;
552 }
553 
554 #define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
555 #define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)
556 
557 static u32 pte_update_size(struct xe_migrate *m,
558 			   u32 flags,
559 			   struct ttm_resource *res,
560 			   struct xe_res_cursor *cur,
561 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
562 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
563 {
564 	u32 cmds = 0;
565 	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
566 	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
567 
568 	*L0_pt = pt_ofs;
569 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
570 		/* Offset into identity map. */
571 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
572 					      cur->start + vram_region_gpu_offset(res),
573 					      is_comp_pte);
574 		cmds += cmd_size;
575 	} else {
576 		/* Clip L0 to available size */
577 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
578 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
579 
580 		*L0 = size;
581 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
582 
583 		/* MI_STORE_DATA_IMM */
584 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
585 
586 		/* PTE qwords, two dwords each */
587 		cmds += num_4k_pages * 2;
588 
589 		/* Each chunk has a single blit command */
590 		cmds += cmd_size;
591 	}
592 
593 	return cmds;
594 }
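
/*
 * Worked example for the non-identity path (assuming a full 2M chunk and
 * avail_pts >= 1): num_4k_pages == 512, so the batch grows by
 * 3 * DIV_ROUND_UP(512, MAX_PTE_PER_SDI) == 6 command dwords, plus
 * 512 * 2 == 1024 PTE dwords, plus cmd_size for the copy/clear itself.
 */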
595 
596 static void emit_pte(struct xe_migrate *m,
597 		     struct xe_bb *bb, u32 at_pt,
598 		     bool is_vram, bool is_comp_pte,
599 		     struct xe_res_cursor *cur,
600 		     u32 size, struct ttm_resource *res)
601 {
602 	struct xe_device *xe = tile_to_xe(m->tile);
603 	struct xe_vm *vm = m->q->vm;
604 	u16 pat_index;
605 	u32 ptes;
606 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
607 	u64 cur_ofs;
608 
609 	/* Indirect access needs a compression-enabled, uncached PAT index */
610 	if (GRAPHICS_VERx100(xe) >= 2000)
611 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
612 					  xe->pat.idx[XE_CACHE_WB];
613 	else
614 		pat_index = xe->pat.idx[XE_CACHE_WB];
615 
616 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
617 
618 	while (ptes) {
619 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
620 
621 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
622 		bb->cs[bb->len++] = ofs;
623 		bb->cs[bb->len++] = 0;
624 
625 		cur_ofs = ofs;
626 		ofs += chunk * 8;
627 		ptes -= chunk;
628 
629 		while (chunk--) {
630 			u64 addr, flags = 0;
631 			bool devmem = false;
632 
633 			addr = xe_res_dma(cur) & PAGE_MASK;
634 			if (is_vram) {
635 				if (vm->flags & XE_VM_FLAG_64K) {
636 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
637 
638 					xe_assert(xe, (va & (SZ_64K - 1)) ==
639 						  (addr & (SZ_64K - 1)));
640 
641 					flags |= XE_PTE_PS64;
642 				}
643 
644 				addr += vram_region_gpu_offset(res);
645 				devmem = true;
646 			}
647 
648 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
649 							   addr, pat_index,
650 							   0, devmem, flags);
651 			bb->cs[bb->len++] = lower_32_bits(addr);
652 			bb->cs[bb->len++] = upper_32_bits(addr);
653 
654 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
655 			cur_ofs += 8;
656 		}
657 	}
658 }
659 
660 #define EMIT_COPY_CCS_DW 5
661 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
662 			  u64 dst_ofs, bool dst_is_indirect,
663 			  u64 src_ofs, bool src_is_indirect,
664 			  u32 size)
665 {
666 	struct xe_device *xe = gt_to_xe(gt);
667 	u32 *cs = bb->cs + bb->len;
668 	u32 num_ccs_blks;
669 	u32 num_pages;
670 	u32 ccs_copy_size;
671 	u32 mocs;
672 
673 	if (GRAPHICS_VERx100(xe) >= 2000) {
674 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
675 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
676 
677 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
678 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
679 
680 	} else {
681 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
682 					    NUM_CCS_BYTES_PER_BLOCK);
683 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
684 
685 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
686 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
687 	}
688 
689 	*cs++ = XY_CTRL_SURF_COPY_BLT |
690 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
691 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
692 		ccs_copy_size;
693 	*cs++ = lower_32_bits(src_ofs);
694 	*cs++ = upper_32_bits(src_ofs) | mocs;
695 	*cs++ = lower_32_bits(dst_ofs);
696 	*cs++ = upper_32_bits(dst_ofs) | mocs;
697 
698 	bb->len = cs - bb->cs;
699 }
700 
701 #define EMIT_COPY_DW 10
702 static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
703 			      u64 dst_ofs, unsigned int size,
704 			      unsigned int pitch)
705 {
706 	struct xe_device *xe = gt_to_xe(gt);
707 	u32 mocs = 0;
708 	u32 tile_y = 0;
709 
710 	xe_gt_assert(gt, !(pitch & 3));
711 	xe_gt_assert(gt, size / pitch <= S16_MAX);
712 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
713 	xe_gt_assert(gt, pitch <= U16_MAX);
714 
715 	if (GRAPHICS_VER(xe) >= 20)
716 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
717 
718 	if (GRAPHICS_VERx100(xe) >= 1250)
719 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
720 
721 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
722 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
723 	bb->cs[bb->len++] = 0;
724 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
725 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
726 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
727 	bb->cs[bb->len++] = 0;
728 	bb->cs[bb->len++] = pitch | mocs;
729 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
730 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
731 }
732 
733 #define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
734 static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
735 			  u64 dst_ofs, unsigned int size, unsigned int pitch)
736 {
737 	u32 mode, copy_type, width;
738 
739 	xe_gt_assert(gt, IS_ALIGNED(size, pitch));
740 	xe_gt_assert(gt, pitch <= U16_MAX);
741 	xe_gt_assert(gt, pitch);
742 	xe_gt_assert(gt, size);
743 
744 	if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
745 	    IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
746 	    IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
747 		mode = MEM_COPY_PAGE_COPY_MODE;
748 		copy_type = 0; /* linear copy */
749 		width = size / PAGE_COPY_MODE_PS;
750 	} else if (pitch > 1) {
751 		xe_gt_assert(gt, size / pitch <= U16_MAX);
752 		mode = 0; /* BYTE_COPY */
753 		copy_type = MEM_COPY_MATRIX_COPY;
754 		width = pitch;
755 	} else {
756 		mode = 0; /* BYTE_COPY */
757 		copy_type = 0; /* linear copy */
758 		width = size;
759 	}
760 
761 	xe_gt_assert(gt, width <= U16_MAX);
762 
763 	bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
764 	bb->cs[bb->len++] = width - 1;
765 	bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
766 	bb->cs[bb->len++] = pitch - 1;
767 	bb->cs[bb->len++] = pitch - 1;
768 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
769 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
770 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
771 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
772 	bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
773 			    FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
774 }
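
/*
 * Illustrative mode selection: a copy with size == SZ_8K and both offsets
 * 256-byte aligned takes the page-copy path (width == 8192 / 256 == 32);
 * the same copy with pitch == SZ_4K but unaligned offsets falls back to a
 * matrix copy with width == pitch and size / pitch == 2 rows.
 */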
775 
776 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
777 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
778 		      unsigned int pitch)
779 {
780 	struct xe_device *xe = gt_to_xe(gt);
781 
782 	if (xe->info.has_mem_copy_instr)
783 		emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
784 	else
785 		emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
786 }
787 
788 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
789 {
790 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
791 }
792 
793 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
794 			       struct xe_bb *bb,
795 			       u64 src_ofs, bool src_is_indirect,
796 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
797 			       u64 ccs_ofs, bool copy_ccs)
798 {
799 	struct xe_gt *gt = m->tile->primary_gt;
800 	u32 flush_flags = 0;
801 
802 	if (!copy_ccs && dst_is_indirect) {
803 		/*
804 		 * If the src is already in vram, then it should already
805 		 * have been cleared by us, or has been populated by the
806 		 * user. Make sure we copy the CCS aux state as-is.
807 		 *
808 		 * Otherwise if the bo doesn't have any CCS metadata attached,
809 		 * we still need to clear it for security reasons.
810 		 */
811 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
812 
813 		emit_copy_ccs(gt, bb,
814 			      dst_ofs, true,
815 			      ccs_src_ofs, src_is_indirect, dst_size);
816 
817 		flush_flags = MI_FLUSH_DW_CCS;
818 	} else if (copy_ccs) {
819 		if (!src_is_indirect)
820 			src_ofs = ccs_ofs;
821 		else if (!dst_is_indirect)
822 			dst_ofs = ccs_ofs;
823 
824 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
825 
826 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
827 			      src_is_indirect, dst_size);
828 		if (dst_is_indirect)
829 			flush_flags = MI_FLUSH_DW_CCS;
830 	}
831 
832 	return flush_flags;
833 }
834 
835 /**
836  * xe_migrate_copy() - Copy content of TTM resources.
837  * @m: The migration context.
838  * @src_bo: The buffer object @src is currently bound to.
839  * @dst_bo: If copying between resources created for the same bo, set this to
840  * the same value as @src_bo. If copying between buffer objects, set it to
841  * the buffer object @dst is currently bound to.
842  * @src: The source TTM resource.
843  * @dst: The destination TTM resource.
844  * @copy_only_ccs: If true, copy only CCS metadata
845  *
846  * Copies the contents of @src to @dst: On flat CCS devices,
847  * the CCS metadata is copied as well if needed, or if not present,
848  * the CCS metadata of @dst is cleared for security reasons.
849  *
850  * Return: Pointer to a dma_fence representing the last copy batch, or
851  * an error pointer on failure. If there is a failure, any copy operation
852  * started by the function call has been synced.
853  */
854 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
855 				  struct xe_bo *src_bo,
856 				  struct xe_bo *dst_bo,
857 				  struct ttm_resource *src,
858 				  struct ttm_resource *dst,
859 				  bool copy_only_ccs)
860 {
861 	struct xe_gt *gt = m->tile->primary_gt;
862 	struct xe_device *xe = gt_to_xe(gt);
863 	struct dma_fence *fence = NULL;
864 	u64 size = xe_bo_size(src_bo);
865 	struct xe_res_cursor src_it, dst_it, ccs_it;
866 	u64 src_L0_ofs, dst_L0_ofs;
867 	u32 src_L0_pt, dst_L0_pt;
868 	u64 src_L0, dst_L0;
869 	int pass = 0;
870 	int err;
871 	bool src_is_pltt = src->mem_type == XE_PL_TT;
872 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
873 	bool src_is_vram = mem_type_is_vram(src->mem_type);
874 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
875 	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
876 	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
877 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
878 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
879 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
880 	bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
881 		GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
882 
883 	/* Copying CCS between two different BOs is not supported yet. */
884 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
885 		return ERR_PTR(-EINVAL);
886 
887 	if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
888 		return ERR_PTR(-EINVAL);
889 
890 	if (!src_is_vram)
891 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
892 	else
893 		xe_res_first(src, 0, size, &src_it);
894 	if (!dst_is_vram)
895 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
896 	else
897 		xe_res_first(dst, 0, size, &dst_it);
898 
899 	if (copy_system_ccs)
900 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
901 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
902 				&ccs_it);
903 
904 	while (size) {
905 		u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
906 		struct xe_sched_job *job;
907 		struct xe_bb *bb;
908 		u32 flush_flags = 0;
909 		u32 update_idx;
910 		u64 ccs_ofs, ccs_size;
911 		u32 ccs_pt;
912 		u32 pte_flags;
913 
914 		bool usm = xe->info.has_usm;
915 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
916 
917 		src_L0 = xe_migrate_res_sizes(m, &src_it);
918 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
919 
920 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
921 			pass++, src_L0, dst_L0);
922 
923 		src_L0 = min(src_L0, dst_L0);
924 
925 		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
926 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
927 		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
928 					      &src_L0_ofs, &src_L0_pt, 0, 0,
929 					      avail_pts);
930 		if (copy_only_ccs) {
931 			dst_L0_ofs = src_L0_ofs;
932 		} else {
933 			pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
934 			batch_size += pte_update_size(m, pte_flags, dst,
935 						      &dst_it, &src_L0,
936 						      &dst_L0_ofs, &dst_L0_pt,
937 						      0, avail_pts, avail_pts);
938 		}
939 
940 		if (copy_system_ccs) {
941 			xe_assert(xe, type_device);
942 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
943 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
944 						      &ccs_ofs, &ccs_pt, 0,
945 						      2 * avail_pts,
946 						      avail_pts);
947 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
948 		}
949 
950 		/* Add copy commands size here */
951 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
952 			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
953 
954 		bb = xe_bb_new(gt, batch_size, usm);
955 		if (IS_ERR(bb)) {
956 			err = PTR_ERR(bb);
957 			goto err_sync;
958 		}
959 
960 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
961 			xe_res_next(&src_it, src_L0);
962 		else
963 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
964 				 &src_it, src_L0, src);
965 
966 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
967 			xe_res_next(&dst_it, src_L0);
968 		else if (!copy_only_ccs)
969 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
970 				 &dst_it, src_L0, dst);
971 
972 		if (copy_system_ccs)
973 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
974 
975 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
976 		update_idx = bb->len;
977 
978 		if (!copy_only_ccs)
979 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
980 
981 		if (needs_ccs_emit)
982 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
983 							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
984 							  dst_L0_ofs,
985 							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
986 							  src_L0, ccs_ofs, copy_ccs);
987 
988 		job = xe_bb_create_migration_job(m->q, bb,
989 						 xe_migrate_batch_base(m, usm),
990 						 update_idx);
991 		if (IS_ERR(job)) {
992 			err = PTR_ERR(job);
993 			goto err;
994 		}
995 
996 		xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
997 		if (!fence) {
998 			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
999 						    DMA_RESV_USAGE_BOOKKEEP);
1000 			if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
1001 				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
1002 							    DMA_RESV_USAGE_BOOKKEEP);
1003 			if (err)
1004 				goto err_job;
1005 		}
1006 
1007 		mutex_lock(&m->job_mutex);
1008 		xe_sched_job_arm(job);
1009 		dma_fence_put(fence);
1010 		fence = dma_fence_get(&job->drm.s_fence->finished);
1011 		xe_sched_job_push(job);
1012 
1013 		dma_fence_put(m->fence);
1014 		m->fence = dma_fence_get(fence);
1015 
1016 		mutex_unlock(&m->job_mutex);
1017 
1018 		xe_bb_free(bb, fence);
1019 		size -= src_L0;
1020 		continue;
1021 
1022 err_job:
1023 		xe_sched_job_put(job);
1024 err:
1025 		xe_bb_free(bb, NULL);
1026 
1027 err_sync:
1028 		/* Sync partial copy if any. FIXME: under job_mutex? */
1029 		if (fence) {
1030 			dma_fence_wait(fence, false);
1031 			dma_fence_put(fence);
1032 		}
1033 
1034 		return ERR_PTR(err);
1035 	}
1036 
1037 	return fence;
1038 }
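
/*
 * A minimal call sketch (illustrative only; new_mem is assumed to be the
 * already-allocated destination resource of a move, error handling elided):
 *
 *	fence = xe_migrate_copy(m, bo, bo, bo->ttm.resource, new_mem, false);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */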
1039 
1040 /**
1041  * xe_migrate_lrc() - Get the LRC from migrate context.
1042  * @migrate: Migrate context.
1043  *
1044  * Return: Pointer to the LRC of the migrate context's default exec queue.
1045  */
1046 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
1047 {
1048 	return migrate->q->lrc[0];
1049 }
1050 
1051 static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
1052 {
1053 	/*
1054 	 * The migrate VM is self-referential so it can modify its own PTEs (see
1055 	 * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
1056 	 * entries for kernel operations (copies, clears, CCS migrate), and
1057 	 * suballocate the rest to user operations (binds/unbinds). With
1058 	 * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
1059 	 * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
1060 	 */
1061 	return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
1062 }
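
/*
 * With NUM_KERNEL_PDE == 15 and XE_PAGE_SIZE == SZ_4K this resolves to
 * 13 * 4K == 0xD000, a fixed scratch address in the migrate VM that the
 * flush below can safely write to.
 */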
1063 
1064 static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
1065 {
1066 	u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
1067 
1068 	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
1069 		  MI_FLUSH_IMM_DW | flags;
1070 	dw[i++] = lower_32_bits(addr);
1071 	dw[i++] = upper_32_bits(addr);
1072 	dw[i++] = MI_NOOP;
1073 	dw[i++] = MI_NOOP;
1074 
1075 	return i;
1076 }
1077 
1078 /**
1079  * xe_migrate_ccs_rw_copy() - Copy CCS content of TTM resources.
1080  * @tile: Tile whose migration context is to be used.
1081  * @q: Exec queue to be used along with the migration context.
1082  * @src_bo: The buffer object @src is currently bound to.
1083  * @read_write: Creates BB commands for CCS read/write.
1084  *
1085  * Creates batch buffer instructions to copy CCS metadata from CCS pool to
1086  * memory and vice versa.
1087  *
1088  * This function should only be called for IGPU.
1089  *
1090  * Return: 0 if successful, negative error code on failure.
1091  */
1092 int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
1093 			   struct xe_bo *src_bo,
1094 			   enum xe_sriov_vf_ccs_rw_ctxs read_write)
1096 {
1097 	bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
1098 	bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
1099 	struct ttm_resource *src = src_bo->ttm.resource;
1100 	struct xe_migrate *m = tile->migrate;
1101 	struct xe_gt *gt = tile->primary_gt;
1102 	u32 batch_size, batch_size_allocated;
1103 	struct xe_device *xe = gt_to_xe(gt);
1104 	struct xe_res_cursor src_it, ccs_it;
1105 	u64 size = xe_bo_size(src_bo);
1106 	struct xe_bb *bb = NULL;
1107 	u64 src_L0, src_L0_ofs;
1108 	u32 src_L0_pt;
1109 	int err;
1110 
1111 	xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
1112 
1113 	xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
1114 			PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
1115 			&ccs_it);
1116 
1117 	/* Calculate Batch buffer size */
1118 	batch_size = 0;
1119 	while (size) {
1120 		batch_size += 10; /* 2 * (flush + PPGTT addr + 2 NOPs) */
1121 		u64 ccs_ofs, ccs_size;
1122 		u32 ccs_pt;
1123 
1124 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1125 
1126 		src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
1127 
1128 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1129 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1130 					      avail_pts);
1131 
1132 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1133 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1134 					      &ccs_pt, 0, avail_pts, avail_pts);
1135 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1136 
1137 		/* Add copy commands size here */
1138 		batch_size += EMIT_COPY_CCS_DW;
1139 
1140 		size -= src_L0;
1141 	}
1142 
1143 	bb = xe_bb_ccs_new(gt, batch_size, read_write);
1144 	if (IS_ERR(bb)) {
1145 		drm_err(&xe->drm, "BB allocation failed.\n");
1146 		err = PTR_ERR(bb);
1147 		goto err_ret;
1148 	}
1149 
1150 	batch_size_allocated = batch_size;
1151 	size = xe_bo_size(src_bo);
1152 	batch_size = 0;
1153 
1154 	/*
1155 	 * Emit PTE and copy commands here.
1156 	 * The CCS copy command can only support limited size. If the size to be
1157 	 * copied is more than the limit, divide copy into chunks. So, calculate
1158 	 * sizes here again before copy command is emitted.
1159 	 */
1160 	while (size) {
1161 		batch_size += 10; /* 2 * (flush + PPGTT addr + 2 NOPs) */
1162 		u32 flush_flags = 0;
1163 		u64 ccs_ofs, ccs_size;
1164 		u32 ccs_pt;
1165 
1166 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1167 
1168 		src_L0 = xe_migrate_res_sizes(m, &src_it);
1169 
1170 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1171 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1172 					      avail_pts);
1173 
1174 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1175 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1176 					      &ccs_pt, 0, avail_pts, avail_pts);
1177 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1178 		batch_size += EMIT_COPY_CCS_DW;
1179 
1180 		emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
1181 
1182 		emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
1183 
1184 		bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1185 		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
1186 						  src_L0_ofs, dst_is_pltt,
1187 						  src_L0, ccs_ofs, true);
1188 		bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
1189 
1190 		size -= src_L0;
1191 	}
1192 
1193 	xe_assert(xe, (batch_size_allocated == bb->len));
1194 	src_bo->bb_ccs[read_write] = bb;
1195 
1196 	return 0;
1197 
1198 err_ret:
1199 	return err;
1200 }
1201 
1202 /**
1203  * xe_migrate_exec_queue() - Get the execution queue from migrate context.
1204  * @migrate: Migrate context.
1205  *
1206  * Return: Pointer to execution queue on success, error on failure
1207  */
1208 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
1209 {
1210 	return migrate->q;
1211 }
1212 
1213 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1214 				 u32 size, u32 pitch)
1215 {
1216 	struct xe_device *xe = gt_to_xe(gt);
1217 	u32 *cs = bb->cs + bb->len;
1218 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
1219 
1220 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
1221 	*cs++ = pitch - 1;
1222 	*cs++ = (size / pitch) - 1;
1223 	*cs++ = pitch - 1;
1224 	*cs++ = lower_32_bits(src_ofs);
1225 	*cs++ = upper_32_bits(src_ofs);
1226 	if (GRAPHICS_VERx100(xe) >= 2000)
1227 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1228 	else
1229 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1230 
1231 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1232 
1233 	bb->len += len;
1234 }
1235 
1236 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1237 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
1238 {
1239 	struct xe_device *xe = gt_to_xe(gt);
1240 	u32 *cs = bb->cs + bb->len;
1241 	u32 len = XY_FAST_COLOR_BLT_DW;
1242 
1243 	if (GRAPHICS_VERx100(xe) < 1250)
1244 		len = 11;
1245 
1246 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
1247 		(len - 2);
1248 	if (GRAPHICS_VERx100(xe) >= 2000)
1249 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
1250 			(pitch - 1);
1251 	else
1252 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
1253 			(pitch - 1);
1254 	*cs++ = 0;
1255 	*cs++ = (size / pitch) << 16 | pitch / 4;
1256 	*cs++ = lower_32_bits(src_ofs);
1257 	*cs++ = upper_32_bits(src_ofs);
1258 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
1259 	*cs++ = 0;
1260 	*cs++ = 0;
1261 	*cs++ = 0;
1262 	*cs++ = 0;
1263 
1264 	if (len > 11) {
1265 		*cs++ = 0;
1266 		*cs++ = 0;
1267 		*cs++ = 0;
1268 		*cs++ = 0;
1269 		*cs++ = 0;
1270 	}
1271 
1272 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1273 
1274 	bb->len += len;
1275 }
1276 
1277 static bool has_service_copy_support(struct xe_gt *gt)
1278 {
1279 	/*
1280 	 * What we care about is whether the architecture was designed with
1281 	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
1282 	 * instructions) so check the architectural engine list rather than the
1283 	 * actual list since these instructions are usable on BCS0 even if
1284 	 * all of the actual service copy engines (BCS1-BCS8) have been fused
1285 	 * off.
1286 	 */
1287 	return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1288 					      XE_HW_ENGINE_BCS1);
1289 }
1290 
1291 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1292 {
1293 	if (has_service_copy_support(gt))
1294 		return PVC_MEM_SET_CMD_LEN_DW;
1295 	else
1296 		return XY_FAST_COLOR_BLT_DW;
1297 }
1298 
1299 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1300 		       u32 size, u32 pitch, bool is_vram)
1301 {
1302 	if (has_service_copy_support(gt))
1303 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1304 	else
1305 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1306 				     is_vram);
1307 }
1308 
1309 /**
1310  * xe_migrate_clear() - Clear content of TTM resources.
1311  * @m: The migration context.
1312  * @bo: The buffer object @dst is currently bound to.
1313  * @dst: The destination TTM resource to be cleared.
1314  * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1315  *
1316  * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1317  * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1318  * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1319  * TODO: Eliminate the @bo argument.
1320  *
1321  * Return: Pointer to a dma_fence representing the last clear batch, or
1322  * an error pointer on failure. If there is a failure, any clear operation
1323  * started by the function call has been synced.
1324  */
1325 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1326 				   struct xe_bo *bo,
1327 				   struct ttm_resource *dst,
1328 				   u32 clear_flags)
1329 {
1330 	bool clear_vram = mem_type_is_vram(dst->mem_type);
1331 	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1332 	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1333 	struct xe_gt *gt = m->tile->primary_gt;
1334 	struct xe_device *xe = gt_to_xe(gt);
1335 	bool clear_only_system_ccs = false;
1336 	struct dma_fence *fence = NULL;
1337 	u64 size = xe_bo_size(bo);
1338 	struct xe_res_cursor src_it;
1339 	struct ttm_resource *src = dst;
1340 	int err;
1341 
1342 	if (WARN_ON(!clear_bo_data && !clear_ccs))
1343 		return NULL;
1344 
1345 	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1346 		clear_only_system_ccs = true;
1347 
1348 	if (!clear_vram)
1349 		xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
1350 	else
1351 		xe_res_first(src, 0, xe_bo_size(bo), &src_it);
1352 
1353 	while (size) {
1354 		u64 clear_L0_ofs;
1355 		u32 clear_L0_pt;
1356 		u32 flush_flags = 0;
1357 		u64 clear_L0;
1358 		struct xe_sched_job *job;
1359 		struct xe_bb *bb;
1360 		u32 batch_size, update_idx;
1361 		u32 pte_flags;
1362 
1363 		bool usm = xe->info.has_usm;
1364 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1365 
1366 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1367 
1368 		/* Calculate final sizes and batch size. */
1369 		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1370 		batch_size = 1 +
1371 			pte_update_size(m, pte_flags, src, &src_it,
1372 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1373 					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1374 					avail_pts);
1375 
1376 		if (xe_migrate_needs_ccs_emit(xe))
1377 			batch_size += EMIT_COPY_CCS_DW;
1378 
1379 		/* Clear commands */
1380 
1381 		if (WARN_ON_ONCE(!clear_L0))
1382 			break;
1383 
1384 		bb = xe_bb_new(gt, batch_size, usm);
1385 		if (IS_ERR(bb)) {
1386 			err = PTR_ERR(bb);
1387 			goto err_sync;
1388 		}
1389 
1390 		size -= clear_L0;
1391 		/* Preemption is enabled again by the ring ops. */
1392 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
1393 			xe_res_next(&src_it, clear_L0);
1394 		} else {
1395 			emit_pte(m, bb, clear_L0_pt, clear_vram,
1396 				 clear_only_system_ccs, &src_it, clear_L0, dst);
1397 			flush_flags |= MI_INVALIDATE_TLB;
1398 		}
1399 
1400 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1401 		update_idx = bb->len;
1402 
1403 		if (clear_bo_data)
1404 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1405 
1406 		if (xe_migrate_needs_ccs_emit(xe)) {
1407 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1408 				      m->cleared_mem_ofs, false, clear_L0);
1409 			flush_flags |= MI_FLUSH_DW_CCS;
1410 		}
1411 
1412 		job = xe_bb_create_migration_job(m->q, bb,
1413 						 xe_migrate_batch_base(m, usm),
1414 						 update_idx);
1415 		if (IS_ERR(job)) {
1416 			err = PTR_ERR(job);
1417 			goto err;
1418 		}
1419 
1420 		xe_sched_job_add_migrate_flush(job, flush_flags);
1421 		if (!fence) {
1422 			/*
1423 			 * There can't be anything userspace related at this
1424 			 * point, so we just need to respect any potential move
1425 			 * fences, which are always tracked as
1426 			 * DMA_RESV_USAGE_KERNEL.
1427 			 */
1428 			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1429 						    DMA_RESV_USAGE_KERNEL);
1430 			if (err)
1431 				goto err_job;
1432 		}
1433 
1434 		mutex_lock(&m->job_mutex);
1435 		xe_sched_job_arm(job);
1436 		dma_fence_put(fence);
1437 		fence = dma_fence_get(&job->drm.s_fence->finished);
1438 		xe_sched_job_push(job);
1439 
1440 		dma_fence_put(m->fence);
1441 		m->fence = dma_fence_get(fence);
1442 
1443 		mutex_unlock(&m->job_mutex);
1444 
1445 		xe_bb_free(bb, fence);
1446 		continue;
1447 
1448 err_job:
1449 		xe_sched_job_put(job);
1450 err:
1451 		xe_bb_free(bb, NULL);
1452 err_sync:
1453 		/* Sync partial copies if any. FIXME: job_mutex? */
1454 		if (fence) {
1455 			dma_fence_wait(fence, false);
1456 			dma_fence_put(fence);
1457 		}
1458 
1459 		return ERR_PTR(err);
1460 	}
1461 
1462 	if (clear_ccs)
1463 		bo->ccs_cleared = true;
1464 
1465 	return fence;
1466 }
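
/*
 * A minimal call sketch (illustrative only), clearing both BO data and CCS
 * metadata of a freshly allocated BO:
 *
 *	fence = xe_migrate_clear(m, bo, bo->ttm.resource,
 *				 XE_MIGRATE_CLEAR_FLAG_FULL);
 */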
1467 
1468 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1469 			  const struct xe_vm_pgtable_update_op *pt_op,
1470 			  const struct xe_vm_pgtable_update *update,
1471 			  struct xe_migrate_pt_update *pt_update)
1472 {
1473 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1474 	u32 chunk;
1475 	u32 ofs = update->ofs, size = update->qwords;
1476 
1477 	/*
1478 	 * If we have 512 entries (max), we would populate it ourselves,
1479 	 * and update the PDE above it to the new pointer.
1480 	 * The only time this can happen is if we have to update the top
1481 	 * PDE. This requires a BO that is almost vm->size big.
1482 	 *
1483 	 * This shouldn't be possible in practice... might change when 16K
1484 	 * pages are used. Hence the assert.
1485 	 */
1486 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1487 	if (!ppgtt_ofs)
1488 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1489 						xe_bo_addr(update->pt_bo, 0,
1490 							   XE_PAGE_SIZE), false);
1491 
1492 	do {
1493 		u64 addr = ppgtt_ofs + ofs * 8;
1494 
1495 		chunk = min(size, MAX_PTE_PER_SDI);
1496 
1497 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1498 		if (!(bb->len & 1))
1499 			bb->cs[bb->len++] = MI_NOOP;
1500 
1501 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1502 		bb->cs[bb->len++] = lower_32_bits(addr);
1503 		bb->cs[bb->len++] = upper_32_bits(addr);
1504 		if (pt_op->bind)
1505 			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1506 				      ofs, chunk, update);
1507 		else
1508 			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1509 				   ofs, chunk, update);
1510 
1511 		bb->len += chunk * 2;
1512 		ofs += chunk;
1513 		size -= chunk;
1514 	} while (size);
1515 }
1516 
1517 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1518 {
1519 	return xe_vm_get(m->q->vm);
1520 }
1521 
1522 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1523 struct migrate_test_params {
1524 	struct xe_test_priv base;
1525 	bool force_gpu;
1526 };
1527 
1528 #define to_migrate_test_params(_priv) \
1529 	container_of(_priv, struct migrate_test_params, base)
1530 #endif
1531 
1532 static struct dma_fence *
1533 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1534 			       struct xe_migrate_pt_update *pt_update)
1535 {
1536 	XE_TEST_DECLARE(struct migrate_test_params *test =
1537 			to_migrate_test_params
1538 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1539 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1540 	struct xe_vm *vm = pt_update->vops->vm;
1541 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1542 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1543 	int err;
1544 	u32 i, j;
1545 
1546 	if (XE_TEST_ONLY(test && test->force_gpu))
1547 		return ERR_PTR(-ETIME);
1548 
1549 	if (ops->pre_commit) {
1550 		pt_update->job = NULL;
1551 		err = ops->pre_commit(pt_update);
1552 		if (err)
1553 			return ERR_PTR(err);
1554 	}
1555 
1556 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1557 		const struct xe_vm_pgtable_update_op *pt_op =
1558 			&pt_update_ops->ops[i];
1559 
1560 		for (j = 0; j < pt_op->num_entries; j++) {
1561 			const struct xe_vm_pgtable_update *update =
1562 				&pt_op->entries[j];
1563 
1564 			if (pt_op->bind)
1565 				ops->populate(pt_update, m->tile,
1566 					      &update->pt_bo->vmap, NULL,
1567 					      update->ofs, update->qwords,
1568 					      update);
1569 			else
1570 				ops->clear(pt_update, m->tile,
1571 					   &update->pt_bo->vmap, NULL,
1572 					   update->ofs, update->qwords, update);
1573 		}
1574 	}
1575 
1576 	trace_xe_vm_cpu_bind(vm);
1577 	xe_device_wmb(vm->xe);
1578 
1579 	return dma_fence_get_stub();
1580 }
1581 
1582 static struct dma_fence *
1583 __xe_migrate_update_pgtables(struct xe_migrate *m,
1584 			     struct xe_migrate_pt_update *pt_update,
1585 			     struct xe_vm_pgtable_update_ops *pt_update_ops)
1586 {
1587 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1588 	struct xe_tile *tile = m->tile;
1589 	struct xe_gt *gt = tile->primary_gt;
1590 	struct xe_device *xe = tile_to_xe(tile);
1591 	struct xe_sched_job *job;
1592 	struct dma_fence *fence;
1593 	struct drm_suballoc *sa_bo = NULL;
1594 	struct xe_bb *bb;
1595 	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1596 	u32 num_updates = 0, current_update = 0;
1597 	u64 addr;
1598 	int err = 0;
1599 	bool is_migrate = pt_update_ops->q == m->q;
1600 	bool usm = is_migrate && xe->info.has_usm;
1601 
1602 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1603 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1604 		struct xe_vm_pgtable_update *updates = pt_op->entries;
1605 
1606 		num_updates += pt_op->num_entries;
1607 		for (j = 0; j < pt_op->num_entries; ++j) {
1608 			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1609 						    MAX_PTE_PER_SDI);
1610 
1611 			/* align noop + MI_STORE_DATA_IMM cmd prefix */
1612 			batch_size += 4 * num_cmds + updates[j].qwords * 2;
1613 		}
1614 	}
1615 
1616 	/* fixed + PTE entries */
1617 	if (IS_DGFX(xe))
1618 		batch_size += 2;
1619 	else
1620 		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1621 			num_updates * 2;
1622 
1623 	bb = xe_bb_new(gt, batch_size, usm);
1624 	if (IS_ERR(bb))
1625 		return ERR_CAST(bb);
1626 
1627 	/* For sysmem PTEs, we need to map them in our hole. */
1628 	if (!IS_DGFX(xe)) {
1629 		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1630 		u32 ptes, ofs;
1631 
1632 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1633 		if (!is_migrate) {
1634 			u32 num_units = DIV_ROUND_UP(num_updates,
1635 						     NUM_VMUSA_WRITES_PER_UNIT);
1636 
1637 			if (num_units > m->vm_update_sa.size) {
1638 				err = -ENOBUFS;
1639 				goto err_bb;
1640 			}
1641 			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1642 						 GFP_KERNEL, true, 0);
1643 			if (IS_ERR(sa_bo)) {
1644 				err = PTR_ERR(sa_bo);
1645 				goto err_bb;
1646 			}
1647 
1648 			ppgtt_ofs = NUM_KERNEL_PDE +
1649 				(drm_suballoc_soffset(sa_bo) /
1650 				 NUM_VMUSA_UNIT_PER_PAGE);
1651 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1652 				    NUM_VMUSA_UNIT_PER_PAGE) *
1653 				VM_SA_UPDATE_UNIT_SIZE;
1654 		}
1655 
1656 		/* Map our PT's to gtt */
1657 		i = 0;
1658 		j = 0;
1659 		ptes = num_updates;
1660 		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1661 		while (ptes) {
1662 			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1663 			u32 idx = 0;
1664 
1665 			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1666 				MI_SDI_NUM_QW(chunk);
1667 			bb->cs[bb->len++] = ofs;
1668 			bb->cs[bb->len++] = 0; /* upper_32_bits */
1669 
1670 			for (; i < pt_update_ops->num_ops; ++i) {
1671 				struct xe_vm_pgtable_update_op *pt_op =
1672 					&pt_update_ops->ops[i];
1673 				struct xe_vm_pgtable_update *updates = pt_op->entries;
1674 
1675 				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1676 					struct xe_vm *vm = pt_update->vops->vm;
1677 					struct xe_bo *pt_bo = updates[j].pt_bo;
1678 
1679 					if (idx == chunk)
1680 						goto next_cmd;
1681 
1682 					xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
1683 
1684 					/* Map a PT at most once */
1685 					if (pt_bo->update_index < 0)
1686 						pt_bo->update_index = current_update;
1687 
1688 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1689 									 pat_index, 0);
1690 					bb->cs[bb->len++] = lower_32_bits(addr);
1691 					bb->cs[bb->len++] = upper_32_bits(addr);
1692 				}
1693 
1694 				j = 0;
1695 			}
1696 
1697 next_cmd:
1698 			ptes -= chunk;
1699 			ofs += chunk * sizeof(u64);
1700 		}
1701 
1702 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1703 		update_idx = bb->len;
1704 
1705 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1706 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1707 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1708 			struct xe_vm_pgtable_update_op *pt_op =
1709 				&pt_update_ops->ops[i];
1710 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1711 
1712 			for (j = 0; j < pt_op->num_entries; ++j) {
1713 				struct xe_bo *pt_bo = updates[j].pt_bo;
1714 
1715 				write_pgtable(tile, bb, addr +
1716 					      pt_bo->update_index * XE_PAGE_SIZE,
1717 					      pt_op, &updates[j], pt_update);
1718 			}
1719 		}
1720 	} else {
1721 		/* phys pages, no preamble required */
1722 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1723 		update_idx = bb->len;
1724 
1725 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1726 			struct xe_vm_pgtable_update_op *pt_op =
1727 				&pt_update_ops->ops[i];
1728 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1729 
1730 			for (j = 0; j < pt_op->num_entries; ++j)
1731 				write_pgtable(tile, bb, 0, pt_op, &updates[j],
1732 					      pt_update);
1733 		}
1734 	}
1735 
1736 	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1737 					 xe_migrate_batch_base(m, usm),
1738 					 update_idx);
1739 	if (IS_ERR(job)) {
1740 		err = PTR_ERR(job);
1741 		goto err_sa;
1742 	}
1743 
1744 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1745 
1746 	if (ops->pre_commit) {
1747 		pt_update->job = job;
1748 		err = ops->pre_commit(pt_update);
1749 		if (err)
1750 			goto err_job;
1751 	}
1752 	if (is_migrate)
1753 		mutex_lock(&m->job_mutex);
1754 
1755 	xe_sched_job_arm(job);
1756 	fence = dma_fence_get(&job->drm.s_fence->finished);
1757 	xe_sched_job_push(job);
1758 
1759 	if (is_migrate)
1760 		mutex_unlock(&m->job_mutex);
1761 
1762 	xe_bb_free(bb, fence);
1763 	drm_suballoc_free(sa_bo, fence);
1764 
1765 	return fence;
1766 
1767 err_job:
1768 	xe_sched_job_put(job);
1769 err_sa:
1770 	drm_suballoc_free(sa_bo, NULL);
1771 err_bb:
1772 	xe_bb_free(bb, NULL);
1773 	return ERR_PTR(err);
1774 }
1775 
1776 /**
1777  * xe_migrate_update_pgtables() - Pipelined page-table update
1778  * @m: The migrate context.
1779  * @pt_update: PT update arguments
1780  *
1781  * Perform a pipelined page-table update. The update descriptors are typically
1782  * built under the same lock critical section as a call to this function. If
1783  * using the default engine for the updates, they will be performed in the
1784  * order they grab the job_mutex. If different engines are used, external
1785  * synchronization is needed for overlapping updates to maintain page-table
1786  * consistency. Note that the meaning of "overlapping" is that the updates
1787  * touch the same page-table, which might be a higher-level page-directory.
1788  * If no pipelining is needed, then updates may be performed by the CPU.
1789  *
1790  * Return: A dma_fence that, when signaled, indicates the update completion.
1791  */
1792 struct dma_fence *
1793 xe_migrate_update_pgtables(struct xe_migrate *m,
1794 			   struct xe_migrate_pt_update *pt_update)
1796 {
1797 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1798 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1799 	struct dma_fence *fence;
1800 
1801 	fence = xe_migrate_update_pgtables_cpu(m, pt_update);
1802 
1803 	/* -ETIME indicates a job is needed; any other error is a genuine failure */
1804 	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
1805 		return fence;
1806 
1807 	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
1808 }
1809 
1810 /**
1811  * xe_migrate_wait() - Complete all operations using the xe_migrate context
1812  * @m: Migrate context to wait for.
1813  *
1814  * Waits until the GPU no longer uses the migrate context's default engine
1815  * or its page-table objects. FIXME: What about separate page-table update
1816  * engines?
1817  */
1818 void xe_migrate_wait(struct xe_migrate *m)
1819 {
1820 	if (m->fence)
1821 		dma_fence_wait(m->fence, false);
1822 }
1823 
1824 static u32 pte_update_cmd_size(u64 size)
1825 {
1826 	u32 num_dword;
1827 	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
1828 
1829 	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
1830 
1831 	/*
1832 	 * The MI_STORE_DATA_IMM command is used to update the page table. Each
1833 	 * instruction can update at most MAX_PTE_PER_SDI PTE entries. To
1834 	 * update n (n <= MAX_PTE_PER_SDI) PTE entries, we need:
1835 	 *
1836 	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
1837 	 * - 2 dwords for the page table's physical location
1838 	 * - 2*n dwords for the PTE values to fill (each PTE entry is 2 dwords)
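	 *
	 * For example, an 8M transfer has 2048 4K PTEs and thus needs
	 * DIV_ROUND_UP(2048, 510) = 5 commands, for a total of
	 * 3 * 5 + 2 * 2048 = 4111 dwords.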
1839 	 */
1840 	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
1841 	num_dword += entries * 2;
1842 
1843 	return num_dword;
1844 }
1845 
1846 static void build_pt_update_batch_sram(struct xe_migrate *m,
1847 				       struct xe_bb *bb, u32 pt_offset,
1848 				       struct drm_pagemap_addr *sram_addr,
1849 				       u32 size, int level)
1850 {
1851 	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
1852 	u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
1853 	u32 ptes;
1854 	int i = 0;
1855 
1856 	xe_tile_assert(m->tile, PAGE_ALIGNED(size));
1857 
1858 	ptes = DIV_ROUND_UP(size, gpu_page_size);
1859 	while (ptes) {
1860 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1861 
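		/*
		 * At level 0, trim each chunk to a whole number of CPU pages
		 * so that one CPU page's worth of 4K PTEs is never split
		 * across two commands (see the "again" loop below).
		 */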
1862 		if (!level)
1863 			chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
1864 
1865 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1866 		bb->cs[bb->len++] = pt_offset;
1867 		bb->cs[bb->len++] = 0;
1868 
1869 		pt_offset += chunk * 8;
1870 		ptes -= chunk;
1871 
1872 		while (chunk--) {
1873 			u64 addr = sram_addr[i].addr;
1874 			u64 pte;
1875 
1876 			xe_tile_assert(m->tile, sram_addr[i].proto ==
1877 				       DRM_INTERCONNECT_SYSTEM);
1878 			xe_tile_assert(m->tile, addr);
1879 			xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
1880 
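			/*
			 * When the CPU page size exceeds the 4K GPU page size,
			 * emit one PTE per XE_PAGE_SIZE slice of the CPU page
			 * before advancing to the next drm_pagemap_addr entry.
			 */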
1881 again:
1882 			pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
1883 								addr, pat_index,
1884 								level, false, 0);
1885 			bb->cs[bb->len++] = lower_32_bits(pte);
1886 			bb->cs[bb->len++] = upper_32_bits(pte);
1887 
1888 			if (gpu_page_size < PAGE_SIZE) {
1889 				addr += XE_PAGE_SIZE;
1890 				if (!PAGE_ALIGNED(addr)) {
1891 					chunk--;
1892 					goto again;
1893 				}
1894 				i++;
1895 			} else {
1896 				i += gpu_page_size / PAGE_SIZE;
1897 			}
1898 		}
1899 	}
1900 }
1901 
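/*
 * Use 2M PDEs for the copy window only if every chunk backing the range is a
 * contiguous allocation whose order matches the level-1 (2M) page size.
 */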
1902 static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
1903 				    unsigned long size)
1904 {
1905 	u32 large_size = (0x1 << xe_pt_shift(1));
1906 	unsigned long i, incr = large_size / PAGE_SIZE;
1907 
1908 	for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
1909 		if (PAGE_SIZE << sram_addr[i].order != large_size)
1910 			return false;
1911 
1912 	return true;
1913 }
1914 
1915 enum xe_migrate_copy_dir {
1916 	XE_MIGRATE_COPY_TO_VRAM,
1917 	XE_MIGRATE_COPY_TO_SRAM,
1918 };
1919 
1920 #define XE_CACHELINE_BYTES	64ull
1921 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
1922 
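/*
 * Pick the widest pitch that evenly divides the copy length; a pitch of 1 is
 * only usable on platforms with the memory copy instruction (see the assert
 * below).
 */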
1923 static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
1924 {
1925 	u32 pitch;
1926 
1927 	if (IS_ALIGNED(len, PAGE_SIZE))
1928 		pitch = PAGE_SIZE;
1929 	else if (IS_ALIGNED(len, SZ_4K))
1930 		pitch = SZ_4K;
1931 	else if (IS_ALIGNED(len, SZ_256))
1932 		pitch = SZ_256;
1933 	else if (IS_ALIGNED(len, 4))
1934 		pitch = 4;
1935 	else
1936 		pitch = 1;
1937 
1938 	xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
1939 	return pitch;
1940 }
1941 
1942 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
1943 					 unsigned long len,
1944 					 unsigned long sram_offset,
1945 					 struct drm_pagemap_addr *sram_addr,
1946 					 u64 vram_addr,
1947 					 const enum xe_migrate_copy_dir dir)
1948 {
1949 	struct xe_gt *gt = m->tile->primary_gt;
1950 	struct xe_device *xe = gt_to_xe(gt);
1951 	bool use_usm_batch = xe->info.has_usm;
1952 	struct dma_fence *fence = NULL;
1953 	u32 batch_size = 1;
1954 	u64 src_L0_ofs, dst_L0_ofs;
1955 	struct xe_sched_job *job;
1956 	struct xe_bb *bb;
1957 	u32 update_idx, pt_slot = 0;
1958 	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
1959 	unsigned int pitch = xe_migrate_copy_pitch(xe, len);
1960 	int err;
1961 	unsigned long i, j;
1962 	bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
1963 
1964 	if (!xe->info.has_mem_copy_instr &&
1965 	    drm_WARN_ON(&xe->drm,
1966 			(!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
1967 		return ERR_PTR(-EOPNOTSUPP);
1968 
1969 	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
1970 
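	/* Batch: PTE updates for the copy window, MI_BATCH_BUFFER_END, the blit */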
1971 	batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
1972 	batch_size += EMIT_COPY_DW;
1973 
1974 	bb = xe_bb_new(gt, batch_size, use_usm_batch);
1975 	if (IS_ERR(bb)) {
1976 		err = PTR_ERR(bb);
1977 		return ERR_PTR(err);
1978 	}
1979 
1980 	/*
1981 	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
1982 	 * the entry is populated by the GPU pagemap, but subsequent entries within
1983 	 * the range of that order are not populated.
1984 	 * build_pt_update_batch_sram() expects a fully populated array of
1985 	 * struct drm_pagemap_addr. Ensure this is the case even with higher
1986 	 * orders.
1987 	 */
1988 	for (i = 0; !use_pde && i < npages;) {
1989 		unsigned int order = sram_addr[i].order;
1990 
1991 		for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
1992 			if (!sram_addr[i + j].addr)
1993 				sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
1994 
1995 		i += NR_PAGES(order);
1996 	}
1997 
1998 	if (use_pde)
1999 		build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
2000 					   sram_addr, npages << PAGE_SHIFT, 1);
2001 	else
2002 		build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
2003 					   sram_addr, npages << PAGE_SHIFT, 0);
2004 
2005 	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
2006 		if (use_pde)
2007 			src_L0_ofs = m->large_page_copy_ofs + sram_offset;
2008 		else
2009 			src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2010 		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2011 
2012 	} else {
2013 		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2014 		if (use_pde)
2015 			dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
2016 		else
2017 			dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2018 	}
2019 
2020 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
2021 	update_idx = bb->len;
2022 
2023 	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
2024 
2025 	job = xe_bb_create_migration_job(m->q, bb,
2026 					 xe_migrate_batch_base(m, use_usm_batch),
2027 					 update_idx);
2028 	if (IS_ERR(job)) {
2029 		err = PTR_ERR(job);
2030 		goto err;
2031 	}
2032 
2033 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
2034 
2035 	mutex_lock(&m->job_mutex);
2036 	xe_sched_job_arm(job);
2037 	fence = dma_fence_get(&job->drm.s_fence->finished);
2038 	xe_sched_job_push(job);
2039 
2040 	dma_fence_put(m->fence);
2041 	m->fence = dma_fence_get(fence);
2042 	mutex_unlock(&m->job_mutex);
2043 
2044 	xe_bb_free(bb, fence);
2045 
2046 	return fence;
2047 
2048 err:
2049 	xe_bb_free(bb, NULL);
2050 
2051 	return ERR_PTR(err);
2052 }
2053 
2054 /**
2055  * xe_migrate_to_vram() - Migrate to VRAM
2056  * @m: The migration context.
2057  * @npages: Number of pages to migrate.
2058  * @src_addr: Array of DMA information (source of migrate)
2059  * @dst_addr: Device physical address of VRAM (destination of migrate)
2060  *
2061  * Copy from an array of DMA addresses to a VRAM device physical address.
2062  *
2063  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2064  * failure
2065  */
2066 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
2067 				     unsigned long npages,
2068 				     struct drm_pagemap_addr *src_addr,
2069 				     u64 dst_addr)
2070 {
2071 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
2072 			       XE_MIGRATE_COPY_TO_VRAM);
2073 }
2074 
2075 /**
2076  * xe_migrate_from_vram() - Migrate from VRAM
2077  * @m: The migration context.
2078  * @npages: Number of pages to migrate.
2079  * @src_addr: Device physical address of VRAM (source of migrate)
2080  * @dst_addr: Array of DMA information (destination of migrate)
2081  *
2082  * Copy from a VRAM device physical address to an array of DMA addresses.
2083  *
2084  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2085  * failure
2086  */
2087 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
2088 				       unsigned long npages,
2089 				       u64 src_addr,
2090 				       struct drm_pagemap_addr *dst_addr)
2091 {
2092 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
2093 			       XE_MIGRATE_COPY_TO_SRAM);
2094 }
2095 
2096 static void xe_migrate_dma_unmap(struct xe_device *xe,
2097 				 struct drm_pagemap_addr *pagemap_addr,
2098 				 int len, int write)
2099 {
2100 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2101 
2102 	for (i = 0; i < npages; ++i) {
2103 		if (!pagemap_addr[i].addr)
2104 			break;
2105 
2106 		dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
2107 			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2108 	}
2109 	kfree(pagemap_addr);
2110 }
2111 
2112 static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
2113 						   void *buf, int len,
2114 						   int write)
2115 {
2116 	struct drm_pagemap_addr *pagemap_addr;
2117 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2118 
2119 	pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL);
2120 	if (!pagemap_addr)
2121 		return ERR_PTR(-ENOMEM);
2122 
2123 	for (i = 0; i < npages; ++i) {
2124 		dma_addr_t addr;
2125 		struct page *page;
2126 		enum dma_data_direction dir = write ? DMA_TO_DEVICE :
2127 						      DMA_FROM_DEVICE;
2128 
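		/* Resolve the backing page for both vmalloc and lowmem buffers */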
2129 		if (is_vmalloc_addr(buf))
2130 			page = vmalloc_to_page(buf);
2131 		else
2132 			page = virt_to_page(buf);
2133 
2134 		addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
2135 		if (dma_mapping_error(xe->drm.dev, addr))
2136 			goto err_fault;
2137 
2138 		pagemap_addr[i] =
2139 			drm_pagemap_addr_encode(addr,
2140 						DRM_INTERCONNECT_SYSTEM,
2141 						0, dir);
2142 		buf += PAGE_SIZE;
2143 	}
2144 
2145 	return pagemap_addr;
2146 
2147 err_fault:
2148 	xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
2149 	return ERR_PTR(-EFAULT);
2150 }
2151 
2152 /**
2153  * xe_migrate_access_memory - Access memory of a BO via GPU
2154  *
2155  * @m: The migration context.
2156  * @bo: buffer object
2157  * @offset: access offset into buffer object
2158  * @buf: pointer to caller memory to read into or write from
2159  * @len: length of access
2160  * @write: write access
2161  *
2162  * Access memory of a BO via the GPU, either reading into or writing from a
2163  * passed-in pointer. The pointer is DMA-mapped for GPU access, and GPU
2164  * commands are issued to copy to or from it.
2165  *
2166  * Return:
2167  * 0 if successful, negative error code on failure.
2168  */
2169 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
2170 			     unsigned long offset, void *buf, int len,
2171 			     int write)
2172 {
2173 	struct xe_tile *tile = m->tile;
2174 	struct xe_device *xe = tile_to_xe(tile);
2175 	struct xe_res_cursor cursor;
2176 	struct dma_fence *fence = NULL;
2177 	struct drm_pagemap_addr *pagemap_addr;
2178 	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
2179 	int bytes_left = len, current_page = 0;
2180 	void *orig_buf = buf;
2181 
2182 	xe_bo_assert_held(bo);
2183 
2184 	/* Use a bounce buffer for small and unaligned accesses */
2185 	if (!xe->info.has_mem_copy_instr &&
2186 	    (!IS_ALIGNED(len, 4) ||
2187 	     !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
2188 	     !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
2189 		int buf_offset = 0;
2190 		void *bounce;
2191 		int err;
2192 
2193 		BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
2194 		bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
2195 		if (!bounce)
2196 			return -ENOMEM;
2197 
2198 		/*
2199 		 * Less than ideal for large unaligned accesses, but these should
2200 		 * be fairly rare; we can fix this up if they become common.
2201 		 */
2202 		do {
2203 			int copy_bytes = min_t(int, bytes_left,
2204 					       XE_CACHELINE_BYTES -
2205 					       (offset & XE_CACHELINE_MASK));
2206 			int ptr_offset = offset & XE_CACHELINE_MASK;
2207 
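			/*
			 * Read-modify-write: fetch the whole cacheline first,
			 * then for writes splice in the caller's bytes and
			 * write the cacheline back out.
			 */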
2208 			err = xe_migrate_access_memory(m, bo,
2209 						       offset &
2210 						       ~XE_CACHELINE_MASK,
2211 						       bounce,
2212 						       XE_CACHELINE_BYTES, 0);
2213 			if (err)
2214 				break;
2215 
2216 			if (write) {
2217 				memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
2218 
2219 				err = xe_migrate_access_memory(m, bo,
2220 							       offset & ~XE_CACHELINE_MASK,
2221 							       bounce,
2222 							       XE_CACHELINE_BYTES, write);
2223 				if (err)
2224 					break;
2225 			} else {
2226 				memcpy(buf + buf_offset, bounce + ptr_offset,
2227 				       copy_bytes);
2228 			}
2229 
2230 			bytes_left -= copy_bytes;
2231 			buf_offset += copy_bytes;
2232 			offset += copy_bytes;
2233 		} while (bytes_left);
2234 
2235 		kfree(bounce);
2236 		return err;
2237 	}
2238 
2239 	pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
2240 	if (IS_ERR(pagemap_addr))
2241 		return PTR_ERR(pagemap_addr);
2242 
2243 	xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
2244 
2245 	do {
2246 		struct dma_fence *__fence;
2247 		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
2248 			cursor.start;
2249 		int current_bytes;
2250 		u32 pitch;
2251 
2252 		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
2253 			current_bytes = min_t(int, bytes_left,
2254 					      MAX_PREEMPTDISABLE_TRANSFER);
2255 		else
2256 			current_bytes = min_t(int, bytes_left, cursor.size);
2257 
2258 		pitch = xe_migrate_copy_pitch(xe, current_bytes);
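		/*
		 * Clamp to what a single copy instruction can move: at most
		 * U16_MAX rows of @pitch bytes with the memory copy
		 * instruction, else S16_MAX rows rounded down to a cacheline
		 * multiple.
		 */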
2259 		if (xe->info.has_mem_copy_instr)
2260 			current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
2261 		else
2262 			current_bytes = min_t(int, current_bytes,
2263 					      round_down(S16_MAX * pitch,
2264 							 XE_CACHELINE_BYTES));
2265 
2266 		__fence = xe_migrate_vram(m, current_bytes,
2267 					  (unsigned long)buf & ~PAGE_MASK,
2268 					  &pagemap_addr[current_page],
2269 					  vram_addr, write ?
2270 					  XE_MIGRATE_COPY_TO_VRAM :
2271 					  XE_MIGRATE_COPY_TO_SRAM);
2272 		if (IS_ERR(__fence)) {
2273 			if (fence) {
2274 				dma_fence_wait(fence, false);
2275 				dma_fence_put(fence);
2276 			}
2277 			fence = __fence;
2278 			goto out_err;
2279 		}
2280 
2281 		dma_fence_put(fence);
2282 		fence = __fence;
2283 
2284 		buf += current_bytes;
2285 		offset += current_bytes;
2286 		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
2287 		bytes_left -= current_bytes;
2288 		if (bytes_left)
2289 			xe_res_next(&cursor, current_bytes);
2290 	} while (bytes_left);
2291 
2292 	dma_fence_wait(fence, false);
2293 	dma_fence_put(fence);
2294 
2295 out_err:
2296 	xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
2297 	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
2298 }
2299 
2300 /**
2301  * xe_migrate_job_lock() - Lock migrate job lock
2302  * @m: The migration context.
2303  * @q: Queue associated with the operation which requires a lock
2304  *
2305  * Lock the migrate job lock if the queue is a migration queue; otherwise
2306  * assert the VM's dma-resv is held (user queues have their own locking).
2307  */
2308 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
2309 {
2310 	bool is_migrate = q == m->q;
2311 
2312 	if (is_migrate)
2313 		mutex_lock(&m->job_mutex);
2314 	else
2315 		xe_vm_assert_held(q->vm);	/* User queue VMs should be locked */
2316 }
2317 
2318 /**
2319  * xe_migrate_job_unlock() - Unlock migrate job lock
2320  * @m: The migration context.
2321  * @q: Queue associated with the operation which requires a lock
2322  *
2323  * Unlock the migrate job lock if the queue is a migration queue; otherwise
2324  * assert the VM's dma-resv is held (user queues have their own locking).
2325  */
2326 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
2327 {
2328 	bool is_migrate = q == m->q;
2329 
2330 	if (is_migrate)
2331 		mutex_unlock(&m->job_mutex);
2332 	else
2333 		xe_vm_assert_held(q->vm);	/* User queue VMs should be locked */
2334 }
2335 
2336 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2337 #include "tests/xe_migrate.c"
2338 #endif
2339