xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 6916d5703ddf9a38f1f6c2cc793381a24ee914c6)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/drm_pagemap.h>
13 #include <drm/ttm/ttm_tt.h>
14 #include <uapi/drm/xe_drm.h>
15 
16 #include <generated/xe_wa_oob.h>
17 
18 #include "instructions/xe_gpu_commands.h"
19 #include "instructions/xe_mi_commands.h"
20 #include "regs/xe_gtt_defs.h"
21 #include "tests/xe_test.h"
22 #include "xe_assert.h"
23 #include "xe_bb.h"
24 #include "xe_bo.h"
25 #include "xe_exec_queue.h"
26 #include "xe_ggtt.h"
27 #include "xe_gt.h"
28 #include "xe_gt_printk.h"
29 #include "xe_hw_engine.h"
30 #include "xe_lrc.h"
31 #include "xe_map.h"
32 #include "xe_mem_pool.h"
33 #include "xe_mocs.h"
34 #include "xe_printk.h"
35 #include "xe_pt.h"
36 #include "xe_res_cursor.h"
37 #include "xe_sa.h"
38 #include "xe_sched_job.h"
39 #include "xe_sriov_vf_ccs.h"
40 #include "xe_svm.h"
41 #include "xe_sync.h"
42 #include "xe_trace_bo.h"
43 #include "xe_validation.h"
44 #include "xe_vm.h"
45 #include "xe_vram.h"
46 
47 /**
48  * struct xe_migrate - migrate context.
49  */
50 struct xe_migrate {
51 	/** @q: Default exec queue used for migration */
52 	struct xe_exec_queue *q;
53 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
54 	struct xe_tile *tile;
55 	/** @job_mutex: Timeline mutex for @q. */
56 	struct mutex job_mutex;
57 	/** @pt_bo: Page-table buffer object. */
58 	struct xe_bo *pt_bo;
59 	/** @batch_base_ofs: VM offset of the migration batch buffer */
60 	u64 batch_base_ofs;
61 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
62 	u64 usm_batch_base_ofs;
63 	/** @cleared_mem_ofs: VM offset of the NULL mapping used as cleared-memory source. */
64 	u64 cleared_mem_ofs;
65 	/** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
66 	u64 large_page_copy_ofs;
67 	/**
68 	 * @large_page_copy_pdes: BO offset at which to write out the 2M-page
69 	 * PDEs used for large copies
70 	 */
71 	u64 large_page_copy_pdes;
72 	/**
73 	 * @fence: dma-fence representing the last migration job batch.
74 	 * Protected by @job_mutex.
75 	 */
76 	struct dma_fence *fence;
77 	/**
78 	 * @vm_update_sa: For integrated, used to suballocate page-tables
79 	 * out of the pt_bo.
80 	 */
81 	struct drm_suballoc_manager vm_update_sa;
82 	/** @min_chunk_size: For dgfx, the minimum chunk size */
83 	u64 min_chunk_size;
84 };
85 
86 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
87 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
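/*
 * Rough sizing notes (illustrative, with assumed numbers): at a hypothetical
 * ~8 GiB/s of sustained blitter throughput an 8 MiB chunk takes roughly 1 ms,
 * which is where the "Around 1ms" note on MAX_PREEMPTDISABLE_TRANSFER comes
 * from. For MAX_CCS_LIMITED_TRANSFER, assuming XE_PAGE_SIZE is 4 KiB, the
 * formula above implies FIELD_MAX(XE2_CCS_SIZE_MASK) == 1023, i.e. at most
 * 1024 pages or 4 MiB per CCS-limited pass.
 */
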
88 #define NUM_KERNEL_PDE 15
89 #define NUM_PT_SLOTS 32
90 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
91 #define MAX_NUM_PTE 512
92 #define IDENTITY_OFFSET 256ULL
93 
94 /*
95  * Although MI_STORE_DATA_IMM's "length" field is 10 bits, 0x3FE is the largest
96  * legal value accepted.  Since that instruction field is always stored in
97  * (val-2) format, this translates to 0x400 dwords for the true maximum length
98  * of the instruction.  Subtracting the instruction header (1 dword) and
99  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
100  */
101 #define MAX_PTE_PER_SDI 0x1FEU
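/*
 * Sanity check of the value above: 0x3FE encodes 0x3FE + 2 = 0x400 total
 * dwords; dropping the 1 header dword and 2 address dwords leaves 0x3FD data
 * dwords, of which 0x1FE whole qwords (PTEs) fit, with one dword to spare.
 */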
102 
103 static void xe_migrate_fini(void *arg)
104 {
105 	struct xe_migrate *m = arg;
106 
107 	xe_vm_lock(m->q->vm, false);
108 	xe_bo_unpin(m->pt_bo);
109 	xe_vm_unlock(m->q->vm);
110 
111 	dma_fence_put(m->fence);
112 	xe_bo_put(m->pt_bo);
113 	drm_suballoc_manager_fini(&m->vm_update_sa);
114 	mutex_destroy(&m->job_mutex);
115 	xe_vm_close_and_put(m->q->vm);
116 	xe_exec_queue_put(m->q);
117 }
118 
119 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
120 {
121 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
122 
123 	/* First slot is reserved for mapping the PT bo and bb, so start from 1 */
124 	return (slot + 1ULL) << xe_pt_shift(level + 1);
125 }
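/*
 * For example (assuming xe_pt_shift(1) == 21, i.e. one level-0 page table
 * spans 2 MiB of VA): slot 0 resolves to VA 2 MiB, slot 1 to VA 4 MiB, and
 * so on, while VA [0, 2 MiB) stays reserved for the PT BO / batch buffer
 * mapping set up in xe_migrate_prepare_vm().
 */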
126 
127 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
128 {
129 	/*
130 	 * Subtract the DPA base to get the correct offset into the identity
131 	 * table for the migrate offset
132 	 */
133 	u64 identity_offset = IDENTITY_OFFSET;
134 
135 	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
136 		identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
137 							(xe->mem.vram), SZ_1G);
138 
139 	addr -= xe_vram_region_dpa_base(xe->mem.vram);
140 	return addr + (identity_offset << xe_pt_shift(2));
141 }
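/*
 * Worked example (assuming xe_pt_shift(2) == 30, i.e. 1 GiB per identity
 * map entry): with IDENTITY_OFFSET == 256 the plain identity map starts at
 * VA 256 GiB, so a device physical address of dpa_base + 4 GiB yields a VM
 * offset of 260 GiB. When the compressed PAT copy of the map is selected on
 * xe2+, the base is additionally shifted past the first map by the VRAM
 * size rounded up to whole GiB.
 */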
142 
143 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
144 					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
145 {
146 	struct xe_vram_region *vram = xe->mem.vram;
147 	resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
148 	u64 pos, ofs, flags;
149 	u64 entry;
150 	/* XXX: Unclear if this should be usable_size? */
151 	u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
152 	u32 level = 2;
153 
154 	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
155 	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
156 					    true, 0);
157 
158 	xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
159 
160 	/*
161 	 * Use 1GB pages when possible; the last chunk always uses 2M
162 	 * pages, as mixing reserved memory (stolen, WOCPM) into a single
163 	 * mapping is not allowed on certain platforms.
164 	 */
165 	for (pos = dpa_base; pos < vram_limit;
166 	     pos += SZ_1G, ofs += 8) {
167 		if (pos + SZ_1G >= vram_limit) {
168 			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
169 			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
170 
171 			flags = vm->pt_ops->pte_encode_addr(xe, 0,
172 							    pat_index,
173 							    level - 1,
174 							    true, 0);
175 
176 			for (ofs = pt_2m_ofs; pos < vram_limit;
177 			     pos += SZ_2M, ofs += 8)
178 				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
179 			break;	/* Ensure pos == vram_limit assert correct */
180 		}
181 
182 		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
183 	}
184 
185 	xe_assert(xe, pos == vram_limit);
186 }
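/*
 * Example of the layout this produces (hypothetical 8 GiB VRAM region): the
 * first 7 GiB are covered by seven 1 GiB entries written directly into the
 * level-2 page, while the final 1 GiB is redirected to the dedicated 2 MiB
 * page table at pt_2m_ofs and filled with 512 entries of 2 MiB each, so the
 * reserved ranges (stolen, WOCPM) never share a single huge mapping.
 */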
187 
188 static int xe_migrate_pt_bo_alloc(struct xe_tile *tile, struct xe_migrate *m,
189 				  struct xe_vm *vm, struct drm_exec *exec)
190 {
191 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
192 	u32 num_entries = NUM_PT_SLOTS;
193 
194 	/* Can't bump NUM_PT_SLOTS too high */
195 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
196 	/* Must be a multiple of 64K to support all platforms */
197 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
198 	/* And one slot reserved for the 4KiB page table updates */
199 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
200 
201 	/* Need to be sure everything fits in the first PT, or create more */
202 	xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);
203 
204 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
205 				  num_entries * XE_PAGE_SIZE,
206 				  ttm_bo_type_kernel,
207 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
208 				  XE_BO_FLAG_PAGETABLE, exec);
209 	if (IS_ERR(bo))
210 		return PTR_ERR(bo);
211 
212 	m->pt_bo = bo;
213 	return 0;
214 }
215 
216 static void xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
217 				  struct xe_vm *vm, u32 *ofs)
218 {
219 	struct xe_device *xe = tile_to_xe(tile);
220 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
221 	u8 id = tile->id;
222 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
223 #define VRAM_IDENTITY_MAP_COUNT	2
224 	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
225 #undef VRAM_IDENTITY_MAP_COUNT
226 	u32 map_ofs, level, i;
227 	struct xe_bo *bo = m->pt_bo, *batch = tile->mem.kernel_bb_pool->bo;
228 	u64 entry, pt29_ofs;
229 
230 	/* PT30 & PT31 reserved for 2M identity map */
231 	pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
232 	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
233 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
234 
235 	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
236 
237 	/* Map the entire BO in our level 0 pt */
238 	for (i = 0, level = 0; i < num_entries; level++) {
239 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
240 						  pat_index, 0);
241 
242 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
243 
244 		if (vm->flags & XE_VM_FLAG_64K)
245 			i += 16;
246 		else
247 			i += 1;
248 	}
249 
250 	if (!IS_DGFX(xe)) {
251 		/* Write out batch too */
252 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
253 		for (i = 0; i < xe_bo_size(batch);
254 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
255 		     XE_PAGE_SIZE) {
256 			entry = vm->pt_ops->pte_encode_bo(batch, i,
257 							  pat_index, 0);
258 
259 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
260 				  entry);
261 			level++;
262 		}
263 		if (xe->info.has_usm) {
264 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);
265 
266 			batch = tile->primary_gt->usm.bb_pool->bo;
267 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
268 			xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);
269 
270 			for (i = 0; i < xe_bo_size(batch);
271 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
272 			     XE_PAGE_SIZE) {
273 				entry = vm->pt_ops->pte_encode_bo(batch, i,
274 								  pat_index, 0);
275 
276 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
277 					  entry);
278 				level++;
279 			}
280 		}
281 	} else {
282 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
283 
284 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
285 
286 		if (xe->info.has_usm) {
287 			batch = tile->primary_gt->usm.bb_pool->bo;
288 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
289 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
290 		}
291 	}
292 
293 	for (level = 1; level < num_level; level++) {
294 		u32 flags = 0;
295 
296 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
297 			flags = XE_PDE_64K;
298 
299 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
300 						  XE_PAGE_SIZE);
301 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
302 			  entry | flags);
303 	}
304 
305 	/* Write PDE's that point to our BO. */
306 	for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
307 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
308 
309 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
310 			  (i + 1) * 8, u64, entry);
311 	}
312 
313 	/* Reserve 2M PDEs */
314 	level = 1;
315 	m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
316 	m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
317 		NUM_PT_SLOTS * 8;
318 
319 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
320 	level = 2;
321 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
322 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
323 		  | XE_PTE_NULL);
324 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
325 
326 	/* Identity map the entire vram at 256GiB offset */
327 	if (IS_DGFX(xe)) {
328 		u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
329 		resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
330 
331 		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
332 					    pat_index, pt30_ofs);
333 		xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
334 
335 		/*
336 		 * Identity map the entire vram for compressed pat_index for xe2+
337 		 * if flat ccs is enabled.
338 		 */
339 		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
340 			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
341 			u64 vram_offset = IDENTITY_OFFSET +
342 				DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
343 			u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
344 
345 			xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
346 							  IDENTITY_OFFSET / 2) * SZ_1G);
347 			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
348 						    comp_pat_index, pt31_ofs);
349 		}
350 	}
351 
352 	if (ofs)
353 		*ofs = map_ofs;
354 }
355 
356 static void xe_migrate_suballoc_manager_init(struct xe_migrate *m, u32 map_ofs)
357 {
358 	/*
359 	 * Example layout created above, with root level = 3:
360 	 * [PT0...PT7]: kernel PT's for copy/clear; 64KiB or 4KiB PTE's
361 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
362 	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
363 	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
364 	 *
365 	 * This makes the lowest part of the VM point to the pagetables.
366 	 * Hence the lowest 2M in the vm should point to itself; with a few writes
367 	 * and flushes, other parts of the VM can then be used for copying and
368 	 * clearing.
369 	 *
370 	 * For performance, the kernel reserves PDE's, so about 20 are left
371 	 * for async VM updates.
372 	 *
373 	 * To make it easier to work with, each scratch PT is put in slot (1 + PT #)
374 	 * everywhere; this allows lockless updates to scratch pages by using
375 	 * the different addresses in the VM.
376 	 */
377 #define NUM_VMUSA_UNIT_PER_PAGE	32
378 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
379 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
380 	drm_suballoc_manager_init(&m->vm_update_sa,
381 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
382 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
383 }
384 
385 /*
386  * Including the reserved copy engine is required to avoid deadlocks where
387  * migrate jobs servicing the faults get stuck behind the job that faulted.
388  */
389 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
390 {
391 	u32 logical_mask = 0;
392 	struct xe_hw_engine *hwe;
393 	enum xe_hw_engine_id id;
394 
395 	for_each_hw_engine(hwe, gt, id) {
396 		if (hwe->class != XE_ENGINE_CLASS_COPY)
397 			continue;
398 
399 		if (xe_gt_is_usm_hwe(gt, hwe))
400 			logical_mask |= BIT(hwe->logical_instance);
401 	}
402 
403 	return logical_mask;
404 }
405 
406 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
407 {
408 	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
409 }
410 
411 /**
412  * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
413  * @tile: &xe_tile
414  *
415  * Allocates a &xe_migrate for a given tile.
416  *
417  * Return: &xe_migrate on success, or NULL when out of memory.
418  */
419 struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
420 {
421 	struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
422 
423 	if (m)
424 		m->tile = tile;
425 	return m;
426 }
427 
428 static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
429 {
430 	struct xe_device *xe = tile_to_xe(tile);
431 	struct xe_validation_ctx ctx;
432 	struct drm_exec exec;
433 	u32 map_ofs;
434 	int err = 0;
435 
436 	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
437 		err = xe_vm_drm_exec_lock(vm, &exec);
438 		if (err)
439 			return err;
440 
441 		drm_exec_retry_on_contention(&exec);
442 
443 		err = xe_migrate_pt_bo_alloc(tile, m, vm, &exec);
444 		if (err)
445 			return err;
446 
447 		xe_migrate_prepare_vm(tile, m, vm, &map_ofs);
448 		xe_migrate_suballoc_manager_init(m, map_ofs);
449 		drm_exec_retry_on_contention(&exec);
450 		xe_validation_retry_on_oom(&ctx, &err);
451 	}
452 
453 	return err;
454 }
455 
456 /**
457  * xe_migrate_init() - Initialize a migrate context
458  * @m: The migration context
459  *
460  * Return: 0 if successful, negative error code on failure
461  */
462 int xe_migrate_init(struct xe_migrate *m)
463 {
464 	struct xe_tile *tile = m->tile;
465 	struct xe_gt *primary_gt = tile->primary_gt;
466 	struct xe_device *xe = tile_to_xe(tile);
467 	struct xe_vm *vm;
468 	int err;
469 
470 	/* Special layout, prepared below. */
471 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
472 			  XE_VM_FLAG_SET_TILE_ID(tile), NULL);
473 	if (IS_ERR(vm))
474 		return PTR_ERR(vm);
475 
476 	err = xe_migrate_lock_prepare_vm(tile, m, vm);
477 	if (err)
478 		goto err_out;
479 
480 	if (xe->info.has_usm) {
481 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
482 							   XE_ENGINE_CLASS_COPY,
483 							   primary_gt->usm.reserved_bcs_instance,
484 							   false);
485 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
486 
487 		if (!hwe || !logical_mask) {
488 			err = -EINVAL;
489 			goto err_out;
490 		}
491 
492 		/*
493 		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
494 		 * PVC, may want to revisit if performance is needed.
495 		 */
496 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
497 					    EXEC_QUEUE_FLAG_KERNEL |
498 					    EXEC_QUEUE_FLAG_PERMANENT |
499 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY |
500 					    EXEC_QUEUE_FLAG_MIGRATE |
501 					    EXEC_QUEUE_FLAG_LOW_LATENCY, 0);
502 	} else {
503 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
504 						  XE_ENGINE_CLASS_COPY,
505 						  EXEC_QUEUE_FLAG_KERNEL |
506 						  EXEC_QUEUE_FLAG_PERMANENT |
507 						  EXEC_QUEUE_FLAG_MIGRATE, 0);
508 	}
509 	if (IS_ERR(m->q)) {
510 		err = PTR_ERR(m->q);
511 		goto err_out;
512 	}
513 
514 	mutex_init(&m->job_mutex);
515 	fs_reclaim_acquire(GFP_KERNEL);
516 	might_lock(&m->job_mutex);
517 	fs_reclaim_release(GFP_KERNEL);
518 
519 	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
520 	if (err)
521 		return err;
522 
523 	if (IS_DGFX(xe)) {
524 		if (xe_migrate_needs_ccs_emit(xe))
525 			/* min chunk size corresponds to 4K of CCS Metadata */
526 			m->min_chunk_size = SZ_4K * SZ_64K /
527 				xe_device_ccs_bytes(xe, SZ_64K);
528 		else
529 			/* Somewhat arbitrary to avoid a huge amount of blits */
530 			m->min_chunk_size = SZ_64K;
531 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
532 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
533 			(unsigned long long)m->min_chunk_size);
534 	}
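	/*
	 * Illustrative numbers (assuming 1 CCS byte per 256 bytes of main
	 * memory, i.e. xe_device_ccs_bytes(xe, SZ_64K) == 256): the formula
	 * above yields SZ_4K * SZ_64K / 256 == 1 MiB, the smallest chunk
	 * whose CCS metadata fills exactly 4 KiB and thus keeps the CCS
	 * offset page-aligned for xe_migrate_res_sizes().
	 */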
535 
536 	return err;
537 
538 err_out:
539 	xe_vm_close_and_put(vm);
540 	return err;
541 
542 }
543 
544 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
545 {
546 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
547 		return MAX_CCS_LIMITED_TRANSFER;
548 
549 	return MAX_PREEMPTDISABLE_TRANSFER;
550 }
551 
552 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
553 {
554 	struct xe_device *xe = tile_to_xe(m->tile);
555 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
556 
557 	if (mem_type_is_vram(cur->mem_type)) {
558 		/*
559 		 * For VRAM we want to blit in chunks with sizes aligned to
560 		 * min_chunk_size in order for the offset to the CCS metadata to
561 		 * be page-aligned. If it's the last chunk it may be smaller.
562 		 *
563 		 * Another constraint is that we need to limit the blit to
564 		 * the VRAM block size, unless size is smaller than
565 		 * min_chunk_size.
566 		 */
567 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
568 
569 		size = min_t(u64, size, chunk);
570 		if (size > m->min_chunk_size)
571 			size = round_down(size, m->min_chunk_size);
572 	}
573 
574 	return size;
575 }
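/*
 * Hypothetical pass (assuming m->min_chunk_size == SZ_1M): with 5 MiB of
 * the resource remaining but the current VRAM block only 3.5 MiB long,
 * size is first clipped to the 8 MiB per-pass limit, then to the 3.5 MiB
 * block, and finally rounded down to 3 MiB so the next chunk's CCS offset
 * stays min_chunk_size aligned.
 */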
576 
577 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
578 {
579 	/* If the chunk is not fragmented, allow identity map. */
580 	return cur->size >= size;
581 }
582 
583 #define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
584 #define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)
585 
586 static u32 pte_update_size(struct xe_migrate *m,
587 			   u32 flags,
588 			   struct ttm_resource *res,
589 			   struct xe_res_cursor *cur,
590 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
591 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
592 {
593 	u32 cmds = 0;
594 	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
595 	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
596 
597 	*L0_pt = pt_ofs;
598 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
599 		/* Offset into identity map. */
600 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
601 					      cur->start + vram_region_gpu_offset(res),
602 					      is_comp_pte);
603 		cmds += cmd_size;
604 	} else {
605 		/* Clip L0 to available size */
606 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
607 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
608 
609 		*L0 = size;
610 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
611 
612 		/* MI_STORE_DATA_IMM */
613 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
614 
615 		/* PTE qwords */
616 		cmds += num_4k_pages * 2;
617 
618 		/* Each chunk has a single blit command */
619 		cmds += cmd_size;
620 	}
621 
622 	return cmds;
623 }
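/*
 * Worked dword budget for one fully-mapped 2 MiB chunk (512 4 KiB PTEs,
 * assuming the identity-map path above is not taken): the MI_STORE_DATA_IMM
 * headers cost 3 * DIV_ROUND_UP(512, MAX_PTE_PER_SDI) = 6 dwords, the PTE
 * payload costs 512 * 2 = 1024 dwords, plus cmd_size for the copy/clear
 * command itself.
 */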
624 
625 static void emit_pte(struct xe_migrate *m,
626 		     struct xe_bb *bb, u32 at_pt,
627 		     bool is_vram, bool is_comp_pte,
628 		     struct xe_res_cursor *cur,
629 		     u32 size, struct ttm_resource *res)
630 {
631 	struct xe_device *xe = tile_to_xe(m->tile);
632 	struct xe_vm *vm = m->q->vm;
633 	u16 pat_index;
634 	u32 ptes;
635 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
636 	u64 cur_ofs;
637 
638 	/* Indirect access needs a compression-enabled, uncached PAT index */
639 	if (GRAPHICS_VERx100(xe) >= 2000)
640 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
641 					  xe->pat.idx[XE_CACHE_WB];
642 	else
643 		pat_index = xe->pat.idx[XE_CACHE_WB];
644 
645 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
646 
647 	while (ptes) {
648 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
649 
650 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
651 		bb->cs[bb->len++] = ofs;
652 		bb->cs[bb->len++] = 0;
653 
654 		cur_ofs = ofs;
655 		ofs += chunk * 8;
656 		ptes -= chunk;
657 
658 		while (chunk--) {
659 			u64 addr, flags = 0;
660 			bool devmem = false;
661 
662 			addr = xe_res_dma(cur) & PAGE_MASK;
663 			if (is_vram) {
664 				if (vm->flags & XE_VM_FLAG_64K) {
665 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
666 
667 					xe_assert(xe, (va & (SZ_64K - 1)) ==
668 						  (addr & (SZ_64K - 1)));
669 
670 					flags |= XE_PTE_PS64;
671 				}
672 
673 				addr += vram_region_gpu_offset(res);
674 				devmem = true;
675 			}
676 
677 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
678 							   addr, pat_index,
679 							   0, devmem, flags);
680 			bb->cs[bb->len++] = lower_32_bits(addr);
681 			bb->cs[bb->len++] = upper_32_bits(addr);
682 
683 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
684 			cur_ofs += 8;
685 		}
686 	}
687 }
688 
689 #define EMIT_COPY_CCS_DW 5
690 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
691 			  u64 dst_ofs, bool dst_is_indirect,
692 			  u64 src_ofs, bool src_is_indirect,
693 			  u32 size)
694 {
695 	struct xe_device *xe = gt_to_xe(gt);
696 	u32 *cs = bb->cs + bb->len;
697 	u32 num_ccs_blks;
698 	u32 num_pages;
699 	u32 ccs_copy_size;
700 	u32 mocs;
701 
702 	if (GRAPHICS_VERx100(xe) >= 2000) {
703 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
704 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
705 
706 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
707 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
708 
709 	} else {
710 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
711 					    NUM_CCS_BYTES_PER_BLOCK);
712 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
713 
714 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
715 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
716 	}
717 
718 	*cs++ = XY_CTRL_SURF_COPY_BLT |
719 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
720 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
721 		ccs_copy_size;
722 	*cs++ = lower_32_bits(src_ofs);
723 	*cs++ = upper_32_bits(src_ofs) | mocs;
724 	*cs++ = lower_32_bits(dst_ofs);
725 	*cs++ = upper_32_bits(dst_ofs) | mocs;
726 
727 	bb->len = cs - bb->cs;
728 }
729 
730 #define EMIT_COPY_DW 10
731 static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
732 			      u64 dst_ofs, unsigned int size,
733 			      unsigned int pitch)
734 {
735 	struct xe_device *xe = gt_to_xe(gt);
736 	u32 mocs = 0;
737 	u32 tile_y = 0;
738 
739 	xe_gt_assert(gt, !(pitch & 3));
740 	xe_gt_assert(gt, size / pitch <= S16_MAX);
741 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
742 	xe_gt_assert(gt, pitch <= U16_MAX);
743 
744 	if (GRAPHICS_VER(xe) >= 20)
745 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
746 
747 	if (GRAPHICS_VERx100(xe) >= 1250)
748 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
749 
750 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
751 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
752 	bb->cs[bb->len++] = 0;
753 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
754 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
755 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
756 	bb->cs[bb->len++] = 0;
757 	bb->cs[bb->len++] = pitch | mocs;
758 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
759 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
760 }
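/*
 * Example encoding (purely illustrative): a 2 MiB copy with pitch ==
 * XE_PAGE_SIZE == 4 KiB programs height = size / pitch = 512 rows and
 * width = pitch / 4 = 1024 dwords, both comfortably within the S16_MAX
 * limits asserted above, with the source and destination pitches given in
 * bytes.
 */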
761 
762 #define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
763 static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
764 			  u64 dst_ofs, unsigned int size, unsigned int pitch)
765 {
766 	u32 mode, copy_type, width;
767 
768 	xe_gt_assert(gt, IS_ALIGNED(size, pitch));
769 	xe_gt_assert(gt, pitch <= U16_MAX);
770 	xe_gt_assert(gt, pitch);
771 	xe_gt_assert(gt, size);
772 
773 	if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
774 	    IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
775 	    IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
776 		mode = MEM_COPY_PAGE_COPY_MODE;
777 		copy_type = 0; /* linear copy */
778 		width = size / PAGE_COPY_MODE_PS;
779 	} else if (pitch > 1) {
780 		xe_gt_assert(gt, size / pitch <= U16_MAX);
781 		mode = 0; /* BYTE_COPY */
782 		copy_type = MEM_COPY_MATRIX_COPY;
783 		width = pitch;
784 	} else {
785 		mode = 0; /* BYTE_COPY */
786 		copy_type = 0; /* linear copy */
787 		width = size;
788 	}
789 
790 	xe_gt_assert(gt, width <= U16_MAX);
791 
792 	bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
793 	bb->cs[bb->len++] = width - 1;
794 	bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
795 	bb->cs[bb->len++] = pitch - 1;
796 	bb->cs[bb->len++] = pitch - 1;
797 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
798 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
799 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
800 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
801 	bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
802 			    FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
803 }
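/*
 * Mode selection example (illustrative): a 2 MiB copy with a 4 KiB pitch
 * and 256-byte aligned addresses takes the page-copy path with width =
 * 2 MiB / 256 = 8192; the same copy with a source address that is not
 * 256-byte aligned falls back to a matrix byte-copy of width 4096 and
 * height 512, all within the U16_MAX limits asserted above.
 */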
804 
805 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
806 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
807 		      unsigned int pitch)
808 {
809 	struct xe_device *xe = gt_to_xe(gt);
810 
811 	if (xe->info.has_mem_copy_instr)
812 		emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
813 	else
814 		emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
815 }
816 
817 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
818 {
819 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
820 }
821 
822 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
823 			       struct xe_bb *bb,
824 			       u64 src_ofs, bool src_is_indirect,
825 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
826 			       u64 ccs_ofs, bool copy_ccs)
827 {
828 	struct xe_gt *gt = m->tile->primary_gt;
829 	u32 flush_flags = 0;
830 
831 	if (!copy_ccs && dst_is_indirect) {
832 		/*
833 		 * If the src is already in vram, then it should already
834 		 * have been cleared by us, or has been populated by the
835 		 * user. Make sure we copy the CCS aux state as-is.
836 		 *
837 		 * Otherwise if the bo doesn't have any CCS metadata attached,
838 		 * we still need to clear it for security reasons.
839 		 */
840 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
841 
842 		emit_copy_ccs(gt, bb,
843 			      dst_ofs, true,
844 			      ccs_src_ofs, src_is_indirect, dst_size);
845 
846 		flush_flags = MI_FLUSH_DW_CCS;
847 	} else if (copy_ccs) {
848 		if (!src_is_indirect)
849 			src_ofs = ccs_ofs;
850 		else if (!dst_is_indirect)
851 			dst_ofs = ccs_ofs;
852 
853 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
854 
855 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
856 			      src_is_indirect, dst_size);
857 		if (dst_is_indirect)
858 			flush_flags = MI_FLUSH_DW_CCS;
859 	}
860 
861 	return flush_flags;
862 }
863 
864 static struct dma_fence *__xe_migrate_copy(struct xe_migrate *m,
865 					   struct xe_bo *src_bo,
866 					   struct xe_bo *dst_bo,
867 					   struct ttm_resource *src,
868 					   struct ttm_resource *dst,
869 					   bool copy_only_ccs,
870 					   bool is_vram_resolve)
871 {
872 	struct xe_gt *gt = m->tile->primary_gt;
873 	struct xe_device *xe = gt_to_xe(gt);
874 	struct dma_fence *fence = NULL;
875 	u64 size = xe_bo_size(src_bo);
876 	struct xe_res_cursor src_it, dst_it, ccs_it;
877 	u64 src_L0_ofs, dst_L0_ofs;
878 	u32 src_L0_pt, dst_L0_pt;
879 	u64 src_L0, dst_L0;
880 	int pass = 0;
881 	int err;
882 	bool src_is_pltt = src->mem_type == XE_PL_TT;
883 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
884 	bool src_is_vram = mem_type_is_vram(src->mem_type);
885 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
886 	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
887 	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
888 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
889 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
890 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
891 
892 	/*
893 	 * For a decompression operation, always use the compression PAT index.
894 	 * Otherwise, only use the compression PAT index for device memory
895 	 * when copying from VRAM to system memory.
896 	 */
897 	bool use_comp_pat = is_vram_resolve || (type_device &&
898 			    xe_device_has_flat_ccs(xe) &&
899 			    GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram);
900 
901 	/* Copying CCS between two different BOs is not supported yet. */
902 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
903 		return ERR_PTR(-EINVAL);
904 
905 	if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
906 		return ERR_PTR(-EINVAL);
907 
908 	if (!src_is_vram)
909 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
910 	else
911 		xe_res_first(src, 0, size, &src_it);
912 	if (!dst_is_vram)
913 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
914 	else
915 		xe_res_first(dst, 0, size, &dst_it);
916 
917 	if (copy_system_ccs)
918 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
919 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
920 				&ccs_it);
921 
922 	while (size) {
923 		u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
924 		struct xe_sched_job *job;
925 		struct xe_bb *bb;
926 		u32 flush_flags = 0;
927 		u32 update_idx;
928 		u64 ccs_ofs, ccs_size;
929 		u32 ccs_pt;
930 		u32 pte_flags;
931 
932 		bool usm = xe->info.has_usm;
933 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
934 
935 		src_L0 = xe_migrate_res_sizes(m, &src_it);
936 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
937 
938 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
939 			pass++, src_L0, dst_L0);
940 
941 		src_L0 = min(src_L0, dst_L0);
942 
943 		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
944 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
945 		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
946 					      &src_L0_ofs, &src_L0_pt, 0, 0,
947 					      avail_pts);
948 		if (copy_only_ccs) {
949 			dst_L0_ofs = src_L0_ofs;
950 		} else {
951 			pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
952 			batch_size += pte_update_size(m, pte_flags, dst,
953 						      &dst_it, &src_L0,
954 						      &dst_L0_ofs, &dst_L0_pt,
955 						      0, avail_pts, avail_pts);
956 		}
957 
958 		if (copy_system_ccs) {
959 			xe_assert(xe, type_device);
960 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
961 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
962 						      &ccs_ofs, &ccs_pt, 0,
963 						      2 * avail_pts,
964 						      avail_pts);
965 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
966 		}
967 
968 		/* Add copy commands size here */
969 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
970 			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
971 
972 		bb = xe_bb_new(gt, batch_size, usm);
973 		if (IS_ERR(bb)) {
974 			err = PTR_ERR(bb);
975 			goto err_sync;
976 		}
977 
978 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
979 			xe_res_next(&src_it, src_L0);
980 		else
981 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
982 				 &src_it, src_L0, src);
983 
984 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
985 			xe_res_next(&dst_it, src_L0);
986 		else if (!copy_only_ccs)
987 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
988 				 &dst_it, src_L0, dst);
989 
990 		if (copy_system_ccs)
991 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
992 
993 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
994 		update_idx = bb->len;
995 
996 		if (!copy_only_ccs)
997 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
998 
999 		if (needs_ccs_emit)
1000 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
1001 							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
1002 							  dst_L0_ofs,
1003 							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
1004 							  src_L0, ccs_ofs, copy_ccs);
1005 
1006 		job = xe_bb_create_migration_job(m->q, bb,
1007 						 xe_migrate_batch_base(m, usm),
1008 						 update_idx);
1009 		if (IS_ERR(job)) {
1010 			err = PTR_ERR(job);
1011 			goto err;
1012 		}
1013 
1014 		xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
1015 		if (!fence) {
1016 			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
1017 						    DMA_RESV_USAGE_BOOKKEEP);
1018 			if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
1019 				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
1020 							    DMA_RESV_USAGE_BOOKKEEP);
1021 			if (err)
1022 				goto err_job;
1023 		}
1024 
1025 		mutex_lock(&m->job_mutex);
1026 		xe_sched_job_arm(job);
1027 		dma_fence_put(fence);
1028 		fence = dma_fence_get(&job->drm.s_fence->finished);
1029 		xe_sched_job_push(job);
1030 
1031 		dma_fence_put(m->fence);
1032 		m->fence = dma_fence_get(fence);
1033 
1034 		mutex_unlock(&m->job_mutex);
1035 
1036 		xe_bb_free(bb, fence);
1037 		size -= src_L0;
1038 		continue;
1039 
1040 err_job:
1041 		xe_sched_job_put(job);
1042 err:
1043 		xe_bb_free(bb, NULL);
1044 
1045 err_sync:
1046 		/* Sync partial copy if any. FIXME: under job_mutex? */
1047 		if (fence) {
1048 			dma_fence_wait(fence, false);
1049 			dma_fence_put(fence);
1050 		}
1051 
1052 		return ERR_PTR(err);
1053 	}
1054 
1055 	return fence;
1056 }
1057 
1058 /**
1059  * xe_migrate_copy() - Copy content of TTM resources.
1060  * @m: The migration context.
1061  * @src_bo: The buffer object @src is currently bound to.
1062  * @dst_bo: If copying between resources created for the same bo, set this to
1063  * the same value as @src_bo. If copying between buffer objects, set it to
1064  * the buffer object @dst is currently bound to.
1065  * @src: The source TTM resource.
1066  * @dst: The dst TTM resource.
1067  * @copy_only_ccs: If true copy only CCS metadata
1068  *
1069  * Copies the contents of @src to @dst: On flat CCS devices,
1070  * the CCS metadata is copied as well if needed, or if not present,
1071  * the CCS metadata of @dst is cleared for security reasons.
1072  *
1073  * Return: Pointer to a dma_fence representing the last copy batch, or
1074  * an error pointer on failure. If there is a failure, any copy operation
1075  * started by the function call has been synced.
1076  */
1077 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
1078 				  struct xe_bo *src_bo,
1079 				  struct xe_bo *dst_bo,
1080 				  struct ttm_resource *src,
1081 				  struct ttm_resource *dst,
1082 				  bool copy_only_ccs)
1083 {
1084 	return __xe_migrate_copy(m, src_bo, dst_bo, src, dst, copy_only_ccs, false);
1085 }
1086 
1087 /**
1088  * xe_migrate_resolve() - Resolve and decompress a buffer object if required.
1089  * @m: The migrate context
1090  * @bo: The buffer object to resolve
1091  * @res: The TTM resource to resolve
1092  *
1093  * Wrapper around __xe_migrate_copy() with is_vram_resolve set to true
1094  * to trigger decompression if needed.
1095  *
1096  * Return: A dma_fence that signals on completion, or an ERR_PTR on failure.
1097  */
1098 struct dma_fence *xe_migrate_resolve(struct xe_migrate *m,
1099 				     struct xe_bo *bo,
1100 				     struct ttm_resource *res)
1101 {
1102 	return __xe_migrate_copy(m, bo, bo, res, res, false, true);
1103 }
1104 
1105 /**
1106  * xe_migrate_lrc() - Get the LRC from migrate context.
1107  * @migrate: Migrate context.
1108  *
1109  * Return: Pointer to LRC on success, error on failure
1110  */
1111 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
1112 {
1113 	return migrate->q->lrc[0];
1114 }
1115 
1116 static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
1117 {
1118 	/*
1119 	 * The migrate VM is self-referential so it can modify its own PTEs (see
1120 	 * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
1121 	 * entries for kernel operations (copies, clears, CCS migrate), and
1122 	 * suballocate the rest to user operations (binds/unbinds). With
1123 	 * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
1124 	 * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
1125 	 */
1126 	return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
1127 }
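/*
 * With NUM_KERNEL_PDE == 15 and assuming XE_PAGE_SIZE == SZ_4K, this works
 * out to 13 * 4 KiB == 0xd000: a post-sync write target inside the
 * kernel-reserved region described above, distinct from the page used for
 * PTE updates.
 */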
1128 
1129 static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
1130 {
1131 	u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
1132 
1133 	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
1134 		  MI_FLUSH_IMM_DW | flags;
1135 	dw[i++] = lower_32_bits(addr);
1136 	dw[i++] = upper_32_bits(addr);
1137 	dw[i++] = MI_NOOP;
1138 	dw[i++] = MI_NOOP;
1139 
1140 	return i;
1141 }
1142 
1143 /**
1144  * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
1145  * @tile: Tile whose migration context is to be used.
1146  * @q: Exec queue to be used along with the migration context.
1147  * @src_bo: The buffer object whose CCS metadata is to be copied.
1148  * @read_write: Selects whether to create BB commands for CCS read or write.
1149  *
1150  * Creates batch buffer instructions to copy CCS metadata from CCS pool to
1151  * memory and vice versa.
1152  *
1153  * This function should only be called for IGPU.
1154  *
1155  * Return: 0 if successful, negative error code on failure.
1156  */
1157 int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
1158 			   struct xe_bo *src_bo,
1159 			   enum xe_sriov_vf_ccs_rw_ctxs read_write)
1160 
1161 {
1162 	bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
1163 	bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
1164 	struct ttm_resource *src = src_bo->ttm.resource;
1165 	struct xe_migrate *m = tile->migrate;
1166 	struct xe_gt *gt = tile->primary_gt;
1167 	u32 batch_size, batch_size_allocated;
1168 	struct xe_device *xe = gt_to_xe(gt);
1169 	struct xe_res_cursor src_it, ccs_it;
1170 	struct xe_mem_pool *bb_pool;
1171 	struct xe_sriov_vf_ccs_ctx *ctx;
1172 	u64 size = xe_bo_size(src_bo);
1173 	struct xe_mem_pool_node *bb;
1174 	u64 src_L0, src_L0_ofs;
1175 	struct xe_bb xe_bb_tmp;
1176 	u32 src_L0_pt;
1177 	int err;
1178 
1179 	ctx = &xe->sriov.vf.ccs.contexts[read_write];
1180 
1181 	xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
1182 
1183 	xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
1184 			PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
1185 			&ccs_it);
1186 
1187 	/* Calculate Batch buffer size */
1188 	batch_size = 0;
1189 	while (size) {
1190 		batch_size += 10; /* 2 * (flush + addr + 2 NOP) */
1191 		u64 ccs_ofs, ccs_size;
1192 		u32 ccs_pt;
1193 
1194 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1195 
1196 		src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
1197 
1198 		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1199 					      &src_L0_ofs, &src_L0_pt, 0, 0,
1200 					      avail_pts);
1201 
1202 		ccs_size = xe_device_ccs_bytes(xe, src_L0);
1203 		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1204 					      &ccs_pt, 0, avail_pts, avail_pts);
1205 		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1206 
1207 		/* Add copy commands size here */
1208 		batch_size += EMIT_COPY_CCS_DW;
1209 
1210 		size -= src_L0;
1211 	}
1212 
1213 	bb = xe_mem_pool_alloc_node();
1214 	if (IS_ERR(bb))
1215 		return PTR_ERR(bb);
1216 
1217 	bb_pool = ctx->mem.ccs_bb_pool;
1218 	scoped_guard(mutex, xe_mem_pool_bo_swap_guard(bb_pool)) {
1219 		xe_mem_pool_swap_shadow_locked(bb_pool);
1220 
1221 		err = xe_mem_pool_insert_node(bb_pool, bb, batch_size * sizeof(u32));
1222 		if (err) {
1223 			xe_gt_err(gt, "BB allocation failed.\n");
1224 			kfree(bb);
1225 			return err;
1226 		}
1227 
1228 		batch_size_allocated = batch_size;
1229 		size = xe_bo_size(src_bo);
1230 		batch_size = 0;
1231 
1232 		xe_bb_tmp = (struct xe_bb){ .cs = xe_mem_pool_node_cpu_addr(bb), .len = 0 };
1233 		/*
1234 		 * Emit PTE and copy commands here.
1235 		 * The CCS copy command can only support limited size. If the size to be
1236 		 * copied is more than the limit, divide copy into chunks. So, calculate
1237 		 * sizes here again before copy command is emitted.
1238 		 */
1239 
1240 		while (size) {
1241 			batch_size += 10; /* 2 * (flush + addr + 2 NOP) */
1242 			u32 flush_flags = 0;
1243 			u64 ccs_ofs, ccs_size;
1244 			u32 ccs_pt;
1245 
1246 			u32 avail_pts = max_mem_transfer_per_pass(xe) /
1247 					LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1248 
1249 			src_L0 = xe_migrate_res_sizes(m, &src_it);
1250 
1251 			batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
1252 						      &src_L0_ofs, &src_L0_pt, 0, 0,
1253 						      avail_pts);
1254 
1255 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
1256 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
1257 						      &ccs_pt, 0, avail_pts, avail_pts);
1258 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
1259 			batch_size += EMIT_COPY_CCS_DW;
1260 
1261 			emit_pte(m, &xe_bb_tmp, src_L0_pt, false, true, &src_it, src_L0, src);
1262 
1263 			emit_pte(m, &xe_bb_tmp, ccs_pt, false, false, &ccs_it, ccs_size, src);
1264 
1265 			xe_bb_tmp.len = emit_flush_invalidate(xe_bb_tmp.cs, xe_bb_tmp.len,
1266 							      flush_flags);
1267 			flush_flags = xe_migrate_ccs_copy(m, &xe_bb_tmp, src_L0_ofs, src_is_pltt,
1268 							  src_L0_ofs, dst_is_pltt,
1269 							  src_L0, ccs_ofs, true);
1270 			xe_bb_tmp.len = emit_flush_invalidate(xe_bb_tmp.cs, xe_bb_tmp.len,
1271 							      flush_flags);
1272 
1273 			size -= src_L0;
1274 		}
1275 
1276 		xe_assert(xe, (batch_size_allocated == xe_bb_tmp.len));
1277 		xe_assert(xe, bb->sa_node.size == xe_bb_tmp.len * sizeof(u32));
1278 		src_bo->bb_ccs[read_write] = bb;
1279 
1280 		xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1281 		xe_mem_pool_sync_shadow_locked(bb);
1282 	}
1283 
1284 	return 0;
1285 }
1286 
1287 /**
1288  * xe_migrate_ccs_rw_copy_clear() - Clear the CCS read/write batch buffer
1289  * content.
1290  * @src_bo: The buffer object whose CCS batch buffer is to be cleared.
1291  * @read_write: Selects the CCS read or write context whose BB is cleared.
1292  *
1293  * Directly clearing the BB lacks atomicity and can lead to undefined
1294  * behavior if the vCPU is halted mid-operation during the clearing
1295  * process. To avoid this issue, we use a shadow buffer object approach.
1296  *
1297  * First swap the SA BO address with the shadow BO, perform the clearing
1298  * operation on the BB, update the shadow BO in the ring buffer, then
1299  * sync the shadow and the actual buffer to maintain consistency.
1300  *
1301  * Returns: None.
1302  */
1303 void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo,
1304 				  enum xe_sriov_vf_ccs_rw_ctxs read_write)
1305 {
1306 	struct xe_mem_pool_node *bb = src_bo->bb_ccs[read_write];
1307 	struct xe_device *xe = xe_bo_device(src_bo);
1308 	struct xe_mem_pool *bb_pool;
1309 	struct xe_sriov_vf_ccs_ctx *ctx;
1310 	u32 *cs;
1311 
1312 	xe_assert(xe, IS_SRIOV_VF(xe));
1313 
1314 	ctx = &xe->sriov.vf.ccs.contexts[read_write];
1315 	bb_pool = ctx->mem.ccs_bb_pool;
1316 
1317 	scoped_guard(mutex, xe_mem_pool_bo_swap_guard(bb_pool)) {
1318 		xe_mem_pool_swap_shadow_locked(bb_pool);
1319 
1320 		cs = xe_mem_pool_node_cpu_addr(bb);
1321 		memset(cs, MI_NOOP, bb->sa_node.size);
1322 		xe_sriov_vf_ccs_rw_update_bb_addr(ctx);
1323 
1324 		xe_mem_pool_sync_shadow_locked(bb);
1325 		xe_mem_pool_free_node(bb);
1326 		src_bo->bb_ccs[read_write] = NULL;
1327 	}
1328 }
1329 
1330 /**
1331  * xe_migrate_exec_queue() - Get the execution queue from migrate context.
1332  * @migrate: Migrate context.
1333  *
1334  * Return: Pointer to execution queue on success, error on failure
1335  */
1336 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
1337 {
1338 	return migrate->q;
1339 }
1340 
1341 /**
1342  * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
1343  * @vram_bo: The VRAM buffer object.
1344  * @vram_offset: The VRAM offset.
1345  * @sysmem_bo: The sysmem buffer object.
1346  * @sysmem_offset: The sysmem offset.
1347  * @size: The size of VRAM chunk to copy.
1348  * @dir: The direction of the copy operation.
1349  *
1350  * Copies a portion of a buffer object between VRAM and system memory.
1351  * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
1352  * copying to system memory.
1353  *
1354  * Return: Pointer to a dma_fence representing the last copy batch, or
1355  * an error pointer on failure. If there is a failure, any copy operation
1356  * started by the function call has been synced.
1357  */
1358 struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
1359 					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
1360 					     u64 size, enum xe_migrate_copy_dir dir)
1361 {
1362 	struct xe_device *xe = xe_bo_device(vram_bo);
1363 	struct xe_tile *tile = vram_bo->tile;
1364 	struct xe_gt *gt = tile->primary_gt;
1365 	struct xe_migrate *m = tile->migrate;
1366 	struct dma_fence *fence = NULL;
1367 	struct ttm_resource *vram = vram_bo->ttm.resource;
1368 	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
1369 	struct xe_res_cursor vram_it, sysmem_it;
1370 	u64 vram_L0_ofs, sysmem_L0_ofs;
1371 	u32 vram_L0_pt, sysmem_L0_pt;
1372 	u64 vram_L0, sysmem_L0;
1373 	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
1374 	bool use_comp_pat = to_sysmem &&
1375 		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
1376 	int pass = 0;
1377 	int err;
1378 
1379 	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
1380 	xe_assert(xe, xe_bo_is_vram(vram_bo));
1381 	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
1382 	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
1383 	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
1384 
1385 	xe_res_first(vram, vram_offset, size, &vram_it);
1386 	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
1387 
1388 	while (size) {
1389 		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
1390 		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
1391 		struct xe_sched_job *job;
1392 		struct xe_bb *bb;
1393 		u32 update_idx;
1394 		bool usm = xe->info.has_usm;
1395 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1396 
1397 		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
1398 		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
1399 
1400 		xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
1401 
1402 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
1403 		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
1404 					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
1405 
1406 		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
1407 					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
1408 		batch_size += EMIT_COPY_DW;
1409 
1410 		bb = xe_bb_new(gt, batch_size, usm);
1411 		if (IS_ERR(bb)) {
1412 			err = PTR_ERR(bb);
1413 			return ERR_PTR(err);
1414 		}
1415 
1416 		if (xe_migrate_allow_identity(vram_L0, &vram_it))
1417 			xe_res_next(&vram_it, vram_L0);
1418 		else
1419 			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
1420 
1421 		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
1422 
1423 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1424 		update_idx = bb->len;
1425 
1426 		if (to_sysmem)
1427 			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
1428 		else
1429 			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
1430 
1431 		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
1432 						 update_idx);
1433 		if (IS_ERR(job)) {
1434 			xe_bb_free(bb, NULL);
1435 			err = PTR_ERR(job);
1436 			return ERR_PTR(err);
1437 		}
1438 
1439 		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1440 
1441 		xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
1442 						     DMA_RESV_USAGE_BOOKKEEP));
1443 		xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
1444 						     DMA_RESV_USAGE_BOOKKEEP));
1445 
1446 		scoped_guard(mutex, &m->job_mutex) {
1447 			xe_sched_job_arm(job);
1448 			dma_fence_put(fence);
1449 			fence = dma_fence_get(&job->drm.s_fence->finished);
1450 			xe_sched_job_push(job);
1451 
1452 			dma_fence_put(m->fence);
1453 			m->fence = dma_fence_get(fence);
1454 		}
1455 
1456 		xe_bb_free(bb, fence);
1457 		size -= vram_L0;
1458 	}
1459 
1460 	return fence;
1461 }
1462 
1463 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1464 				 u32 size, u32 pitch)
1465 {
1466 	struct xe_device *xe = gt_to_xe(gt);
1467 	u32 *cs = bb->cs + bb->len;
1468 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
1469 
1470 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
1471 	*cs++ = pitch - 1;
1472 	*cs++ = (size / pitch) - 1;
1473 	*cs++ = pitch - 1;
1474 	*cs++ = lower_32_bits(src_ofs);
1475 	*cs++ = upper_32_bits(src_ofs);
1476 	if (GRAPHICS_VERx100(xe) >= 2000)
1477 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1478 	else
1479 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
1480 
1481 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1482 
1483 	bb->len += len;
1484 }
1485 
1486 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
1487 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
1488 {
1489 	struct xe_device *xe = gt_to_xe(gt);
1490 	u32 *cs = bb->cs + bb->len;
1491 	u32 len = XY_FAST_COLOR_BLT_DW;
1492 
1493 	if (GRAPHICS_VERx100(xe) < 1250)
1494 		len = 11;
1495 
1496 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
1497 		(len - 2);
1498 	if (GRAPHICS_VERx100(xe) >= 2000)
1499 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
1500 			(pitch - 1);
1501 	else
1502 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
1503 			(pitch - 1);
1504 	*cs++ = 0;
1505 	*cs++ = (size / pitch) << 16 | pitch / 4;
1506 	*cs++ = lower_32_bits(src_ofs);
1507 	*cs++ = upper_32_bits(src_ofs);
1508 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
1509 	*cs++ = 0;
1510 	*cs++ = 0;
1511 	*cs++ = 0;
1512 	*cs++ = 0;
1513 
1514 	if (len > 11) {
1515 		*cs++ = 0;
1516 		*cs++ = 0;
1517 		*cs++ = 0;
1518 		*cs++ = 0;
1519 		*cs++ = 0;
1520 	}
1521 
1522 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1523 
1524 	bb->len += len;
1525 }
1526 
1527 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1528 {
1529 	if (gt->info.has_xe2_blt_instructions)
1530 		return PVC_MEM_SET_CMD_LEN_DW;
1531 	else
1532 		return XY_FAST_COLOR_BLT_DW;
1533 }
1534 
1535 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1536 		       u32 size, u32 pitch, bool is_vram)
1537 {
1538 	if (gt->info.has_xe2_blt_instructions)
1539 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1540 	else
1541 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1542 				     is_vram);
1543 }
1544 
1545 /**
1546  * xe_migrate_clear() - Clear content of a TTM resource.
1547  * @m: The migration context.
1548  * @bo: The buffer object @dst is currently bound to.
1549  * @dst: The dst TTM resource to be cleared.
1550  * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1551  *
1552  * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1553  * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1554  * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1555  * TODO: Eliminate the @bo argument.
1556  *
1557  * Return: Pointer to a dma_fence representing the last clear batch, or
1558  * an error pointer on failure. If there is a failure, any clear operation
1559  * started by the function call has been synced.
1560  */
1561 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1562 				   struct xe_bo *bo,
1563 				   struct ttm_resource *dst,
1564 				   u32 clear_flags)
1565 {
1566 	bool clear_vram = mem_type_is_vram(dst->mem_type);
1567 	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1568 	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1569 	struct xe_gt *gt = m->tile->primary_gt;
1570 	struct xe_device *xe = gt_to_xe(gt);
1571 	bool clear_only_system_ccs = false;
1572 	struct dma_fence *fence = NULL;
1573 	u64 size = xe_bo_size(bo);
1574 	struct xe_res_cursor src_it;
1575 	struct ttm_resource *src = dst;
1576 	int err;
1577 
1578 	if (WARN_ON(!clear_bo_data && !clear_ccs))
1579 		return NULL;
1580 
1581 	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1582 		clear_only_system_ccs = true;
1583 
1584 	if (!clear_vram)
1585 		xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
1586 	else
1587 		xe_res_first(src, 0, xe_bo_size(bo), &src_it);
1588 
1589 	while (size) {
1590 		u64 clear_L0_ofs;
1591 		u32 clear_L0_pt;
1592 		u32 flush_flags = 0;
1593 		u64 clear_L0;
1594 		struct xe_sched_job *job;
1595 		struct xe_bb *bb;
1596 		u32 batch_size, update_idx;
1597 		u32 pte_flags;
1598 
1599 		bool usm = xe->info.has_usm;
1600 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1601 
1602 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1603 
1604 		/* Calculate final sizes and batch size. */
1605 		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1606 		batch_size = 1 +
1607 			pte_update_size(m, pte_flags, src, &src_it,
1608 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1609 					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1610 					avail_pts);
1611 
1612 		if (xe_migrate_needs_ccs_emit(xe))
1613 			batch_size += EMIT_COPY_CCS_DW;
1614 
1615 		/* Clear commands */
1616 
1617 		if (WARN_ON_ONCE(!clear_L0))
1618 			break;
1619 
1620 		bb = xe_bb_new(gt, batch_size, usm);
1621 		if (IS_ERR(bb)) {
1622 			err = PTR_ERR(bb);
1623 			goto err_sync;
1624 		}
1625 
1626 		size -= clear_L0;
1627 		/* Preemption is enabled again by the ring ops. */
1628 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
1629 			xe_res_next(&src_it, clear_L0);
1630 		} else {
1631 			emit_pte(m, bb, clear_L0_pt, clear_vram,
1632 				 clear_only_system_ccs, &src_it, clear_L0, dst);
1633 			flush_flags |= MI_INVALIDATE_TLB;
1634 		}
1635 
1636 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1637 		update_idx = bb->len;
1638 
1639 		if (clear_bo_data)
1640 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1641 
1642 		if (xe_migrate_needs_ccs_emit(xe)) {
1643 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1644 				      m->cleared_mem_ofs, false, clear_L0);
1645 			flush_flags |= MI_FLUSH_DW_CCS;
1646 		}
1647 
1648 		job = xe_bb_create_migration_job(m->q, bb,
1649 						 xe_migrate_batch_base(m, usm),
1650 						 update_idx);
1651 		if (IS_ERR(job)) {
1652 			err = PTR_ERR(job);
1653 			goto err;
1654 		}
1655 
1656 		xe_sched_job_add_migrate_flush(job, flush_flags);
1657 		if (!fence) {
1658 			/*
1659 			 * There can't be anything userspace related at this
1660 			 * point, so we just need to respect any potential move
1661 			 * fences, which are always tracked as
1662 			 * DMA_RESV_USAGE_KERNEL.
1663 			 */
1664 			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1665 						    DMA_RESV_USAGE_KERNEL);
1666 			if (err)
1667 				goto err_job;
1668 		}
1669 
1670 		mutex_lock(&m->job_mutex);
1671 		xe_sched_job_arm(job);
1672 		dma_fence_put(fence);
1673 		fence = dma_fence_get(&job->drm.s_fence->finished);
1674 		xe_sched_job_push(job);
1675 
1676 		dma_fence_put(m->fence);
1677 		m->fence = dma_fence_get(fence);
1678 
1679 		mutex_unlock(&m->job_mutex);
1680 
1681 		xe_bb_free(bb, fence);
1682 		continue;
1683 
1684 err_job:
1685 		xe_sched_job_put(job);
1686 err:
1687 		xe_bb_free(bb, NULL);
1688 err_sync:
1689 		/* Sync partial copies if any. FIXME: job_mutex? */
1690 		if (fence) {
1691 			dma_fence_wait(fence, false);
1692 			dma_fence_put(fence);
1693 		}
1694 
1695 		return ERR_PTR(err);
1696 	}
1697 
1698 	if (clear_ccs)
1699 		bo->ccs_cleared = true;
1700 
1701 	return fence;
1702 }
1703 
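/*
 * Illustrative usage sketch (assumed caller context, not taken from in-tree
 * code): a caller holding the BO lock that wants both the BO contents and
 * the flat-CCS metadata cleared would pass XE_MIGRATE_CLEAR_FLAG_FULL and
 * wait on the returned fence. "tile" and "bo" are assumed to come from the
 * caller's context.
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
 *				 XE_MIGRATE_CLEAR_FLAG_FULL);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */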
1704 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1705 			  const struct xe_vm_pgtable_update_op *pt_op,
1706 			  const struct xe_vm_pgtable_update *update,
1707 			  struct xe_migrate_pt_update *pt_update)
1708 {
1709 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1710 	u32 chunk;
1711 	u32 ofs = update->ofs, size = update->qwords;
1712 
1713 	/*
1714 	 * If we have 512 entries (max), we would populate the page table
1715 	 * ourselves and update the PDE above it to the new pointer.
1716 	 * The only time this can happen is if we have to update the
1717 	 * top-level PDE, which requires a BO that is almost vm->size big.
1718 	 *
1719 	 * This shouldn't be possible in practice; it might change when
1720 	 * 16K pages are used. Hence the assert.
1721 	 */
1722 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1723 	if (!ppgtt_ofs)
1724 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1725 						xe_bo_addr(update->pt_bo, 0,
1726 							   XE_PAGE_SIZE), false);
1727 
1728 	do {
1729 		u64 addr = ppgtt_ofs + ofs * 8;
1730 
1731 		chunk = min(size, MAX_PTE_PER_SDI);
1732 
1733 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1734 		if (!(bb->len & 1))
1735 			bb->cs[bb->len++] = MI_NOOP;
1736 
1737 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1738 		bb->cs[bb->len++] = lower_32_bits(addr);
1739 		bb->cs[bb->len++] = upper_32_bits(addr);
1740 		if (pt_op->bind)
1741 			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1742 				      ofs, chunk, update);
1743 		else
1744 			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1745 				   ofs, chunk, update);
1746 
1747 		bb->len += chunk * 2;
1748 		ofs += chunk;
1749 		size -= chunk;
1750 	} while (size);
1751 }
1752 
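/*
 * Illustrative layout (assumed example values) of what write_pgtable()
 * emits for a single update of 700 qwords starting at ofs 0. The qwords are
 * split into MAX_PTE_PER_SDI-sized chunks (0x1FE = 510 qwords), each
 * preceded by an optional alignment MI_NOOP and a 3-dword MI_STORE_DATA_IMM
 * header:
 *
 *	[MI_NOOP]				(only if bb->len was even)
 *	MI_STORE_DATA_IMM | MI_SDI_NUM_QW(510)
 *	lower_32_bits(ppgtt_ofs + 0 * 8)
 *	upper_32_bits(ppgtt_ofs + 0 * 8)
 *	510 qwords of PTE/PDE values (ops->populate() or ops->clear())
 *	[MI_NOOP]
 *	MI_STORE_DATA_IMM | MI_SDI_NUM_QW(190)
 *	lower_32_bits(ppgtt_ofs + 510 * 8)
 *	upper_32_bits(ppgtt_ofs + 510 * 8)
 *	190 qwords of PTE/PDE values
 */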
1753 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1754 {
1755 	return xe_vm_get(m->q->vm);
1756 }
1757 
1758 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1759 struct migrate_test_params {
1760 	struct xe_test_priv base;
1761 	bool force_gpu;
1762 };
1763 
1764 #define to_migrate_test_params(_priv) \
1765 	container_of(_priv, struct migrate_test_params, base)
1766 #endif
1767 
1768 static struct dma_fence *
1769 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1770 			       struct xe_migrate_pt_update *pt_update)
1771 {
1772 	XE_TEST_DECLARE(struct migrate_test_params *test =
1773 			to_migrate_test_params
1774 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1775 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1776 	struct xe_vm *vm = pt_update->vops->vm;
1777 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1778 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1779 	int err;
1780 	u32 i, j;
1781 
1782 	if (XE_TEST_ONLY(test && test->force_gpu))
1783 		return ERR_PTR(-ETIME);
1784 
1785 	if (ops->pre_commit) {
1786 		pt_update->job = NULL;
1787 		err = ops->pre_commit(pt_update);
1788 		if (err)
1789 			return ERR_PTR(err);
1790 	}
1791 
1792 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1793 		const struct xe_vm_pgtable_update_op *pt_op =
1794 			&pt_update_ops->ops[i];
1795 
1796 		for (j = 0; j < pt_op->num_entries; j++) {
1797 			const struct xe_vm_pgtable_update *update =
1798 				&pt_op->entries[j];
1799 
1800 			if (pt_op->bind)
1801 				ops->populate(pt_update, m->tile,
1802 					      &update->pt_bo->vmap, NULL,
1803 					      update->ofs, update->qwords,
1804 					      update);
1805 			else
1806 				ops->clear(pt_update, m->tile,
1807 					   &update->pt_bo->vmap, NULL,
1808 					   update->ofs, update->qwords, update);
1809 		}
1810 	}
1811 
1812 	trace_xe_vm_cpu_bind(vm);
1813 	xe_device_wmb(vm->xe);
1814 
1815 	return dma_fence_get_stub();
1816 }
1817 
1818 static struct dma_fence *
1819 __xe_migrate_update_pgtables(struct xe_migrate *m,
1820 			     struct xe_migrate_pt_update *pt_update,
1821 			     struct xe_vm_pgtable_update_ops *pt_update_ops)
1822 {
1823 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1824 	struct xe_tile *tile = m->tile;
1825 	struct xe_gt *gt = tile->primary_gt;
1826 	struct xe_device *xe = tile_to_xe(tile);
1827 	struct xe_sched_job *job;
1828 	struct dma_fence *fence;
1829 	struct drm_suballoc *sa_bo = NULL;
1830 	struct xe_bb *bb;
1831 	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1832 	u32 num_updates = 0, current_update = 0;
1833 	u64 addr;
1834 	int err = 0;
1835 	bool is_migrate = pt_update_ops->q == m->q;
1836 	bool usm = is_migrate && xe->info.has_usm;
1837 
1838 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1839 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1840 		struct xe_vm_pgtable_update *updates = pt_op->entries;
1841 
1842 		num_updates += pt_op->num_entries;
1843 		for (j = 0; j < pt_op->num_entries; ++j) {
1844 			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1845 						    MAX_PTE_PER_SDI);
1846 
1847 			/* alignment MI_NOOP + MI_STORE_DATA_IMM cmd prefix (4 dwords per cmd) */
1848 			batch_size += 4 * num_cmds + updates[j].qwords * 2;
1849 		}
1850 	}
1851 
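	/*
	 * Worked example (illustrative numbers): two updates of 510 and 190
	 * qwords give num_updates = 2 and, from the loop above,
	 * batch_size = (4 * 1 + 510 * 2) + (4 * 1 + 190 * 2) = 1408 dwords
	 * before the fixed/preamble dwords accounted for below.
	 */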
1852 	/* fixed + PTE entries */
1853 	if (IS_DGFX(xe))
1854 		batch_size += 2;
1855 	else
1856 		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1857 			num_updates * 2;
1858 
1859 	bb = xe_bb_new(gt, batch_size, usm);
1860 	if (IS_ERR(bb))
1861 		return ERR_CAST(bb);
1862 
1863 	/* For sysmem PTEs, we need to map them into our hole. */
1864 	if (!IS_DGFX(xe)) {
1865 		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1866 		u32 ptes, ofs;
1867 
1868 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1869 		if (!is_migrate) {
1870 			u32 num_units = DIV_ROUND_UP(num_updates,
1871 						     NUM_VMUSA_WRITES_PER_UNIT);
1872 
1873 			if (num_units > m->vm_update_sa.size) {
1874 				err = -ENOBUFS;
1875 				goto err_bb;
1876 			}
1877 			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1878 						 GFP_KERNEL, true, 0);
1879 			if (IS_ERR(sa_bo)) {
1880 				err = PTR_ERR(sa_bo);
1881 				goto err_bb;
1882 			}
1883 
1884 			ppgtt_ofs = NUM_KERNEL_PDE +
1885 				(drm_suballoc_soffset(sa_bo) /
1886 				 NUM_VMUSA_UNIT_PER_PAGE);
1887 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1888 				    NUM_VMUSA_UNIT_PER_PAGE) *
1889 				VM_SA_UPDATE_UNIT_SIZE;
1890 		}
1891 
1892 		/* Map our PTs into the GTT */
1893 		i = 0;
1894 		j = 0;
1895 		ptes = num_updates;
1896 		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1897 		while (ptes) {
1898 			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1899 			u32 idx = 0;
1900 
1901 			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1902 				MI_SDI_NUM_QW(chunk);
1903 			bb->cs[bb->len++] = ofs;
1904 			bb->cs[bb->len++] = 0; /* upper_32_bits */
1905 
1906 			for (; i < pt_update_ops->num_ops; ++i) {
1907 				struct xe_vm_pgtable_update_op *pt_op =
1908 					&pt_update_ops->ops[i];
1909 				struct xe_vm_pgtable_update *updates = pt_op->entries;
1910 
1911 				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1912 					struct xe_vm *vm = pt_update->vops->vm;
1913 					struct xe_bo *pt_bo = updates[j].pt_bo;
1914 
1915 					if (idx == chunk)
1916 						goto next_cmd;
1917 
1918 					xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);
1919 
1920 					/* Map a PT at most once */
1921 					if (pt_bo->update_index < 0)
1922 						pt_bo->update_index = current_update;
1923 
1924 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1925 									 pat_index, 0);
1926 					bb->cs[bb->len++] = lower_32_bits(addr);
1927 					bb->cs[bb->len++] = upper_32_bits(addr);
1928 				}
1929 
1930 				j = 0;
1931 			}
1932 
1933 next_cmd:
1934 			ptes -= chunk;
1935 			ofs += chunk * sizeof(u64);
1936 		}
1937 
1938 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1939 		update_idx = bb->len;
1940 
1941 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1942 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1943 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1944 			struct xe_vm_pgtable_update_op *pt_op =
1945 				&pt_update_ops->ops[i];
1946 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1947 
1948 			for (j = 0; j < pt_op->num_entries; ++j) {
1949 				struct xe_bo *pt_bo = updates[j].pt_bo;
1950 
1951 				write_pgtable(tile, bb, addr +
1952 					      pt_bo->update_index * XE_PAGE_SIZE,
1953 					      pt_op, &updates[j], pt_update);
1954 			}
1955 		}
1956 	} else {
1957 		/* phys pages, no preamble required */
1958 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1959 		update_idx = bb->len;
1960 
1961 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1962 			struct xe_vm_pgtable_update_op *pt_op =
1963 				&pt_update_ops->ops[i];
1964 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1965 
1966 			for (j = 0; j < pt_op->num_entries; ++j)
1967 				write_pgtable(tile, bb, 0, pt_op, &updates[j],
1968 					      pt_update);
1969 		}
1970 	}
1971 
1972 	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1973 					 xe_migrate_batch_base(m, usm),
1974 					 update_idx);
1975 	if (IS_ERR(job)) {
1976 		err = PTR_ERR(job);
1977 		goto err_sa;
1978 	}
1979 
1980 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
1981 
1982 	if (ops->pre_commit) {
1983 		pt_update->job = job;
1984 		err = ops->pre_commit(pt_update);
1985 		if (err)
1986 			goto err_job;
1987 	}
1988 	if (is_migrate)
1989 		mutex_lock(&m->job_mutex);
1990 
1991 	xe_sched_job_arm(job);
1992 	fence = dma_fence_get(&job->drm.s_fence->finished);
1993 	xe_sched_job_push(job);
1994 
1995 	if (is_migrate)
1996 		mutex_unlock(&m->job_mutex);
1997 
1998 	xe_bb_free(bb, fence);
1999 	drm_suballoc_free(sa_bo, fence);
2000 
2001 	return fence;
2002 
2003 err_job:
2004 	xe_sched_job_put(job);
2005 err_sa:
2006 	drm_suballoc_free(sa_bo, NULL);
2007 err_bb:
2008 	xe_bb_free(bb, NULL);
2009 	return ERR_PTR(err);
2010 }
2011 
2012 /**
2013  * xe_migrate_update_pgtables() - Pipelined page-table update
2014  * @m: The migrate context.
2015  * @pt_update: PT update arguments
2016  *
2017  * Perform a pipelined page-table update. The update descriptors are typically
2018  * built under the same lock critical section as a call to this function. If
2019  * using the default engine for the updates, they will be performed in the
2020  * order they grab the job_mutex. If different engines are used, external
2021  * synchronization is needed for overlapping updates to maintain page-table
2022  * consistency. Note that the meaning of "overlapping" is that the updates
2023  * touch the same page-table, which might be a higher-level page-directory.
2024  * If no pipelining is needed, then updates may be performed by the CPU.
2025  *
2026  * Return: A dma_fence that, when signaled, indicates the update completion.
2027  */
2028 struct dma_fence *
2029 xe_migrate_update_pgtables(struct xe_migrate *m,
2030 			   struct xe_migrate_pt_update *pt_update)
2031 
2032 {
2033 	struct xe_vm_pgtable_update_ops *pt_update_ops =
2034 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
2035 	struct dma_fence *fence;
2036 
2037 	fence = xe_migrate_update_pgtables_cpu(m, pt_update);
2038 
2039 	/* -ETIME indicates a job is needed, anything else is a legitimate error */
2040 	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
2041 		return fence;
2042 
2043 	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
2044 }
2045 
2046 /**
2047  * xe_migrate_wait() - Complete all operations using the xe_migrate context
2048  * @m: Migrate context to wait for.
2049  *
2050  * Waits until the GPU no longer uses the migrate context's default engine
2051  * or its page-table objects. FIXME: What about separate page-table update
2052  * engines?
2053  */
2054 void xe_migrate_wait(struct xe_migrate *m)
2055 {
2056 	if (m->fence)
2057 		dma_fence_wait(m->fence, false);
2058 }
2059 
2060 static u32 pte_update_cmd_size(u64 size)
2061 {
2062 	u32 num_dword;
2063 	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
2064 
2065 	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
2066 
2067 	/*
2068 	 * The MI_STORE_DATA_IMM command is used to update the page table. Each
2069 	 * instruction can update at most MAX_PTE_PER_SDI PTE entries. To
2070 	 * update n (n <= MAX_PTE_PER_SDI) PTE entries, we need:
2071 	 *
2072 	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
2073 	 * - 2 dwords for the page table's physical location
2074 	 * - 2*n dwords for the PTE values (each PTE entry is 2 dwords)
2075 	 */
2076 	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
2077 	num_dword += entries * 2;
2078 
2079 	return num_dword;
2080 }
2081 
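/*
 * Worked example (illustrative, assuming XE_PAGE_SIZE = 4K): for the maximum
 * size of MAX_PREEMPTDISABLE_TRANSFER (8M), entries = 2048, which needs
 * DIV_U64_ROUND_UP(2048, 510) = 5 MI_STORE_DATA_IMM commands, so
 * pte_update_cmd_size() returns (1 + 2) * 5 + 2048 * 2 = 4111 dwords.
 */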
2082 static void build_pt_update_batch_sram(struct xe_migrate *m,
2083 				       struct xe_bb *bb, u32 pt_offset,
2084 				       struct drm_pagemap_addr *sram_addr,
2085 				       u32 size, int level)
2086 {
2087 	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
2088 	u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
2089 	u32 ptes;
2090 	int i = 0;
2091 
2092 	xe_tile_assert(m->tile, PAGE_ALIGNED(size));
2093 
2094 	ptes = DIV_ROUND_UP(size, gpu_page_size);
2095 	while (ptes) {
2096 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
2097 
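		/*
		 * When the CPU page size exceeds XE_PAGE_SIZE (e.g. 16K or
		 * 64K kernels), trim level-0 chunks to whole CPU pages so
		 * that each MI_STORE_DATA_IMM ends on a CPU page boundary
		 * and the addr/i bookkeeping below never resumes mid-page.
		 */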
2098 		if (!level)
2099 			chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
2100 
2101 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
2102 		bb->cs[bb->len++] = pt_offset;
2103 		bb->cs[bb->len++] = 0;
2104 
2105 		pt_offset += chunk * 8;
2106 		ptes -= chunk;
2107 
2108 		while (chunk--) {
2109 			u64 addr = sram_addr[i].addr;
2110 			u64 pte;
2111 
2112 			xe_tile_assert(m->tile, sram_addr[i].proto ==
2113 				       DRM_INTERCONNECT_SYSTEM ||
2114 				       sram_addr[i].proto == XE_INTERCONNECT_P2P);
2115 			xe_tile_assert(m->tile, addr);
2116 			xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
2117 
2118 again:
2119 			pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
2120 								addr, pat_index,
2121 								level, false, 0);
2122 			bb->cs[bb->len++] = lower_32_bits(pte);
2123 			bb->cs[bb->len++] = upper_32_bits(pte);
2124 
2125 			if (gpu_page_size < PAGE_SIZE) {
2126 				addr += XE_PAGE_SIZE;
2127 				if (!PAGE_ALIGNED(addr)) {
2128 					chunk--;
2129 					goto again;
2130 				}
2131 				i++;
2132 			} else {
2133 				i += gpu_page_size / PAGE_SIZE;
2134 			}
2135 		}
2136 	}
2137 }
2138 
2139 static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
2140 				    unsigned long size)
2141 {
2142 	u32 large_size = (0x1 << xe_pt_shift(1));
2143 	unsigned long i, incr = large_size / PAGE_SIZE;
2144 
2145 	for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
2146 		if (PAGE_SIZE << sram_addr[i].order != large_size)
2147 			return false;
2148 
2149 	return true;
2150 }
2151 
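/*
 * Illustrative example (assuming 4K CPU pages): a 4M migration qualifies for
 * the 2M-PDE path only if sram_addr[0] and sram_addr[512] both describe
 * order-9 (2M) pages; any smaller order falls back to 4K PTEs in
 * xe_migrate_vram().
 */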
2152 #define XE_CACHELINE_BYTES	64ull
2153 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
2154 
2155 static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
2156 {
2157 	u32 pitch;
2158 
2159 	if (IS_ALIGNED(len, PAGE_SIZE))
2160 		pitch = PAGE_SIZE;
2161 	else if (IS_ALIGNED(len, SZ_4K))
2162 		pitch = SZ_4K;
2163 	else if (IS_ALIGNED(len, SZ_256))
2164 		pitch = SZ_256;
2165 	else if (IS_ALIGNED(len, 4))
2166 		pitch = 4;
2167 	else
2168 		pitch = 1;
2169 
2170 	xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
2171 	return pitch;
2172 }
2173 
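/*
 * Illustrative pitch selection (assuming a 4K-page kernel): len = 16384
 * gives PAGE_SIZE, len = 6400 gives SZ_256, len = 100 gives 4, and
 * len = 3 gives 1, which the assert above only allows on platforms with
 * the MEM_COPY instruction.
 */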
2174 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
2175 					 unsigned long len,
2176 					 unsigned long sram_offset,
2177 					 struct drm_pagemap_addr *sram_addr,
2178 					 u64 vram_addr,
2179 					 struct dma_fence *deps,
2180 					 const enum xe_migrate_copy_dir dir)
2181 {
2182 	struct xe_gt *gt = m->tile->primary_gt;
2183 	struct xe_device *xe = gt_to_xe(gt);
2184 	bool use_usm_batch = xe->info.has_usm;
2185 	struct dma_fence *fence = NULL;
2186 	u32 batch_size = 1;
2187 	u64 src_L0_ofs, dst_L0_ofs;
2188 	struct xe_sched_job *job;
2189 	struct xe_bb *bb;
2190 	u32 update_idx, pt_slot = 0;
2191 	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
2192 	unsigned int pitch = xe_migrate_copy_pitch(xe, len);
2193 	int err;
2194 	unsigned long i, j;
2195 	bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
2196 
2197 	if (!xe->info.has_mem_copy_instr &&
2198 	    drm_WARN_ON(&xe->drm,
2199 			(!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
2200 		return ERR_PTR(-EOPNOTSUPP);
2201 
2202 	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
2203 
2204 	batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
2205 	batch_size += EMIT_COPY_DW;
2206 
2207 	bb = xe_bb_new(gt, batch_size, use_usm_batch);
2208 	if (IS_ERR(bb)) {
2209 		err = PTR_ERR(bb);
2210 		return ERR_PTR(err);
2211 	}
2212 
2213 	/*
2214 	 * If the order of a struct drm_pagemap_addr entry is greater than 0,
2215 	 * the entry is populated by GPU pagemap but subsequent entries within
2216 	 * the range of that order are not populated.
2217 	 * build_pt_update_batch_sram() expects a fully populated array of
2218 	 * struct drm_pagemap_addr. Ensure this is the case even with higher
2219 	 * orders.
2220 	 */
2221 	for (i = 0; !use_pde && i < npages;) {
2222 		unsigned int order = sram_addr[i].order;
2223 
2224 		for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
2225 			if (!sram_addr[i + j].addr)
2226 				sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
2227 
2228 		i += NR_PAGES(order);
2229 	}
2230 
2231 	if (use_pde)
2232 		build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
2233 					   sram_addr, npages << PAGE_SHIFT, 1);
2234 	else
2235 		build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
2236 					   sram_addr, npages << PAGE_SHIFT, 0);
2237 
2238 	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
2239 		if (use_pde)
2240 			src_L0_ofs = m->large_page_copy_ofs + sram_offset;
2241 		else
2242 			src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2243 		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2244 
2245 	} else {
2246 		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
2247 		if (use_pde)
2248 			dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
2249 		else
2250 			dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
2251 	}
2252 
2253 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
2254 	update_idx = bb->len;
2255 
2256 	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
2257 
2258 	job = xe_bb_create_migration_job(m->q, bb,
2259 					 xe_migrate_batch_base(m, use_usm_batch),
2260 					 update_idx);
2261 	if (IS_ERR(job)) {
2262 		err = PTR_ERR(job);
2263 		goto err;
2264 	}
2265 
2266 	xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
2267 
2268 	if (deps && !dma_fence_is_signaled(deps)) {
2269 		dma_fence_get(deps);
2270 		err = drm_sched_job_add_dependency(&job->drm, deps);
2271 		if (err)
2272 			dma_fence_wait(deps, false);
2273 		err = 0;
2274 	}
2275 
2276 	mutex_lock(&m->job_mutex);
2277 	xe_sched_job_arm(job);
2278 	fence = dma_fence_get(&job->drm.s_fence->finished);
2279 	xe_sched_job_push(job);
2280 
2281 	dma_fence_put(m->fence);
2282 	m->fence = dma_fence_get(fence);
2283 	mutex_unlock(&m->job_mutex);
2284 
2285 	xe_bb_free(bb, fence);
2286 
2287 	return fence;
2288 
2289 err:
2290 	xe_bb_free(bb, NULL);
2291 
2292 	return ERR_PTR(err);
2293 }
2294 
2295 /**
2296  * xe_migrate_to_vram() - Migrate to VRAM
2297  * @m: The migration context.
2298  * @npages: Number of pages to migrate.
2299  * @src_addr: Array of DMA information (source of migrate)
2300  * @dst_addr: Device physical address of VRAM (destination of migrate)
2301  * @deps: struct dma_fence representing the dependencies that need
2302  * to be signaled before migration.
2303  *
2304  * Copy from an array dma addresses to a VRAM device physical address
2305  *
2306  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2307  * failure
2308  */
2309 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
2310 				     unsigned long npages,
2311 				     struct drm_pagemap_addr *src_addr,
2312 				     u64 dst_addr,
2313 				     struct dma_fence *deps)
2314 {
2315 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
2316 			       deps, XE_MIGRATE_COPY_TO_VRAM);
2317 }
2318 
2319 /**
2320  * xe_migrate_from_vram() - Migrate from VRAM
2321  * @m: The migration context.
2322  * @npages: Number of pages to migrate.
2323  * @src_addr: Device physical address of VRAM (source of migrate)
2324  * @dst_addr: Array of DMA information (destination of migrate)
2325  * @deps: struct dma_fence representing the dependencies that need
2326  * to be signaled before migration.
2327  *
2328  * Copy from a VRAM device physical address to an array dma addresses
2329  *
2330  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
2331  * failure
2332  */
2333 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
2334 				       unsigned long npages,
2335 				       u64 src_addr,
2336 				       struct drm_pagemap_addr *dst_addr,
2337 				       struct dma_fence *deps)
2338 {
2339 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
2340 			       deps, XE_MIGRATE_COPY_TO_SRAM);
2341 }
2342 
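/*
 * Illustrative usage sketch (assumed SVM-style caller, not taken from
 * in-tree code): migrating npages of already dma-mapped system memory
 * described by src[] to a VRAM allocation at vram_addr, then waiting
 * synchronously. Real callers would typically chain the returned fence
 * into their migration state instead.
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_migrate_to_vram(tile->migrate, npages, src, vram_addr, NULL);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */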
2343 static void xe_migrate_dma_unmap(struct xe_device *xe,
2344 				 struct drm_pagemap_addr *pagemap_addr,
2345 				 int len, int write)
2346 {
2347 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2348 
2349 	for (i = 0; i < npages; ++i) {
2350 		if (!pagemap_addr[i].addr)
2351 			break;
2352 
2353 		dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
2354 			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2355 	}
2356 	kfree(pagemap_addr);
2357 }
2358 
2359 static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
2360 						   void *buf, int len,
2361 						   int write)
2362 {
2363 	struct drm_pagemap_addr *pagemap_addr;
2364 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
2365 
2366 	pagemap_addr = kzalloc_objs(*pagemap_addr, npages);
2367 	if (!pagemap_addr)
2368 		return ERR_PTR(-ENOMEM);
2369 
2370 	for (i = 0; i < npages; ++i) {
2371 		dma_addr_t addr;
2372 		struct page *page;
2373 		enum dma_data_direction dir = write ? DMA_TO_DEVICE :
2374 						      DMA_FROM_DEVICE;
2375 
2376 		if (is_vmalloc_addr(buf))
2377 			page = vmalloc_to_page(buf);
2378 		else
2379 			page = virt_to_page(buf);
2380 
2381 		addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
2382 		if (dma_mapping_error(xe->drm.dev, addr))
2383 			goto err_fault;
2384 
2385 		pagemap_addr[i] =
2386 			drm_pagemap_addr_encode(addr,
2387 						DRM_INTERCONNECT_SYSTEM,
2388 						0, dir);
2389 		buf += PAGE_SIZE;
2390 	}
2391 
2392 	return pagemap_addr;
2393 
2394 err_fault:
2395 	xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
2396 	return ERR_PTR(-EFAULT);
2397 }
2398 
2399 /**
2400  * xe_migrate_access_memory - Access memory of a BO via GPU
2401  *
2402  * @m: The migration context.
2403  * @bo: buffer object
2404  * @offset: access offset into buffer object
2405  * @buf: pointer to caller memory to read into or write from
2406  * @len: length of access
2407  * @write: write access
2408  *
2409  * Access memory of a BO via the GPU, either reading into or writing from a
2410  * caller-provided pointer. The pointer is DMA-mapped for GPU access and GPU
2411  * commands are issued to copy between it and the BO.
2412  *
2413  * Returns:
2414  * 0 if successful, negative error code on failure.
2415  */
2416 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
2417 			     unsigned long offset, void *buf, int len,
2418 			     int write)
2419 {
2420 	struct xe_tile *tile = m->tile;
2421 	struct xe_device *xe = tile_to_xe(tile);
2422 	struct xe_res_cursor cursor;
2423 	struct dma_fence *fence = NULL;
2424 	struct drm_pagemap_addr *pagemap_addr;
2425 	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
2426 	int bytes_left = len, current_page = 0;
2427 	void *orig_buf = buf;
2428 
2429 	xe_bo_assert_held(bo);
2430 
2431 	/* Use bounce buffer for small access and unaligned access */
2432 	if (!xe->info.has_mem_copy_instr &&
2433 	    (!IS_ALIGNED(len, 4) ||
2434 	     !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
2435 	     !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
2436 		int buf_offset = 0;
2437 		void *bounce;
2438 		int err;
2439 
2440 		BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
2441 		bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
2442 		if (!bounce)
2443 			return -ENOMEM;
2444 
2445 		/*
2446 		 * Less than ideal for large unaligned accesses, but those should be
2447 		 * fairly rare; this can be fixed up if it becomes common.
2448 		 */
2449 		do {
2450 			int copy_bytes = min_t(int, bytes_left,
2451 					       XE_CACHELINE_BYTES -
2452 					       (offset & XE_CACHELINE_MASK));
2453 			int ptr_offset = offset & XE_CACHELINE_MASK;
2454 
2455 			err = xe_migrate_access_memory(m, bo,
2456 						       offset &
2457 						       ~XE_CACHELINE_MASK,
2458 						       bounce,
2459 						       XE_CACHELINE_BYTES, 0);
2460 			if (err)
2461 				break;
2462 
2463 			if (write) {
2464 				memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
2465 
2466 				err = xe_migrate_access_memory(m, bo,
2467 							       offset & ~XE_CACHELINE_MASK,
2468 							       bounce,
2469 							       XE_CACHELINE_BYTES, write);
2470 				if (err)
2471 					break;
2472 			} else {
2473 				memcpy(buf + buf_offset, bounce + ptr_offset,
2474 				       copy_bytes);
2475 			}
2476 
2477 			bytes_left -= copy_bytes;
2478 			buf_offset += copy_bytes;
2479 			offset += copy_bytes;
2480 		} while (bytes_left);
2481 
2482 		kfree(bounce);
2483 		return err;
2484 	}
2485 
2486 	pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
2487 	if (IS_ERR(pagemap_addr))
2488 		return PTR_ERR(pagemap_addr);
2489 
2490 	xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
2491 
2492 	do {
2493 		struct dma_fence *__fence;
2494 		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
2495 			cursor.start;
2496 		int current_bytes;
2497 		u32 pitch;
2498 
2499 		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
2500 			current_bytes = min_t(int, bytes_left,
2501 					      MAX_PREEMPTDISABLE_TRANSFER);
2502 		else
2503 			current_bytes = min_t(int, bytes_left, cursor.size);
2504 
2505 		pitch = xe_migrate_copy_pitch(xe, current_bytes);
2506 		if (xe->info.has_mem_copy_instr)
2507 			current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
2508 		else
2509 			current_bytes = min_t(int, current_bytes,
2510 					      round_down(S16_MAX * pitch,
2511 							 XE_CACHELINE_BYTES));
2512 
2513 		__fence = xe_migrate_vram(m, current_bytes,
2514 					  (unsigned long)buf & ~PAGE_MASK,
2515 					  &pagemap_addr[current_page],
2516 					  vram_addr, NULL, write ?
2517 					  XE_MIGRATE_COPY_TO_VRAM :
2518 					  XE_MIGRATE_COPY_TO_SRAM);
2519 		if (IS_ERR(__fence)) {
2520 			if (fence) {
2521 				dma_fence_wait(fence, false);
2522 				dma_fence_put(fence);
2523 			}
2524 			fence = __fence;
2525 			goto out_err;
2526 		}
2527 
2528 		dma_fence_put(fence);
2529 		fence = __fence;
2530 
2531 		buf += current_bytes;
2532 		offset += current_bytes;
2533 		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
2534 		bytes_left -= current_bytes;
2535 		if (bytes_left)
2536 			xe_res_next(&cursor, current_bytes);
2537 	} while (bytes_left);
2538 
2539 	dma_fence_wait(fence, false);
2540 	dma_fence_put(fence);
2541 
2542 out_err:
2543 	xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
2544 	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
2545 }
2546 
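/*
 * Illustrative usage sketch (assumed caller context): reading the first
 * 256 bytes of a locked, VRAM-backed BO into a kernel buffer. "tile" and
 * "bo" are assumed to come from the caller; the BO lock must be held as
 * required by the xe_bo_assert_held() above.
 *
 *	u8 data[SZ_256];
 *	int err;
 *
 *	err = xe_migrate_access_memory(tile->migrate, bo, 0, data,
 *				       sizeof(data), false);
 *	if (err)
 *		return err;
 */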
2547 /**
2548  * xe_migrate_job_lock() - Lock migrate job lock
2549  * @m: The migration context.
2550  * @q: Queue associated with the operation which requires a lock
2551  *
2552  * Lock the migrate job lock if the queue is a migration queue, otherwise
2553  * assert the VM's dma-resv is held (user queues have their own locking).
2554  */
2555 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
2556 {
2557 	bool is_migrate = q == m->q;
2558 
2559 	if (is_migrate)
2560 		mutex_lock(&m->job_mutex);
2561 	else
2562 		xe_vm_assert_held(q->user_vm);	/* User queue VMs should be locked */
2563 }
2564 
2565 /**
2566  * xe_migrate_job_unlock() - Unlock migrate job lock
2567  * @m: The migration context.
2568  * @q: Queue associated with the operation which requires a lock
2569  *
2570  * Unlock the migrate job lock if the queue is a migration queue, otherwise
2571  * assert the VM's dma-resv is held (user queues have their own locking).
2572  */
2573 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
2574 {
2575 	bool is_migrate = q == m->q;
2576 
2577 	if (is_migrate)
2578 		mutex_unlock(&m->job_mutex);
2579 	else
2580 		xe_vm_assert_held(q->user_vm);	/* User queue VMs should be locked */
2581 }
2582 
2583 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
2584 /**
2585  * xe_migrate_job_lock_assert() - Assert migrate job lock held of queue
2586  * @q: Migrate queue
2587  */
2588 void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
2589 {
2590 	struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
2591 
2592 	xe_gt_assert(q->gt, q == m->q);
2593 	lockdep_assert_held(&m->job_mutex);
2594 }
2595 #endif
2596 
2597 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
2598 #include "tests/xe_migrate.c"
2599 #endif
2600