xref: /linux/drivers/gpu/drm/xe/xe_migrate.c (revision 785151f50ddacac06c7a3c5f3d31642794507fdf)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include "xe_migrate.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/sizes.h>
10 
11 #include <drm/drm_managed.h>
12 #include <drm/ttm/ttm_tt.h>
13 #include <uapi/drm/xe_drm.h>
14 
15 #include <generated/xe_wa_oob.h>
16 
17 #include "instructions/xe_gpu_commands.h"
18 #include "instructions/xe_mi_commands.h"
19 #include "regs/xe_gtt_defs.h"
20 #include "tests/xe_test.h"
21 #include "xe_assert.h"
22 #include "xe_bb.h"
23 #include "xe_bo.h"
24 #include "xe_exec_queue.h"
25 #include "xe_ggtt.h"
26 #include "xe_gt.h"
27 #include "xe_hw_engine.h"
28 #include "xe_lrc.h"
29 #include "xe_map.h"
30 #include "xe_mocs.h"
31 #include "xe_pt.h"
32 #include "xe_res_cursor.h"
33 #include "xe_sched_job.h"
34 #include "xe_sync.h"
35 #include "xe_trace_bo.h"
36 #include "xe_vm.h"
37 
38 /**
39  * struct xe_migrate - migrate context.
40  */
41 struct xe_migrate {
42 	/** @q: Default exec queue used for migration */
43 	struct xe_exec_queue *q;
44 	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
45 	struct xe_tile *tile;
46 	/** @job_mutex: Timeline mutex for @q. */
47 	struct mutex job_mutex;
48 	/** @pt_bo: Page-table buffer object. */
49 	struct xe_bo *pt_bo;
50 	/** @batch_base_ofs: VM offset of the migration batch buffer */
51 	u64 batch_base_ofs;
52 	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
53 	u64 usm_batch_base_ofs;
54 	/** @cleared_mem_ofs: VM offset of the NULL-mapped pre-cleared region, used as a CCS clear source. */
55 	u64 cleared_mem_ofs;
56 	/**
57 	 * @fence: dma-fence representing the last migration job batch.
58 	 * Protected by @job_mutex.
59 	 */
60 	struct dma_fence *fence;
61 	/**
62 	 * @vm_update_sa: For integrated, used to suballocate page-tables
63 	 * out of the pt_bo.
64 	 */
65 	struct drm_suballoc_manager vm_update_sa;
66 	/** @min_chunk_size: For dgfx, minimum chunk size. */
67 	u64 min_chunk_size;
68 };
69 
70 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
71 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
72 #define NUM_KERNEL_PDE 15
73 #define NUM_PT_SLOTS 32
74 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
75 #define MAX_NUM_PTE 512
76 #define IDENTITY_OFFSET 256ULL
77 
78 /*
79  * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
80  * legal value accepted.  Since that instruction field is always stored in
81  * (val-2) format, this translates to 0x400 dwords for the true maximum length
82  * of the instruction.  Subtracting the instruction header (1 dword) and
83  * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
84  */
85 #define MAX_PTE_PER_SDI 0x1FE
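/*
 * Worked example of the arithmetic above: a stored length of 0x3FE decodes to
 * 0x3FE + 2 = 0x400 dwords for the whole instruction.  Dropping the header
 * (1 dword) and the destination address (2 dwords) leaves 0x400 - 3 = 0x3FD
 * dwords of payload, i.e. 0x1FE whole qwords, so a single MI_STORE_DATA_IMM
 * can emit at most 510 64-bit PTEs.
 */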
86 
87 /**
88  * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
89  * @tile: The tile.
90  *
91  * Returns the default migrate exec queue of this tile.
92  *
93  * Return: The default migrate exec queue
94  */
95 struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
96 {
97 	return tile->migrate->q;
98 }
99 
100 static void xe_migrate_fini(void *arg)
101 {
102 	struct xe_migrate *m = arg;
103 
104 	xe_vm_lock(m->q->vm, false);
105 	xe_bo_unpin(m->pt_bo);
106 	xe_vm_unlock(m->q->vm);
107 
108 	dma_fence_put(m->fence);
109 	xe_bo_put(m->pt_bo);
110 	drm_suballoc_manager_fini(&m->vm_update_sa);
111 	mutex_destroy(&m->job_mutex);
112 	xe_vm_close_and_put(m->q->vm);
113 	xe_exec_queue_put(m->q);
114 }
115 
116 static u64 xe_migrate_vm_addr(u64 slot, u32 level)
117 {
118 	XE_WARN_ON(slot >= NUM_PT_SLOTS);
119 
120 	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
121 	return (slot + 1ULL) << xe_pt_shift(level + 1);
122 }
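/*
 * For example, with 4KiB pages slot 0 at level 0 resolves to
 * (0 + 1) << xe_pt_shift(1) = 2MiB, slot 1 to 4MiB, and so on: each slot
 * addresses the 2MiB window that one level-0 page table can map.
 */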
123 
124 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
125 {
126 	/*
127 	 * Remove the DPA to get a correct offset into identity table for the
128 	 * migrate offset
129 	 */
130 	u64 identity_offset = IDENTITY_OFFSET;
131 
132 	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
133 		identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
134 
135 	addr -= xe->mem.vram.dpa_base;
136 	return addr + (identity_offset << xe_pt_shift(2));
137 }
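/*
 * Example: with xe_pt_shift(2) == 30, an address N bytes past vram.dpa_base
 * resolves to (IDENTITY_OFFSET << 30) + N, i.e. N bytes into the identity map
 * that starts at the 256GiB mark of the migrate VM.  The compressed-PAT map
 * used on Xe2+ simply starts one VRAM-size worth of 1GiB entries further up.
 */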
138 
139 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
140 					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
141 {
142 	u64 pos, ofs, flags;
143 	u64 entry;
144 	/* XXX: Unclear if this should be usable_size? */
145 	u64 vram_limit =  xe->mem.vram.actual_physical_size +
146 		xe->mem.vram.dpa_base;
147 	u32 level = 2;
148 
149 	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
150 	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
151 					    true, 0);
152 
153 	xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
154 
155 	/*
156 	 * Use 1GB pages when possible; the last chunk always uses 2M
157 	 * pages, since mixing reserved memory (stolen, WOCPM) into a single
158 	 * mapping is not allowed on certain platforms.
159 	 */
160 	for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
161 	     pos += SZ_1G, ofs += 8) {
162 		if (pos + SZ_1G >= vram_limit) {
163 			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
164 							  pat_index);
165 			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
166 
167 			flags = vm->pt_ops->pte_encode_addr(xe, 0,
168 							    pat_index,
169 							    level - 1,
170 							    true, 0);
171 
172 			for (ofs = pt_2m_ofs; pos < vram_limit;
173 			     pos += SZ_2M, ofs += 8)
174 				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
175 			break;	/* Ensure pos == vram_limit assert correct */
176 		}
177 
178 		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
179 	}
180 
181 	xe_assert(xe, pos == vram_limit);
182 }
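/*
 * As a sketch: for 8GiB of VRAM starting at dpa_base, the loop above writes
 * seven 1GiB PTEs directly into the level-2 page, then points the eighth
 * level-2 entry at the page table at @pt_2m_ofs and fills it with 512 2MiB
 * PTEs covering the final gigabyte.
 */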
183 
184 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
185 				 struct xe_vm *vm)
186 {
187 	struct xe_device *xe = tile_to_xe(tile);
188 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
189 	u8 id = tile->id;
190 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
191 #define VRAM_IDENTITY_MAP_COUNT	2
192 	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
193 #undef VRAM_IDENTITY_MAP_COUNT
194 	u32 map_ofs, level, i;
195 	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
196 	u64 entry, pt29_ofs;
197 
198 	/* Can't bump NUM_PT_SLOTS too high */
199 	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
200 	/* Must be a multiple of 64K to support all platforms */
201 	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
202 	/* And one slot reserved for the 4KiB page table updates */
203 	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
204 
205 	/* Need to be sure everything fits in the first PT, or create more */
206 	xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
207 
208 	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
209 				  num_entries * XE_PAGE_SIZE,
210 				  ttm_bo_type_kernel,
211 				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
212 				  XE_BO_FLAG_PAGETABLE);
213 	if (IS_ERR(bo))
214 		return PTR_ERR(bo);
215 
216 	/* PT30 & PT31 reserved for 2M identity map */
217 	pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
218 	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
219 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
220 
221 	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
222 
223 	/* Map the entire BO in our level 0 pt */
224 	for (i = 0, level = 0; i < num_entries; level++) {
225 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
226 						  pat_index, 0);
227 
228 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
229 
230 		if (vm->flags & XE_VM_FLAG_64K)
231 			i += 16;
232 		else
233 			i += 1;
234 	}
235 
236 	if (!IS_DGFX(xe)) {
237 		/* Write out batch too */
238 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
239 		for (i = 0; i < batch->size;
240 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
241 		     XE_PAGE_SIZE) {
242 			entry = vm->pt_ops->pte_encode_bo(batch, i,
243 							  pat_index, 0);
244 
245 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
246 				  entry);
247 			level++;
248 		}
249 		if (xe->info.has_usm) {
250 			xe_tile_assert(tile, batch->size == SZ_1M);
251 
252 			batch = tile->primary_gt->usm.bb_pool->bo;
253 			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
254 			xe_tile_assert(tile, batch->size == SZ_512K);
255 
256 			for (i = 0; i < batch->size;
257 			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
258 			     XE_PAGE_SIZE) {
259 				entry = vm->pt_ops->pte_encode_bo(batch, i,
260 								  pat_index, 0);
261 
262 				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
263 					  entry);
264 				level++;
265 			}
266 		}
267 	} else {
268 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
269 
270 		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
271 
272 		if (xe->info.has_usm) {
273 			batch = tile->primary_gt->usm.bb_pool->bo;
274 			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
275 			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
276 		}
277 	}
278 
279 	for (level = 1; level < num_level; level++) {
280 		u32 flags = 0;
281 
282 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
283 			flags = XE_PDE_64K;
284 
285 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
286 						  XE_PAGE_SIZE, pat_index);
287 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
288 			  entry | flags);
289 	}
290 
291 	/* Write PDE's that point to our BO. */
292 	for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
293 		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
294 						  pat_index);
295 
296 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
297 			  (i + 1) * 8, u64, entry);
298 	}
299 
300 	/* Set up a 1GiB NULL mapping at 255GiB offset. */
301 	level = 2;
302 	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
303 		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
304 		  | XE_PTE_NULL);
305 	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
306 
307 	/* Identity map the entire vram at 256GiB offset */
308 	if (IS_DGFX(xe)) {
309 		u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
310 
311 		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
312 					    pat_index, pt30_ofs);
313 		xe_assert(xe, xe->mem.vram.actual_physical_size <=
314 					(MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
315 
316 		/*
317 		 * Identity map the entire vram for compressed pat_index for xe2+
318 		 * if flat ccs is enabled.
319 		 */
320 		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
321 			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
322 			u64 vram_offset = IDENTITY_OFFSET +
323 				DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
324 			u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
325 
326 			xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
327 						IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
328 			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
329 						    comp_pat_index, pt31_ofs);
330 		}
331 	}
332 
333 	/*
334 	 * Example layout created above, with root level = 3:
335 	 * [PT0...PT7]: kernel PT's for copy/clear; 64 KiB or 4 KiB PTE's
336 	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
337 	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
338 	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
339 	 *
340 	 * This makes the lowest part of the VM point to the pagetables.
341 	 * Hence the lowest 2M in the vm should point to itself. With a few writes
342 	 * and flushes, other parts of the VM can be used for either copying or
343 	 * clearing.
344 	 *
345 	 * For performance, the kernel reserves PDE's, so about 20 are left
346 	 * for async VM updates.
347 	 *
348 	 * To make it easier to work with, each scratch PT is put in slot (1 + PT #)
349 	 * everywhere, which allows lockless updates to scratch pages by using
350 	 * the different addresses in the VM.
351 	 */
352 #define NUM_VMUSA_UNIT_PER_PAGE	32
353 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
354 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
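	/*
	 * With 4KiB pages this works out to 32 suballocation units per page,
	 * each 128 bytes wide and thus good for 16 qword PTE writes; the
	 * manager below hands these out for pipelined page-table updates.
	 */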
355 	drm_suballoc_manager_init(&m->vm_update_sa,
356 				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
357 				  NUM_VMUSA_UNIT_PER_PAGE, 0);
358 
359 	m->pt_bo = bo;
360 	return 0;
361 }
362 
363 /*
364  * Including the reserved copy engine is required to avoid deadlocks where
365  * migrate jobs servicing faults get stuck behind the job that faulted.
366  */
367 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
368 {
369 	u32 logical_mask = 0;
370 	struct xe_hw_engine *hwe;
371 	enum xe_hw_engine_id id;
372 
373 	for_each_hw_engine(hwe, gt, id) {
374 		if (hwe->class != XE_ENGINE_CLASS_COPY)
375 			continue;
376 
377 		if (xe_gt_is_usm_hwe(gt, hwe))
378 			logical_mask |= BIT(hwe->logical_instance);
379 	}
380 
381 	return logical_mask;
382 }
383 
384 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
385 {
386 	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
387 }
388 
389 /**
390  * xe_migrate_init() - Initialize a migrate context
391  * @tile: Back-pointer to the tile we're initializing for.
392  *
393  * Return: Pointer to a migrate context on success. Error pointer on error.
394  */
395 struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
396 {
397 	struct xe_device *xe = tile_to_xe(tile);
398 	struct xe_gt *primary_gt = tile->primary_gt;
399 	struct xe_migrate *m;
400 	struct xe_vm *vm;
401 	int err;
402 
403 	m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
404 	if (!m)
405 		return ERR_PTR(-ENOMEM);
406 
407 	m->tile = tile;
408 
409 	/* Special layout, prepared below. */
410 	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
411 			  XE_VM_FLAG_SET_TILE_ID(tile));
412 	if (IS_ERR(vm))
413 		return ERR_CAST(vm);
414 
415 	xe_vm_lock(vm, false);
416 	err = xe_migrate_prepare_vm(tile, m, vm);
417 	xe_vm_unlock(vm);
418 	if (err) {
419 		xe_vm_close_and_put(vm);
420 		return ERR_PTR(err);
421 	}
422 
423 	if (xe->info.has_usm) {
424 		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
425 							   XE_ENGINE_CLASS_COPY,
426 							   primary_gt->usm.reserved_bcs_instance,
427 							   false);
428 		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
429 
430 		if (!hwe || !logical_mask)
431 			return ERR_PTR(-EINVAL);
432 
433 		/*
434 		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
435 		 * PVC, may want to revisit if performance is needed.
436 		 */
437 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
438 					    EXEC_QUEUE_FLAG_KERNEL |
439 					    EXEC_QUEUE_FLAG_PERMANENT |
440 					    EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
441 	} else {
442 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
443 						  XE_ENGINE_CLASS_COPY,
444 						  EXEC_QUEUE_FLAG_KERNEL |
445 						  EXEC_QUEUE_FLAG_PERMANENT, 0);
446 	}
447 	if (IS_ERR(m->q)) {
448 		xe_vm_close_and_put(vm);
449 		return ERR_CAST(m->q);
450 	}
451 
452 	mutex_init(&m->job_mutex);
453 	fs_reclaim_acquire(GFP_KERNEL);
454 	might_lock(&m->job_mutex);
455 	fs_reclaim_release(GFP_KERNEL);
456 
457 	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
458 	if (err)
459 		return ERR_PTR(err);
460 
461 	if (IS_DGFX(xe)) {
462 		if (xe_migrate_needs_ccs_emit(xe))
463 			/* min chunk size corresponds to 4K of CCS Metadata */
464 			m->min_chunk_size = SZ_4K * SZ_64K /
465 				xe_device_ccs_bytes(xe, SZ_64K);
466 		else
467 			/* Somewhat arbitrary to avoid a huge amount of blits */
468 			m->min_chunk_size = SZ_64K;
469 		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
470 		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
471 			(unsigned long long)m->min_chunk_size);
472 	}
473 
474 	return m;
475 }
476 
477 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
478 {
479 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
480 		return MAX_CCS_LIMITED_TRANSFER;
481 
482 	return MAX_PREEMPTDISABLE_TRANSFER;
483 }
484 
485 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
486 {
487 	struct xe_device *xe = tile_to_xe(m->tile);
488 	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
489 
490 	if (mem_type_is_vram(cur->mem_type)) {
491 		/*
492 		 * We want to blit VRAM in chunks with sizes aligned to
493 		 * min_chunk_size so that the offset to the CCS metadata is
494 		 * page-aligned. If it's the last chunk it may be smaller.
495 		 *
496 		 * Another constraint is that we need to limit the blit to
497 		 * the VRAM block size, unless size is smaller than
498 		 * min_chunk_size.
499 		 */
500 		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
501 
502 		size = min_t(u64, size, chunk);
503 		if (size > m->min_chunk_size)
504 			size = round_down(size, m->min_chunk_size);
505 	}
506 
507 	return size;
508 }
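/*
 * For instance, with min_chunk_size = 64KiB and a contiguous 3MiB VRAM block
 * up next, the code above returns 3MiB (already a 64KiB multiple).  If the
 * next block were only 48KiB, the clamp to min_chunk_size keeps the blit at
 * 64KiB so the CCS offset stays page-aligned, at the cost of spanning blocks
 * and losing the identity-map path.
 */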
509 
510 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
511 {
512 	/* If the chunk is not fragmented, allow identity map. */
513 	return cur->size >= size;
514 }
515 
516 #define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
517 #define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)
518 
519 static u32 pte_update_size(struct xe_migrate *m,
520 			   u32 flags,
521 			   struct ttm_resource *res,
522 			   struct xe_res_cursor *cur,
523 			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
524 			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
525 {
526 	u32 cmds = 0;
527 	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
528 	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
529 
530 	*L0_pt = pt_ofs;
531 	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
532 		/* Offset into identity map. */
533 		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
534 					      cur->start + vram_region_gpu_offset(res),
535 					      is_comp_pte);
536 		cmds += cmd_size;
537 	} else {
538 		/* Clip L0 to available size */
539 		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
540 		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;
541 
542 		*L0 = size;
543 		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
544 
545 		/* MI_STORE_DATA_IMM */
546 		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);
547 
548 		/* PDE qwords */
549 		cmds += num_4k_pages * 2;
550 
551 		/* Each chunk has a single blit command */
552 		cmds += cmd_size;
553 	}
554 
555 	return cmds;
556 }
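/*
 * Rough sizing example for the non-identity path: a full 2MiB chunk needs
 * 512 4KiB PTEs, which fit in two MI_STORE_DATA_IMM packets (3 header dwords
 * each) plus 1024 dwords of PTE payload, so roughly 1030 dwords before the
 * blit command itself is added.
 */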
557 
558 static void emit_pte(struct xe_migrate *m,
559 		     struct xe_bb *bb, u32 at_pt,
560 		     bool is_vram, bool is_comp_pte,
561 		     struct xe_res_cursor *cur,
562 		     u32 size, struct ttm_resource *res)
563 {
564 	struct xe_device *xe = tile_to_xe(m->tile);
565 	struct xe_vm *vm = m->q->vm;
566 	u16 pat_index;
567 	u32 ptes;
568 	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
569 	u64 cur_ofs;
570 
571 	/* Indirect access needs a compression-enabled, uncached PAT index */
572 	if (GRAPHICS_VERx100(xe) >= 2000)
573 		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
574 					  xe->pat.idx[XE_CACHE_WB];
575 	else
576 		pat_index = xe->pat.idx[XE_CACHE_WB];
577 
578 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
579 
580 	while (ptes) {
581 		u32 chunk = min(MAX_PTE_PER_SDI, ptes);
582 
583 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
584 		bb->cs[bb->len++] = ofs;
585 		bb->cs[bb->len++] = 0;
586 
587 		cur_ofs = ofs;
588 		ofs += chunk * 8;
589 		ptes -= chunk;
590 
591 		while (chunk--) {
592 			u64 addr, flags = 0;
593 			bool devmem = false;
594 
595 			addr = xe_res_dma(cur) & PAGE_MASK;
596 			if (is_vram) {
597 				if (vm->flags & XE_VM_FLAG_64K) {
598 					u64 va = cur_ofs * XE_PAGE_SIZE / 8;
599 
600 					xe_assert(xe, (va & (SZ_64K - 1)) ==
601 						  (addr & (SZ_64K - 1)));
602 
603 					flags |= XE_PTE_PS64;
604 				}
605 
606 				addr += vram_region_gpu_offset(res);
607 				devmem = true;
608 			}
609 
610 			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
611 							   addr, pat_index,
612 							   0, devmem, flags);
613 			bb->cs[bb->len++] = lower_32_bits(addr);
614 			bb->cs[bb->len++] = upper_32_bits(addr);
615 
616 			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
617 			cur_ofs += 8;
618 		}
619 	}
620 }
621 
622 #define EMIT_COPY_CCS_DW 5
623 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
624 			  u64 dst_ofs, bool dst_is_indirect,
625 			  u64 src_ofs, bool src_is_indirect,
626 			  u32 size)
627 {
628 	struct xe_device *xe = gt_to_xe(gt);
629 	u32 *cs = bb->cs + bb->len;
630 	u32 num_ccs_blks;
631 	u32 num_pages;
632 	u32 ccs_copy_size;
633 	u32 mocs;
634 
635 	if (GRAPHICS_VERx100(xe) >= 2000) {
636 		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
637 		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
638 
639 		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
640 		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
641 
642 	} else {
643 		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
644 					    NUM_CCS_BYTES_PER_BLOCK);
645 		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
646 
647 		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
648 		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
649 	}
650 
651 	*cs++ = XY_CTRL_SURF_COPY_BLT |
652 		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
653 		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
654 		ccs_copy_size;
655 	*cs++ = lower_32_bits(src_ofs);
656 	*cs++ = upper_32_bits(src_ofs) | mocs;
657 	*cs++ = lower_32_bits(dst_ofs);
658 	*cs++ = upper_32_bits(dst_ofs) | mocs;
659 
660 	bb->len = cs - bb->cs;
661 }
662 
663 #define EMIT_COPY_DW 10
664 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
665 		      u64 src_ofs, u64 dst_ofs, unsigned int size,
666 		      unsigned int pitch)
667 {
668 	struct xe_device *xe = gt_to_xe(gt);
669 	u32 mocs = 0;
670 	u32 tile_y = 0;
671 
672 	xe_gt_assert(gt, !(pitch & 3));
673 	xe_gt_assert(gt, size / pitch <= S16_MAX);
674 	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
675 	xe_gt_assert(gt, pitch <= U16_MAX);
676 
677 	if (GRAPHICS_VER(xe) >= 20)
678 		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
679 
680 	if (GRAPHICS_VERx100(xe) >= 1250)
681 		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
682 
683 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
684 	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
685 	bb->cs[bb->len++] = 0;
686 	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
687 	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
688 	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
689 	bb->cs[bb->len++] = 0;
690 	bb->cs[bb->len++] = pitch | mocs;
691 	bb->cs[bb->len++] = lower_32_bits(src_ofs);
692 	bb->cs[bb->len++] = upper_32_bits(src_ofs);
693 }
694 
695 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
696 {
697 	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
698 }
699 
700 static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
701 			       struct xe_bb *bb,
702 			       u64 src_ofs, bool src_is_indirect,
703 			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
704 			       u64 ccs_ofs, bool copy_ccs)
705 {
706 	struct xe_gt *gt = m->tile->primary_gt;
707 	u32 flush_flags = 0;
708 
709 	if (!copy_ccs && dst_is_indirect) {
710 		/*
711 		 * If the src is already in vram, then it should already
712 		 * have been cleared by us, or has been populated by the
713 		 * user. Make sure we copy the CCS aux state as-is.
714 		 *
715 		 * Otherwise if the bo doesn't have any CCS metadata attached,
716 		 * we still need to clear it for security reasons.
717 		 */
718 		u64 ccs_src_ofs =  src_is_indirect ? src_ofs : m->cleared_mem_ofs;
719 
720 		emit_copy_ccs(gt, bb,
721 			      dst_ofs, true,
722 			      ccs_src_ofs, src_is_indirect, dst_size);
723 
724 		flush_flags = MI_FLUSH_DW_CCS;
725 	} else if (copy_ccs) {
726 		if (!src_is_indirect)
727 			src_ofs = ccs_ofs;
728 		else if (!dst_is_indirect)
729 			dst_ofs = ccs_ofs;
730 
731 		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
732 
733 		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
734 			      src_is_indirect, dst_size);
735 		if (dst_is_indirect)
736 			flush_flags = MI_FLUSH_DW_CCS;
737 	}
738 
739 	return flush_flags;
740 }
741 
742 /**
743  * xe_migrate_copy() - Copy content of TTM resources.
744  * @m: The migration context.
745  * @src_bo: The buffer object @src is currently bound to.
746  * @dst_bo: If copying between resources created for the same bo, set this to
747  * the same value as @src_bo. If copying between buffer objects, set it to
748  * the buffer object @dst is currently bound to.
749  * @src: The source TTM resource.
750  * @dst: The dst TTM resource.
751  * @copy_only_ccs: If true, copy only the CCS metadata
752  *
753  * Copies the contents of @src to @dst: On flat CCS devices,
754  * the CCS metadata is copied as well if needed, or if not present,
755  * the CCS metadata of @dst is cleared for security reasons.
756  *
757  * Return: Pointer to a dma_fence representing the last copy batch, or
758  * an error pointer on failure. If there is a failure, any copy operation
759  * started by the function call has been synced.
760  */
761 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
762 				  struct xe_bo *src_bo,
763 				  struct xe_bo *dst_bo,
764 				  struct ttm_resource *src,
765 				  struct ttm_resource *dst,
766 				  bool copy_only_ccs)
767 {
768 	struct xe_gt *gt = m->tile->primary_gt;
769 	struct xe_device *xe = gt_to_xe(gt);
770 	struct dma_fence *fence = NULL;
771 	u64 size = src_bo->size;
772 	struct xe_res_cursor src_it, dst_it, ccs_it;
773 	u64 src_L0_ofs, dst_L0_ofs;
774 	u32 src_L0_pt, dst_L0_pt;
775 	u64 src_L0, dst_L0;
776 	int pass = 0;
777 	int err;
778 	bool src_is_pltt = src->mem_type == XE_PL_TT;
779 	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
780 	bool src_is_vram = mem_type_is_vram(src->mem_type);
781 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
782 	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
783 	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
784 	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
785 		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
786 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
787 	bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
788 		GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
789 
790 	/* Copying CCS between two different BOs is not supported yet. */
791 	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
792 		return ERR_PTR(-EINVAL);
793 
794 	if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
795 		return ERR_PTR(-EINVAL);
796 
797 	if (!src_is_vram)
798 		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
799 	else
800 		xe_res_first(src, 0, size, &src_it);
801 	if (!dst_is_vram)
802 		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
803 	else
804 		xe_res_first(dst, 0, size, &dst_it);
805 
806 	if (copy_system_ccs)
807 		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
808 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
809 				&ccs_it);
810 
811 	while (size) {
812 		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
813 		struct xe_sched_job *job;
814 		struct xe_bb *bb;
815 		u32 flush_flags = 0;
816 		u32 update_idx;
817 		u64 ccs_ofs, ccs_size;
818 		u32 ccs_pt;
819 		u32 pte_flags;
820 
821 		bool usm = xe->info.has_usm;
822 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
823 
824 		src_L0 = xe_migrate_res_sizes(m, &src_it);
825 		dst_L0 = xe_migrate_res_sizes(m, &dst_it);
826 
827 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
828 			pass++, src_L0, dst_L0);
829 
830 		src_L0 = min(src_L0, dst_L0);
831 
832 		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
833 		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
834 		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
835 					      &src_L0_ofs, &src_L0_pt, 0, 0,
836 					      avail_pts);
837 
838 		pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
839 		batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
840 					      &dst_L0_ofs, &dst_L0_pt, 0,
841 					      avail_pts, avail_pts);
842 
843 		if (copy_system_ccs) {
844 			xe_assert(xe, type_device);
845 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
846 			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
847 						      &ccs_ofs, &ccs_pt, 0,
848 						      2 * avail_pts,
849 						      avail_pts);
850 			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
851 		}
852 
853 		/* Add copy commands size here */
854 		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
855 			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
856 
857 		bb = xe_bb_new(gt, batch_size, usm);
858 		if (IS_ERR(bb)) {
859 			err = PTR_ERR(bb);
860 			goto err_sync;
861 		}
862 
863 		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
864 			xe_res_next(&src_it, src_L0);
865 		else
866 			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
867 				 &src_it, src_L0, src);
868 
869 		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
870 			xe_res_next(&dst_it, src_L0);
871 		else
872 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
873 				 &dst_it, src_L0, dst);
874 
875 		if (copy_system_ccs)
876 			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
877 
878 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
879 		update_idx = bb->len;
880 
881 		if (!copy_only_ccs)
882 			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
883 
884 		if (needs_ccs_emit)
885 			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
886 							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
887 							  dst_L0_ofs,
888 							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
889 							  src_L0, ccs_ofs, copy_ccs);
890 
891 		job = xe_bb_create_migration_job(m->q, bb,
892 						 xe_migrate_batch_base(m, usm),
893 						 update_idx);
894 		if (IS_ERR(job)) {
895 			err = PTR_ERR(job);
896 			goto err;
897 		}
898 
899 		xe_sched_job_add_migrate_flush(job, flush_flags);
900 		if (!fence) {
901 			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
902 						    DMA_RESV_USAGE_BOOKKEEP);
903 			if (!err && src_bo != dst_bo)
904 				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
905 							    DMA_RESV_USAGE_BOOKKEEP);
906 			if (err)
907 				goto err_job;
908 		}
909 
910 		mutex_lock(&m->job_mutex);
911 		xe_sched_job_arm(job);
912 		dma_fence_put(fence);
913 		fence = dma_fence_get(&job->drm.s_fence->finished);
914 		xe_sched_job_push(job);
915 
916 		dma_fence_put(m->fence);
917 		m->fence = dma_fence_get(fence);
918 
919 		mutex_unlock(&m->job_mutex);
920 
921 		xe_bb_free(bb, fence);
922 		size -= src_L0;
923 		continue;
924 
925 err_job:
926 		xe_sched_job_put(job);
927 err:
928 		xe_bb_free(bb, NULL);
929 
930 err_sync:
931 		/* Sync partial copy if any. FIXME: under job_mutex? */
932 		if (fence) {
933 			dma_fence_wait(fence, false);
934 			dma_fence_put(fence);
935 		}
936 
937 		return ERR_PTR(err);
938 	}
939 
940 	return fence;
941 }
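/*
 * Illustrative call sequence for a bo move (simplified; "new_mem" stands in
 * for the destination resource, and real callers also handle CCS-only copies
 * and async completion rather than dropping the fence immediately):
 *
 *	fence = xe_migrate_copy(tile->migrate, bo, bo, bo->ttm.resource,
 *				new_mem, false);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	dma_fence_put(fence);
 */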
942 
943 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
944 				 u32 size, u32 pitch)
945 {
946 	struct xe_device *xe = gt_to_xe(gt);
947 	u32 *cs = bb->cs + bb->len;
948 	u32 len = PVC_MEM_SET_CMD_LEN_DW;
949 
950 	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
951 	*cs++ = pitch - 1;
952 	*cs++ = (size / pitch) - 1;
953 	*cs++ = pitch - 1;
954 	*cs++ = lower_32_bits(src_ofs);
955 	*cs++ = upper_32_bits(src_ofs);
956 	if (GRAPHICS_VERx100(xe) >= 2000)
957 		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
958 	else
959 		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
960 
961 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
962 
963 	bb->len += len;
964 }
965 
966 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
967 				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
968 {
969 	struct xe_device *xe = gt_to_xe(gt);
970 	u32 *cs = bb->cs + bb->len;
971 	u32 len = XY_FAST_COLOR_BLT_DW;
972 
973 	if (GRAPHICS_VERx100(xe) < 1250)
974 		len = 11;
975 
976 	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
977 		(len - 2);
978 	if (GRAPHICS_VERx100(xe) >= 2000)
979 		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
980 			(pitch - 1);
981 	else
982 		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
983 			(pitch - 1);
984 	*cs++ = 0;
985 	*cs++ = (size / pitch) << 16 | pitch / 4;
986 	*cs++ = lower_32_bits(src_ofs);
987 	*cs++ = upper_32_bits(src_ofs);
988 	*cs++ = (is_vram ? 0x0 : 0x1) <<  XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
989 	*cs++ = 0;
990 	*cs++ = 0;
991 	*cs++ = 0;
992 	*cs++ = 0;
993 
994 	if (len > 11) {
995 		*cs++ = 0;
996 		*cs++ = 0;
997 		*cs++ = 0;
998 		*cs++ = 0;
999 		*cs++ = 0;
1000 	}
1001 
1002 	xe_gt_assert(gt, cs - bb->cs == len + bb->len);
1003 
1004 	bb->len += len;
1005 }
1006 
1007 static bool has_service_copy_support(struct xe_gt *gt)
1008 {
1009 	/*
1010 	 * What we care about is whether the architecture was designed with
1011 	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
1012 	 * instructions) so check the architectural engine list rather than the
1013 	 * actual list since these instructions are usable on BCS0 even if
1014 	 * all of the actual service copy engines (BCS1-BCS8) have been fused
1015 	 * off.
1016 	 */
1017 	return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
1018 					      XE_HW_ENGINE_BCS1);
1019 }
1020 
1021 static u32 emit_clear_cmd_len(struct xe_gt *gt)
1022 {
1023 	if (has_service_copy_support(gt))
1024 		return PVC_MEM_SET_CMD_LEN_DW;
1025 	else
1026 		return XY_FAST_COLOR_BLT_DW;
1027 }
1028 
1029 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
1030 		       u32 size, u32 pitch, bool is_vram)
1031 {
1032 	if (has_service_copy_support(gt))
1033 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
1034 	else
1035 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
1036 				     is_vram);
1037 }
1038 
1039 /**
1040  * xe_migrate_clear() - Clear content of a TTM resource.
1041  * @m: The migration context.
1042  * @bo: The buffer object @dst is currently bound to.
1043  * @dst: The dst TTM resource to be cleared.
1044  * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
1045  *
1046  * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
1047  * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
1048  * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
1049  * TODO: Eliminate the @bo argument.
1050  *
1051  * Return: Pointer to a dma_fence representing the last clear batch, or
1052  * an error pointer on failure. If there is a failure, any clear operation
1053  * started by the function call has been synced.
1054  */
1055 struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
1056 				   struct xe_bo *bo,
1057 				   struct ttm_resource *dst,
1058 				   u32 clear_flags)
1059 {
1060 	bool clear_vram = mem_type_is_vram(dst->mem_type);
1061 	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
1062 	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
1063 	struct xe_gt *gt = m->tile->primary_gt;
1064 	struct xe_device *xe = gt_to_xe(gt);
1065 	bool clear_only_system_ccs = false;
1066 	struct dma_fence *fence = NULL;
1067 	u64 size = bo->size;
1068 	struct xe_res_cursor src_it;
1069 	struct ttm_resource *src = dst;
1070 	int err;
1071 
1072 	if (WARN_ON(!clear_bo_data && !clear_ccs))
1073 		return NULL;
1074 
1075 	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
1076 		clear_only_system_ccs = true;
1077 
1078 	if (!clear_vram)
1079 		xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
1080 	else
1081 		xe_res_first(src, 0, bo->size, &src_it);
1082 
1083 	while (size) {
1084 		u64 clear_L0_ofs;
1085 		u32 clear_L0_pt;
1086 		u32 flush_flags = 0;
1087 		u64 clear_L0;
1088 		struct xe_sched_job *job;
1089 		struct xe_bb *bb;
1090 		u32 batch_size, update_idx;
1091 		u32 pte_flags;
1092 
1093 		bool usm = xe->info.has_usm;
1094 		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
1095 
1096 		clear_L0 = xe_migrate_res_sizes(m, &src_it);
1097 
1098 		/* Calculate final sizes and batch size. */
1099 		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
1100 		batch_size = 2 +
1101 			pte_update_size(m, pte_flags, src, &src_it,
1102 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
1103 					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
1104 					avail_pts);
1105 
1106 		if (xe_migrate_needs_ccs_emit(xe))
1107 			batch_size += EMIT_COPY_CCS_DW;
1108 
1109 		/* Clear commands */
1110 
1111 		if (WARN_ON_ONCE(!clear_L0))
1112 			break;
1113 
1114 		bb = xe_bb_new(gt, batch_size, usm);
1115 		if (IS_ERR(bb)) {
1116 			err = PTR_ERR(bb);
1117 			goto err_sync;
1118 		}
1119 
1120 		size -= clear_L0;
1121 		/* Preemption is enabled again by the ring ops. */
1122 		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
1123 			xe_res_next(&src_it, clear_L0);
1124 		else
1125 			emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
1126 				 &src_it, clear_L0, dst);
1127 
1128 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1129 		update_idx = bb->len;
1130 
1131 		if (clear_bo_data)
1132 			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
1133 
1134 		if (xe_migrate_needs_ccs_emit(xe)) {
1135 			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
1136 				      m->cleared_mem_ofs, false, clear_L0);
1137 			flush_flags = MI_FLUSH_DW_CCS;
1138 		}
1139 
1140 		job = xe_bb_create_migration_job(m->q, bb,
1141 						 xe_migrate_batch_base(m, usm),
1142 						 update_idx);
1143 		if (IS_ERR(job)) {
1144 			err = PTR_ERR(job);
1145 			goto err;
1146 		}
1147 
1148 		xe_sched_job_add_migrate_flush(job, flush_flags);
1149 		if (!fence) {
1150 			/*
1151 			 * There can't be anything userspace related at this
1152 			 * point, so we just need to respect any potential move
1153 			 * fences, which are always tracked as
1154 			 * DMA_RESV_USAGE_KERNEL.
1155 			 */
1156 			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
1157 						    DMA_RESV_USAGE_KERNEL);
1158 			if (err)
1159 				goto err_job;
1160 		}
1161 
1162 		mutex_lock(&m->job_mutex);
1163 		xe_sched_job_arm(job);
1164 		dma_fence_put(fence);
1165 		fence = dma_fence_get(&job->drm.s_fence->finished);
1166 		xe_sched_job_push(job);
1167 
1168 		dma_fence_put(m->fence);
1169 		m->fence = dma_fence_get(fence);
1170 
1171 		mutex_unlock(&m->job_mutex);
1172 
1173 		xe_bb_free(bb, fence);
1174 		continue;
1175 
1176 err_job:
1177 		xe_sched_job_put(job);
1178 err:
1179 		xe_bb_free(bb, NULL);
1180 err_sync:
1181 		/* Sync partial copies if any. FIXME: job_mutex? */
1182 		if (fence) {
1183 			dma_fence_wait(fence, false);
1184 			dma_fence_put(fence);
1185 		}
1186 
1187 		return ERR_PTR(err);
1188 	}
1189 
1190 	if (clear_ccs)
1191 		bo->ccs_cleared = true;
1192 
1193 	return fence;
1194 }
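/*
 * Illustrative use, e.g. when zeroing a freshly allocated VRAM bo
 * (simplified; real callers wait on or store the returned fence):
 *
 *	fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
 *				 XE_MIGRATE_CLEAR_FLAG_FULL);
 */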
1195 
1196 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
1197 			  const struct xe_vm_pgtable_update_op *pt_op,
1198 			  const struct xe_vm_pgtable_update *update,
1199 			  struct xe_migrate_pt_update *pt_update)
1200 {
1201 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1202 	u32 chunk;
1203 	u32 ofs = update->ofs, size = update->qwords;
1204 
1205 	/*
1206 	 * If we have 512 entries (max), we would populate it ourselves,
1207 	 * and update the PDE above it to the new pointer.
1208 	 * The only time this can happen is if we have to update the top
1209 	 * PDE. This requires a BO that is almost vm->size big.
1210 	 *
1211 	 * This shouldn't be possible in practice; it might change when 16K
1212 	 * pages are used. Hence the assert.
1213 	 */
1214 	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
1215 	if (!ppgtt_ofs)
1216 		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
1217 						xe_bo_addr(update->pt_bo, 0,
1218 							   XE_PAGE_SIZE), false);
1219 
1220 	do {
1221 		u64 addr = ppgtt_ofs + ofs * 8;
1222 
1223 		chunk = min(size, MAX_PTE_PER_SDI);
1224 
1225 		/* Ensure populatefn can do memset64 by aligning bb->cs */
1226 		if (!(bb->len & 1))
1227 			bb->cs[bb->len++] = MI_NOOP;
1228 
1229 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1230 		bb->cs[bb->len++] = lower_32_bits(addr);
1231 		bb->cs[bb->len++] = upper_32_bits(addr);
1232 		if (pt_op->bind)
1233 			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
1234 				      ofs, chunk, update);
1235 		else
1236 			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
1237 				   ofs, chunk, update);
1238 
1239 		bb->len += chunk * 2;
1240 		ofs += chunk;
1241 		size -= chunk;
1242 	} while (size);
1243 }
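/*
 * E.g. a 511-qword update is emitted as chunks of 510 and 1 qwords, each
 * preceded by an MI_STORE_DATA_IMM header (padded with MI_NOOP when needed so
 * the qword payload lands 8-byte aligned for the populate/clear callback's
 * memset64()).
 */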
1244 
1245 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
1246 {
1247 	return xe_vm_get(m->q->vm);
1248 }
1249 
1250 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1251 struct migrate_test_params {
1252 	struct xe_test_priv base;
1253 	bool force_gpu;
1254 };
1255 
1256 #define to_migrate_test_params(_priv) \
1257 	container_of(_priv, struct migrate_test_params, base)
1258 #endif
1259 
1260 static struct dma_fence *
1261 xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
1262 			       struct xe_migrate_pt_update *pt_update)
1263 {
1264 	XE_TEST_DECLARE(struct migrate_test_params *test =
1265 			to_migrate_test_params
1266 			(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
1267 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1268 	struct xe_vm *vm = pt_update->vops->vm;
1269 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1270 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1271 	int err;
1272 	u32 i, j;
1273 
1274 	if (XE_TEST_ONLY(test && test->force_gpu))
1275 		return ERR_PTR(-ETIME);
1276 
1277 	if (ops->pre_commit) {
1278 		pt_update->job = NULL;
1279 		err = ops->pre_commit(pt_update);
1280 		if (err)
1281 			return ERR_PTR(err);
1282 	}
1283 
1284 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1285 		const struct xe_vm_pgtable_update_op *pt_op =
1286 			&pt_update_ops->ops[i];
1287 
1288 		for (j = 0; j < pt_op->num_entries; j++) {
1289 			const struct xe_vm_pgtable_update *update =
1290 				&pt_op->entries[j];
1291 
1292 			if (pt_op->bind)
1293 				ops->populate(pt_update, m->tile,
1294 					      &update->pt_bo->vmap, NULL,
1295 					      update->ofs, update->qwords,
1296 					      update);
1297 			else
1298 				ops->clear(pt_update, m->tile,
1299 					   &update->pt_bo->vmap, NULL,
1300 					   update->ofs, update->qwords, update);
1301 		}
1302 	}
1303 
1304 	trace_xe_vm_cpu_bind(vm);
1305 	xe_device_wmb(vm->xe);
1306 
1307 	return dma_fence_get_stub();
1308 }
1309 
1310 static struct dma_fence *
1311 __xe_migrate_update_pgtables(struct xe_migrate *m,
1312 			     struct xe_migrate_pt_update *pt_update,
1313 			     struct xe_vm_pgtable_update_ops *pt_update_ops)
1314 {
1315 	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
1316 	struct xe_tile *tile = m->tile;
1317 	struct xe_gt *gt = tile->primary_gt;
1318 	struct xe_device *xe = tile_to_xe(tile);
1319 	struct xe_sched_job *job;
1320 	struct dma_fence *fence;
1321 	struct drm_suballoc *sa_bo = NULL;
1322 	struct xe_bb *bb;
1323 	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
1324 	u32 num_updates = 0, current_update = 0;
1325 	u64 addr;
1326 	int err = 0;
1327 	bool is_migrate = pt_update_ops->q == m->q;
1328 	bool usm = is_migrate && xe->info.has_usm;
1329 
1330 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
1331 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
1332 		struct xe_vm_pgtable_update *updates = pt_op->entries;
1333 
1334 		num_updates += pt_op->num_entries;
1335 		for (j = 0; j < pt_op->num_entries; ++j) {
1336 			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
1337 						    MAX_PTE_PER_SDI);
1338 
1339 			/* align noop + MI_STORE_DATA_IMM cmd prefix */
1340 			batch_size += 4 * num_cmds + updates[j].qwords * 2;
1341 		}
1342 	}
1343 
1344 	/* fixed + PTE entries */
1345 	if (IS_DGFX(xe))
1346 		batch_size += 2;
1347 	else
1348 		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
1349 			num_updates * 2;
1350 
1351 	bb = xe_bb_new(gt, batch_size, usm);
1352 	if (IS_ERR(bb))
1353 		return ERR_CAST(bb);
1354 
1355 	/* For sysmem PTEs, we need to map them in our hole. */
1356 	if (!IS_DGFX(xe)) {
1357 		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
1358 		u32 ptes, ofs;
1359 
1360 		ppgtt_ofs = NUM_KERNEL_PDE - 1;
1361 		if (!is_migrate) {
1362 			u32 num_units = DIV_ROUND_UP(num_updates,
1363 						     NUM_VMUSA_WRITES_PER_UNIT);
1364 
1365 			if (num_units > m->vm_update_sa.size) {
1366 				err = -ENOBUFS;
1367 				goto err_bb;
1368 			}
1369 			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
1370 						 GFP_KERNEL, true, 0);
1371 			if (IS_ERR(sa_bo)) {
1372 				err = PTR_ERR(sa_bo);
1373 				goto err_bb;
1374 			}
1375 
1376 			ppgtt_ofs = NUM_KERNEL_PDE +
1377 				(drm_suballoc_soffset(sa_bo) /
1378 				 NUM_VMUSA_UNIT_PER_PAGE);
1379 			page_ofs = (drm_suballoc_soffset(sa_bo) %
1380 				    NUM_VMUSA_UNIT_PER_PAGE) *
1381 				VM_SA_UPDATE_UNIT_SIZE;
1382 		}
1383 
1384 		/* Map our PT's to gtt */
1385 		i = 0;
1386 		j = 0;
1387 		ptes = num_updates;
1388 		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
1389 		while (ptes) {
1390 			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
1391 			u32 idx = 0;
1392 
1393 			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
1394 				MI_SDI_NUM_QW(chunk);
1395 			bb->cs[bb->len++] = ofs;
1396 			bb->cs[bb->len++] = 0; /* upper_32_bits */
1397 
1398 			for (; i < pt_update_ops->num_ops; ++i) {
1399 				struct xe_vm_pgtable_update_op *pt_op =
1400 					&pt_update_ops->ops[i];
1401 				struct xe_vm_pgtable_update *updates = pt_op->entries;
1402 
1403 				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
1404 					struct xe_vm *vm = pt_update->vops->vm;
1405 					struct xe_bo *pt_bo = updates[j].pt_bo;
1406 
1407 					if (idx == chunk)
1408 						goto next_cmd;
1409 
1410 					xe_tile_assert(tile, pt_bo->size == SZ_4K);
1411 
1412 					/* Map a PT at most once */
1413 					if (pt_bo->update_index < 0)
1414 						pt_bo->update_index = current_update;
1415 
1416 					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
1417 									 pat_index, 0);
1418 					bb->cs[bb->len++] = lower_32_bits(addr);
1419 					bb->cs[bb->len++] = upper_32_bits(addr);
1420 				}
1421 
1422 				j = 0;
1423 			}
1424 
1425 next_cmd:
1426 			ptes -= chunk;
1427 			ofs += chunk * sizeof(u64);
1428 		}
1429 
1430 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1431 		update_idx = bb->len;
1432 
1433 		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
1434 			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
1435 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1436 			struct xe_vm_pgtable_update_op *pt_op =
1437 				&pt_update_ops->ops[i];
1438 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1439 
1440 			for (j = 0; j < pt_op->num_entries; ++j) {
1441 				struct xe_bo *pt_bo = updates[j].pt_bo;
1442 
1443 				write_pgtable(tile, bb, addr +
1444 					      pt_bo->update_index * XE_PAGE_SIZE,
1445 					      pt_op, &updates[j], pt_update);
1446 			}
1447 		}
1448 	} else {
1449 		/* phys pages, no preamble required */
1450 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1451 		update_idx = bb->len;
1452 
1453 		for (i = 0; i < pt_update_ops->num_ops; ++i) {
1454 			struct xe_vm_pgtable_update_op *pt_op =
1455 				&pt_update_ops->ops[i];
1456 			struct xe_vm_pgtable_update *updates = pt_op->entries;
1457 
1458 			for (j = 0; j < pt_op->num_entries; ++j)
1459 				write_pgtable(tile, bb, 0, pt_op, &updates[j],
1460 					      pt_update);
1461 		}
1462 	}
1463 
1464 	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
1465 					 xe_migrate_batch_base(m, usm),
1466 					 update_idx);
1467 	if (IS_ERR(job)) {
1468 		err = PTR_ERR(job);
1469 		goto err_sa;
1470 	}
1471 
1472 	if (ops->pre_commit) {
1473 		pt_update->job = job;
1474 		err = ops->pre_commit(pt_update);
1475 		if (err)
1476 			goto err_job;
1477 	}
1478 	if (is_migrate)
1479 		mutex_lock(&m->job_mutex);
1480 
1481 	xe_sched_job_arm(job);
1482 	fence = dma_fence_get(&job->drm.s_fence->finished);
1483 	xe_sched_job_push(job);
1484 
1485 	if (is_migrate)
1486 		mutex_unlock(&m->job_mutex);
1487 
1488 	xe_bb_free(bb, fence);
1489 	drm_suballoc_free(sa_bo, fence);
1490 
1491 	return fence;
1492 
1493 err_job:
1494 	xe_sched_job_put(job);
1495 err_sa:
1496 	drm_suballoc_free(sa_bo, NULL);
1497 err_bb:
1498 	xe_bb_free(bb, NULL);
1499 	return ERR_PTR(err);
1500 }
1501 
1502 /**
1503  * xe_migrate_update_pgtables() - Pipelined page-table update
1504  * @m: The migrate context.
1505  * @pt_update: PT update arguments
1506  *
1507  * Perform a pipelined page-table update. The update descriptors are typically
1508  * built under the same lock critical section as a call to this function. If
1509  * using the default engine for the updates, they will be performed in the
1510  * order they grab the job_mutex. If different engines are used, external
1511  * synchronization is needed for overlapping updates to maintain page-table
1512  * consistency. Note that the meaning of "overlapping" is that the updates
1513  * touch the same page-table, which might be a higher-level page-directory.
1514  * If no pipelining is needed, then updates may be performed by the cpu.
1515  *
1516  * Return: A dma_fence that, when signaled, indicates the update completion.
1517  */
1518 struct dma_fence *
1519 xe_migrate_update_pgtables(struct xe_migrate *m,
1520 			   struct xe_migrate_pt_update *pt_update)
1521 
1522 {
1523 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1524 		&pt_update->vops->pt_update_ops[pt_update->tile_id];
1525 	struct dma_fence *fence;
1526 
1527 	fence =  xe_migrate_update_pgtables_cpu(m, pt_update);
1528 
1529 	/* -ETIME indicates a job is needed, anything else is legit error */
1530 	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
1531 		return fence;
1532 
1533 	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
1534 }
1535 
1536 /**
1537  * xe_migrate_wait() - Complete all operations using the xe_migrate context
1538  * @m: Migrate context to wait for.
1539  *
1540  * Waits until the GPU no longer uses the migrate context's default engine
1541  * or its page-table objects. FIXME: What about separate page-table update
1542  * engines?
1543  */
1544 void xe_migrate_wait(struct xe_migrate *m)
1545 {
1546 	if (m->fence)
1547 		dma_fence_wait(m->fence, false);
1548 }
1549 
1550 static u32 pte_update_cmd_size(u64 size)
1551 {
1552 	u32 num_dword;
1553 	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
1554 
1555 	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
1556 	/*
1557 	 * The MI_STORE_DATA_IMM command is used to update the page table. Each
1558 	 * instruction can update at most 0x1ff PTE entries. To update
1559 	 * n (n <= 0x1ff) PTE entries, we need:
1560 	 * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
1561 	 * 2 dwords for the page table's physical location
1562 	 * 2*n dwords for the PTE values to fill (each PTE entry is 2 dwords)
1563 	 */
1564 	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
1565 	num_dword += entries * 2;
1566 
1567 	return num_dword;
1568 }
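/*
 * E.g. an 8MiB transfer maps to 2048 PTEs: five MI_STORE_DATA_IMM headers
 * (ceil(2048 / 0x1ff) = 5, 3 dwords each) plus 4096 dwords of PTE values,
 * 4111 dwords in total.
 */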
1569 
1570 static void build_pt_update_batch_sram(struct xe_migrate *m,
1571 				       struct xe_bb *bb, u32 pt_offset,
1572 				       dma_addr_t *sram_addr, u32 size)
1573 {
1574 	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
1575 	u32 ptes;
1576 	int i = 0;
1577 
1578 	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
1579 	while (ptes) {
1580 		u32 chunk = min(0x1ffU, ptes);
1581 
1582 		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
1583 		bb->cs[bb->len++] = pt_offset;
1584 		bb->cs[bb->len++] = 0;
1585 
1586 		pt_offset += chunk * 8;
1587 		ptes -= chunk;
1588 
1589 		while (chunk--) {
1590 			u64 addr = sram_addr[i++] & PAGE_MASK;
1591 
1592 			xe_tile_assert(m->tile, addr);
1593 			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
1594 								 addr, pat_index,
1595 								 0, false, 0);
1596 			bb->cs[bb->len++] = lower_32_bits(addr);
1597 			bb->cs[bb->len++] = upper_32_bits(addr);
1598 		}
1599 	}
1600 }
1601 
1602 enum xe_migrate_copy_dir {
1603 	XE_MIGRATE_COPY_TO_VRAM,
1604 	XE_MIGRATE_COPY_TO_SRAM,
1605 };
1606 
1607 #define XE_CACHELINE_BYTES	64ull
1608 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
1609 
1610 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
1611 					 unsigned long len,
1612 					 unsigned long sram_offset,
1613 					 dma_addr_t *sram_addr, u64 vram_addr,
1614 					 const enum xe_migrate_copy_dir dir)
1615 {
1616 	struct xe_gt *gt = m->tile->primary_gt;
1617 	struct xe_device *xe = gt_to_xe(gt);
1618 	bool use_usm_batch = xe->info.has_usm;
1619 	struct dma_fence *fence = NULL;
1620 	u32 batch_size = 2;
1621 	u64 src_L0_ofs, dst_L0_ofs;
1622 	struct xe_sched_job *job;
1623 	struct xe_bb *bb;
1624 	u32 update_idx, pt_slot = 0;
1625 	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
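	/*
	 * Pitch selection for the copy below: a page-multiple length is
	 * expressed as len / PAGE_SIZE rows of one page each, while any other
	 * (cacheline-aligned) length falls back to a 4-byte pitch so that
	 * len / pitch still divides evenly.
	 */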
1626 	unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
1627 		PAGE_SIZE : 4;
1628 	int err;
1629 
1630 	if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
1631 			(sram_offset | vram_addr) & XE_CACHELINE_MASK))
1632 		return ERR_PTR(-EOPNOTSUPP);
1633 
1634 	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
1635 
1636 	batch_size += pte_update_cmd_size(len);
1637 	batch_size += EMIT_COPY_DW;
1638 
1639 	bb = xe_bb_new(gt, batch_size, use_usm_batch);
1640 	if (IS_ERR(bb)) {
1641 		err = PTR_ERR(bb);
1642 		return ERR_PTR(err);
1643 	}
1644 
1645 	build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
1646 				   sram_addr, len + sram_offset);
1647 
1648 	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
1649 		src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
1650 		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
1651 
1652 	} else {
1653 		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
1654 		dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
1655 	}
1656 
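	/*
	 * Terminate the PTE-update section of the batch; the copy emitted
	 * after update_idx is run as a separate section by the migration job.
	 */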
1657 	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
1658 	update_idx = bb->len;
1659 
1660 	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
1661 
1662 	job = xe_bb_create_migration_job(m->q, bb,
1663 					 xe_migrate_batch_base(m, use_usm_batch),
1664 					 update_idx);
1665 	if (IS_ERR(job)) {
1666 		err = PTR_ERR(job);
1667 		goto err;
1668 	}
1669 
1670 	xe_sched_job_add_migrate_flush(job, 0);
1671 
1672 	mutex_lock(&m->job_mutex);
1673 	xe_sched_job_arm(job);
1674 	fence = dma_fence_get(&job->drm.s_fence->finished);
1675 	xe_sched_job_push(job);
1676 
1677 	dma_fence_put(m->fence);
1678 	m->fence = dma_fence_get(fence);
1679 	mutex_unlock(&m->job_mutex);
1680 
1681 	xe_bb_free(bb, fence);
1682 
1683 	return fence;
1684 
1685 err:
1686 	xe_bb_free(bb, NULL);
1687 
1688 	return ERR_PTR(err);
1689 }
1690 
1691 /**
1692  * xe_migrate_to_vram() - Migrate to VRAM
1693  * @m: The migration context.
1694  * @npages: Number of pages to migrate.
1695  * @src_addr: Array of dma addresses (source of migrate)
1696  * @dst_addr: Device physical address of VRAM (destination of migrate)
1697  *
1698  * Copy from an array of DMA addresses to a VRAM device physical address.
1699  *
1700  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
1701  * failure
1702  */
1703 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
1704 				     unsigned long npages,
1705 				     dma_addr_t *src_addr,
1706 				     u64 dst_addr)
1707 {
1708 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
1709 			       XE_MIGRATE_COPY_TO_VRAM);
1710 }
1711 
1712 /**
1713  * xe_migrate_from_vram() - Migrate from VRAM
1714  * @m: The migration context.
1715  * @npages: Number of pages to migrate.
1716  * @src_addr: Device physical address of VRAM (source of migrate)
1717  * @dst_addr: Array of dma addresses (destination of migrate)
1718  *
1719  * Copy from a VRAM device physical address to an array of DMA addresses.
1720  *
1721  * Return: dma fence for migrate to signal completion on success, ERR_PTR on
1722  * failure
1723  */
1724 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
1725 				       unsigned long npages,
1726 				       u64 src_addr,
1727 				       dma_addr_t *dst_addr)
1728 {
1729 	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
1730 			       XE_MIGRATE_COPY_TO_SRAM);
1731 }
1732 
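/* Unmap and free the DMA addresses previously set up by xe_migrate_dma_map() */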
1733 static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
1734 				 int len, int write)
1735 {
1736 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
1737 
1738 	for (i = 0; i < npages; ++i) {
1739 		if (!dma_addr[i])
1740 			break;
1741 
1742 		dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
1743 			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1744 	}
1745 	kfree(dma_addr);
1746 }
1747 
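/*
 * DMA-map the CPU pages backing @buf (kmalloc or vmalloc memory) for device
 * access and return an array of DMA addresses, one entry per page.
 */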
1748 static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
1749 				      void *buf, int len, int write)
1750 {
1751 	dma_addr_t *dma_addr;
1752 	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
1753 
1754 	dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
1755 	if (!dma_addr)
1756 		return ERR_PTR(-ENOMEM);
1757 
1758 	for (i = 0; i < npages; ++i) {
1759 		dma_addr_t addr;
1760 		struct page *page;
1761 
1762 		if (is_vmalloc_addr(buf))
1763 			page = vmalloc_to_page(buf);
1764 		else
1765 			page = virt_to_page(buf);
1766 
1767 		addr = dma_map_page(xe->drm.dev,
1768 				    page, 0, PAGE_SIZE,
1769 				    write ? DMA_TO_DEVICE :
1770 				    DMA_FROM_DEVICE);
1771 		if (dma_mapping_error(xe->drm.dev, addr))
1772 			goto err_fault;
1773 
1774 		dma_addr[i] = addr;
1775 		buf += PAGE_SIZE;
1776 	}
1777 
1778 	return dma_addr;
1779 
1780 err_fault:
1781 	xe_migrate_dma_unmap(xe, dma_addr, len, write);
1782 	return ERR_PTR(-EFAULT);
1783 }
1784 
1785 /**
1786  * xe_migrate_access_memory - Access memory of a BO via GPU
1787  *
1788  * @m: The migration context.
1789  * @bo: buffer object
1790  * @offset: access offset into buffer object
1791  * @buf: pointer to caller memory to read into or write from
1792  * @len: length of access
1793  * @write: write access
1794  *
1795  * Access memory of a BO via the GPU, either reading its contents into or
1796  * writing them from a passed-in pointer. The pointer is DMA-mapped for GPU
1797  * access and GPU copy commands are issued to perform the transfer.
1798  *
1799  * Returns:
1800  * 0 if successful, negative error code on failure.
1801  */
1802 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
1803 			     unsigned long offset, void *buf, int len,
1804 			     int write)
1805 {
1806 	struct xe_tile *tile = m->tile;
1807 	struct xe_device *xe = tile_to_xe(tile);
1808 	struct xe_res_cursor cursor;
1809 	struct dma_fence *fence = NULL;
1810 	dma_addr_t *dma_addr;
1811 	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
1812 	int bytes_left = len, current_page = 0;
1813 	void *orig_buf = buf;
1814 
1815 	xe_bo_assert_held(bo);
1816 
1817 	/* Use bounce buffer for small access and unaligned access */
1818 	if (len & XE_CACHELINE_MASK ||
1819 	    ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
1820 		int buf_offset = 0;
1821 
1822 		/*
1823 		 * Less than ideal for large unaligned accesses, but those should
1824 		 * be fairly rare; this can be fixed up if they become common.
1825 		 */
1826 		do {
1827 			u8 bounce[XE_CACHELINE_BYTES];
1828 			void *ptr = (void *)bounce;
1829 			int err;
1830 			int copy_bytes = min_t(int, bytes_left,
1831 					       XE_CACHELINE_BYTES -
1832 					       (offset & XE_CACHELINE_MASK));
1833 			int ptr_offset = offset & XE_CACHELINE_MASK;
1834 
1835 			err = xe_migrate_access_memory(m, bo,
1836 						       offset &
1837 						       ~XE_CACHELINE_MASK,
1838 						       (void *)ptr,
1839 						       sizeof(bounce), 0);
1840 			if (err)
1841 				return err;
1842 
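			/*
			 * For writes, merge the caller's bytes into the
			 * cacheline read above and write the whole line back.
			 */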
1843 			if (write) {
1844 				memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
1845 
1846 				err = xe_migrate_access_memory(m, bo,
1847 							       offset & ~XE_CACHELINE_MASK,
1848 							       (void *)ptr,
1849 							       sizeof(bounce), write);
1850 				if (err)
1851 					return err;
1852 			} else {
1853 				memcpy(buf + buf_offset, ptr + ptr_offset,
1854 				       copy_bytes);
1855 			}
1856 
1857 			bytes_left -= copy_bytes;
1858 			buf_offset += copy_bytes;
1859 			offset += copy_bytes;
1860 		} while (bytes_left);
1861 
1862 		return 0;
1863 	}
1864 
1865 	dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
1866 	if (IS_ERR(dma_addr))
1867 		return PTR_ERR(dma_addr);
1868 
1869 	xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
1870 
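	/*
	 * Copy in chunks, walking the BO's VRAM resource with the cursor and
	 * limiting each GPU job to MAX_PREEMPTDISABLE_TRANSFER bytes.
	 */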
1871 	do {
1872 		struct dma_fence *__fence;
1873 		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
1874 			cursor.start;
1875 		int current_bytes;
1876 
1877 		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
1878 			current_bytes = min_t(int, bytes_left,
1879 					      MAX_PREEMPTDISABLE_TRANSFER);
1880 		else
1881 			current_bytes = min_t(int, bytes_left, cursor.size);
1882 
1883 		if (fence)
1884 			dma_fence_put(fence);
1885 
1886 		__fence = xe_migrate_vram(m, current_bytes,
1887 					  (unsigned long)buf & ~PAGE_MASK,
1888 					  dma_addr + current_page,
1889 					  vram_addr, write ?
1890 					  XE_MIGRATE_COPY_TO_VRAM :
1891 					  XE_MIGRATE_COPY_TO_SRAM);
1892 		if (IS_ERR(__fence)) {
1893 			if (fence)
1894 				dma_fence_wait(fence, false);
1895 			fence = __fence;
1896 			goto out_err;
1897 		}
1898 		fence = __fence;
1899 
1900 		buf += current_bytes;
1901 		offset += current_bytes;
1902 		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
1903 		bytes_left -= current_bytes;
1904 		if (bytes_left)
1905 			xe_res_next(&cursor, current_bytes);
1906 	} while (bytes_left);
1907 
1908 	dma_fence_wait(fence, false);
1909 	dma_fence_put(fence);
1910 
1911 out_err:
1912 	xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
1913 	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
1914 }
1915 
1916 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
1917 #include "tests/xe_migrate.c"
1918 #endif
1919