// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2022 Intel Corporation
 */

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"
#include "xe_pm.h"

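/*
 * Wait up to five seconds for @fence to signal. Flags a KUnit failure and
 * returns true if the fence is an error pointer or the wait times out; a
 * NULL fence is also treated as a failure. Returns false on success.
 */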
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
				const char *str, struct kunit *test)
{
	long ret;

	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
			   PTR_ERR(fence));
		return true;
	}
	if (!fence)
		return true;

	ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
	if (ret <= 0) {
		KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
		return true;
	}

	return false;
}

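/*
 * Wrap @bb in a migration job on @m's exec queue, submit it and wait for
 * completion. Returns 0 on success or a negative error code on failure.
 */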
static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
			  struct xe_bb *bb, u32 second_idx, const char *str,
			  struct kunit *test)
{
	u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
	struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
							      batch_base,
							      second_idx);
	struct dma_fence *fence;

	if (IS_ERR(job)) {
		KUNIT_FAIL(test, "Failed to create migration job: %li\n",
			   PTR_ERR(job));
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (sanity_fence_failed(xe, fence, str, test))
		return -ETIMEDOUT;

	dma_fence_put(fence);
	kunit_info(test, "%s: Job completed\n", str);
	return 0;
}

#define check(_retval, _expected, str, _test)				\
	do { if ((_retval) != (_expected)) {				\
			KUNIT_FAIL(_test, "Sanity check failed: " str	\
				   " expected %llx, got %llx\n",	\
				   (u64)(_expected), (u64)(_retval));	\
		} } while (0)

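/*
 * Copy @bo to and from a freshly created bo in @region, clearing the
 * remote bo first and checking the first and last u64 after each blit.
 */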
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
		      struct kunit *test, u32 region)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	u64 retval, expected = 0;
	bool big = bo->size >= SZ_2M;
	struct dma_fence *fence;
	const char *str = big ? "Copying big bo" : "Copying small bo";
	int err;

	struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
						   bo->size,
						   ttm_bo_type_kernel,
						   region |
						   XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(remote)) {
		KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
			   str, remote);
		return;
	}

	err = xe_bo_validate(remote, NULL, false);
	if (err) {
		KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	err = xe_bo_vmap(remote);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	fence = xe_migrate_clear(m, remote, remote->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
				 "Clearing remote small bo", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected, "remote first offset should be cleared",
		      test);
		retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
		check(retval, expected, "remote last offset should be cleared",
		      test);
	}
	dma_fence_put(fence);

	/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
	xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);

	expected = 0xc0c0c0c0c0c0c0c0;
	fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
				bo->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo remote -> vram" :
				 "Copying small bo remote -> vram", test)) {
		retval = xe_map_rd(xe, &bo->vmap, 0, u64);
		check(retval, expected,
		      "remote -> vram bo first offset should be copied", test);
		retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "remote -> vram bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	/* And other way around.. slightly hacky.. */
	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);

	fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
				remote->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" :
				 "Copying small bo vram -> remote", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected,
		      "vram -> remote bo first offset should be copied", test);
		retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "vram -> remote bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	xe_bo_vunmap(remote);
out_unlock:
	xe_bo_unlock(remote);
	xe_bo_put(remote);
}

static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
			     struct kunit *test)
{
	test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}

static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
			   struct kunit *test)
{
	u32 region;

	if (bo->ttm.resource->mem_type == XE_PL_SYSTEM)
		return;

	if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
		region = XE_BO_FLAG_VRAM1;
	else
		region = XE_BO_FLAG_VRAM0;
	test_copy(m, bo, test, region);
}

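/*
 * Exercise the migrate engine of @m's tile: write a PTE for a fake page
 * table and clear through the new mapping, then clear and copy a tiny
 * (2 * SZ_4K) and a big (SZ_4M) bo, checking the results via the CPU.
 */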
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
	struct xe_tile *tile = m->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
	struct xe_res_cursor src_it;
	struct dma_fence *fence;
	u64 retval, expected;
	struct xe_bb *bb;
	int err;
	u8 id = tile->id;

	err = xe_bo_vmap(bo);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
		return;
	}

	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
				   ttm_bo_type_kernel,
				   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				   XE_BO_FLAG_PINNED);
	if (IS_ERR(big)) {
		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
		goto vunmap;
	}

	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(pt)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		goto free_big;
	}

	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
				    2 * SZ_4K,
				    ttm_bo_type_kernel,
				    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				    XE_BO_FLAG_PINNED);
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}

	bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
	if (IS_ERR(bb)) {
		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
			   PTR_ERR(bb));
		goto free_tiny;
	}

	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
		   (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
		   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));

	/* First part of the test, are we updating our pagetable bo with a new entry? */
	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
		  0xdeaddeadbeefbeef);
	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
	if (m->q->vm->flags & XE_VM_FLAG_64K)
		expected |= XE_PTE_PS64;
	if (xe_bo_is_vram(pt))
		xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
	else
		xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);

	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
		 &src_it, XE_PAGE_SIZE, pt->ttm.resource);

	run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);

	retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
			   u64);
	check(retval, expected, "PTE entry write", test);

	/* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
	bb->len = 0;
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
	expected = 0;

	emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
		   IS_DGFX(xe));
	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
		       test);

	retval = xe_map_rd(xe, &pt->vmap, 0, u32);
	check(retval, expected, "Write to PT after adding PTE", test);

	/* Sanity checks passed, try the full ones! */

	/* Clear a small bo */
	kunit_info(test, "Clearing small buffer object\n");
	xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
	expected = 0;
	fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
	check(retval, expected, "Command clear small first value", test);
	retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
	check(retval, expected, "Command clear small last value", test);

	kunit_info(test, "Copying small buffer object to system\n");
	test_copy_sysmem(m, tiny, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying small buffer object to other vram\n");
		test_copy_vram(m, tiny, test);
	}

	/* Clear a big bo */
	kunit_info(test, "Clearing big buffer object\n");
	xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
	expected = 0;
	fence = xe_migrate_clear(m, big, big->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &big->vmap, 0, u32);
	check(retval, expected, "Command clear big first value", test);
	retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
	check(retval, expected, "Command clear big last value", test);

	kunit_info(test, "Copying big buffer object to system\n");
	test_copy_sysmem(m, big, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying big buffer object to other vram\n");
		test_copy_vram(m, big, test);
	}

out:
	xe_bb_free(bb, NULL);
free_tiny:
	xe_bo_unpin(tiny);
	xe_bo_put(tiny);
free_pt:
	xe_bo_unpin(pt);
	xe_bo_put(pt);
free_big:
	xe_bo_unpin(big);
	xe_bo_put(big);
vunmap:
	xe_bo_vunmap(m->pt_bo);
}

static int migrate_test_run_device(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_tile *tile;
	int id;

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id) {
		struct xe_migrate *m = tile->migrate;

		kunit_info(test, "Testing tile id %d.\n", id);
		xe_vm_lock(m->q->vm, false);
		xe_migrate_sanity_test(m, test);
		xe_vm_unlock(m->q->vm);
	}

	xe_pm_runtime_put(xe);

	return 0;
}

static void xe_migrate_sanity_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	migrate_test_run_device(xe);
}

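/*
 * Test-local copy helper modelled on the migrate copy path: walk the
 * source and destination bos chunk by chunk, emit PTEs for placements
 * that need them and emit either a full copy or a CCS-only copy per
 * chunk, returning the fence of the last submitted job.
 */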
static struct dma_fence *blt_copy(struct xe_tile *tile,
				  struct xe_bo *src_bo, struct xe_bo *dst_bo,
				  bool copy_only_ccs, const char *str, struct kunit *test)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_migrate *m = tile->migrate;
	struct xe_device *xe = gt_to_xe(gt);
	struct dma_fence *fence = NULL;
	u64 size = src_bo->size;
	struct xe_res_cursor src_it, dst_it;
	struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
	u64 src_L0_ofs, dst_L0_ofs;
	u32 src_L0_pt, dst_L0_pt;
	u64 src_L0, dst_L0;
	int err;
	bool src_is_vram = mem_type_is_vram(src->mem_type);
	bool dst_is_vram = mem_type_is_vram(dst->mem_type);

	if (!src_is_vram)
		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
	else
		xe_res_first(src, 0, size, &src_it);

	if (!dst_is_vram)
		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
	else
		xe_res_first(dst, 0, size, &dst_it);

	while (size) {
		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
		struct xe_sched_job *job;
		struct xe_bb *bb;
		u32 flush_flags = 0;
		u32 update_idx;
		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
		u32 pte_flags;

		src_L0 = xe_migrate_res_sizes(m, &src_it);
		dst_L0 = xe_migrate_res_sizes(m, &dst_it);

		src_L0 = min(src_L0, dst_L0);

		pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
					   PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
					      &src_L0_ofs, &src_L0_pt, 0, 0,
					      avail_pts);

		pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
					   PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
		batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
					      &dst_L0_ofs, &dst_L0_pt, 0,
					      avail_pts, avail_pts);

		/* Add copy commands size here */
		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
			((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);

		bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
		if (IS_ERR(bb)) {
			err = PTR_ERR(bb);
			goto err_sync;
		}

		if (src_is_vram)
			xe_res_next(&src_it, src_L0);
		else
			emit_pte(m, bb, src_L0_pt, src_is_vram, false,
				 &src_it, src_L0, src);

		if (dst_is_vram)
			xe_res_next(&dst_it, src_L0);
		else
			emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
				 &dst_it, src_L0, dst);

		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;
		if (!copy_only_ccs)
			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);

		if (copy_only_ccs)
			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
							  src_is_vram, dst_L0_ofs,
							  dst_is_vram, src_L0, dst_L0_ofs,
							  copy_only_ccs);

		job = xe_bb_create_migration_job(m->q, bb,
						 xe_migrate_batch_base(m, xe->info.has_usm),
						 update_idx);
		if (IS_ERR(job)) {
			err = PTR_ERR(job);
			goto err;
		}

		xe_sched_job_add_migrate_flush(job, flush_flags);

		mutex_lock(&m->job_mutex);
		xe_sched_job_arm(job);
		dma_fence_put(fence);
		fence = dma_fence_get(&job->drm.s_fence->finished);
		xe_sched_job_push(job);

		dma_fence_put(m->fence);
		m->fence = dma_fence_get(fence);

		mutex_unlock(&m->job_mutex);

		xe_bb_free(bb, fence);
		size -= src_L0;
		continue;

err:
		xe_bb_free(bb, NULL);

err_sync:
		if (fence) {
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}
		return ERR_PTR(err);
	}

	return fence;
}

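/*
 * Blit compressible data into @vram_bo, evict and restore it, and check
 * that the main surface survives the round trip while the CCS metadata
 * read back through @ccs_bo stays cleared.
 */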
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
			 struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
			 struct kunit *test)
{
	struct dma_fence *fence;
	u64 expected, retval;
	long timeout;
	long ret;

	expected = 0xd0d0d0d0d0d0d0d0;
	xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);

	fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
		retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
		if (retval == expected)
			KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
	}
	dma_fence_put(fence);

	kunit_info(test, "Evict vram buffer object\n");
	ret = xe_bo_evict(vram_bo, true);
	if (ret) {
		KUNIT_FAIL(test, "Failed to evict bo.\n");
		return;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		return;
	}

	retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
	check(retval, expected, "Clear evicted vram data first value", test);
	retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
	check(retval, expected, "Clear evicted vram data last value", test);

	fence = blt_copy(tile, vram_bo, ccs_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
		check(retval, 0, "Clear ccs data first value", test);

		retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
		check(retval, 0, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);

	kunit_info(test, "Restore vram buffer object\n");
	ret = xe_bo_validate(vram_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
		return;
	}

	/* Sync all migration blits */
	timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
					DMA_RESV_USAGE_KERNEL,
					true,
					5 * HZ);
	if (timeout <= 0) {
		KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
		return;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		return;
	}

	retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
	check(retval, expected, "Restored value must be equal to initial value", test);
	retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
	check(retval, expected, "Restored value must be equal to initial value", test);

	fence = blt_copy(tile, vram_bo, ccs_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
		check(retval, 0, "Clear ccs data first value", test);
		retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
		check(retval, 0, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);
}

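/*
 * Blit compressible data into @vram_bo, verify it decompresses correctly
 * back to @sys_bo, then clear the vram bo and check that both the main
 * surface and the CCS metadata read back as zero.
 */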
static void test_clear(struct xe_device *xe, struct xe_tile *tile,
		       struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
{
	struct dma_fence *fence;
	u64 expected, retval;

	expected = 0xd0d0d0d0d0d0d0d0;
	xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);

	fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
		retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
		if (retval == expected)
			KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
	}
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Decompressed value must be equal to initial value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
		check(retval, expected, "Decompressed value must be equal to initial value", test);
	}
	dma_fence_put(fence);

	kunit_info(test, "Clear vram buffer object\n");
	expected = 0x0000000000000000;
	fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
		return;
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo,
			 false, "Blit copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Clear main buffer first value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
		check(retval, expected, "Clear main buffer last value", test);
	}
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Clear ccs data first value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
		check(retval, expected, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);
}

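/*
 * Create a system bo, a vram bo and a scratch "ccs" bo on @tile, map them
 * for CPU access and run the clear and migrate checks against them.
 */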
static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
				       struct kunit *test)
{
	struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
	long ret;

	sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				   DRM_XE_GEM_CPU_CACHING_WC,
				   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);

	if (IS_ERR(sys_bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(sys_bo));
		return;
	}

	xe_bo_lock(sys_bo, false);
	ret = xe_bo_validate(sys_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
		goto free_sysbo;
	}

	ret = xe_bo_vmap(sys_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
		goto free_sysbo;
	}
	xe_bo_unlock(sys_bo);

	ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				   DRM_XE_GEM_CPU_CACHING_WC,
				   bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);

	if (IS_ERR(ccs_bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(ccs_bo));
		return;
	}

	xe_bo_lock(ccs_bo, false);
	ret = xe_bo_validate(ccs_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate ccs bo for: %li\n", ret);
		goto free_ccsbo;
	}

	ret = xe_bo_vmap(ccs_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap ccs bo: %li\n", ret);
		goto free_ccsbo;
	}
	xe_bo_unlock(ccs_bo);

	vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				    DRM_XE_GEM_CPU_CACHING_WC,
				    bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(vram_bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(vram_bo));
		return;
	}

	xe_bo_lock(vram_bo, false);
	ret = xe_bo_validate(vram_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
		goto free_vrambo;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		goto free_vrambo;
	}

	test_clear(xe, tile, sys_bo, vram_bo, test);
	test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
	xe_bo_unlock(vram_bo);

	xe_bo_lock(vram_bo, false);
	xe_bo_vunmap(vram_bo);
	xe_bo_unlock(vram_bo);

	xe_bo_lock(ccs_bo, false);
	xe_bo_vunmap(ccs_bo);
	xe_bo_unlock(ccs_bo);

	xe_bo_lock(sys_bo, false);
	xe_bo_vunmap(sys_bo);
	xe_bo_unlock(sys_bo);
free_vrambo:
	xe_bo_put(vram_bo);
free_ccsbo:
	xe_bo_put(ccs_bo);
free_sysbo:
	xe_bo_put(sys_bo);
}

static int validate_ccs_test_run_device(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_tile *tile;
	int id;

	if (!xe_device_has_flat_ccs(xe)) {
		kunit_skip(test, "non-flat-ccs device\n");
		return 0;
	}

	if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
		kunit_skip(test, "non-xe2 discrete device\n");
		return 0;
	}

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id)
		validate_ccs_test_run_tile(xe, tile, test);

	xe_pm_runtime_put(xe);

	return 0;
}

static void xe_validate_ccs_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	validate_ccs_test_run_device(xe);
}

static struct kunit_case xe_migrate_tests[] = {
	KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
	KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_migrate_test_suite = {
	.name = "xe_migrate",
	.test_cases = xe_migrate_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);