// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2022 Intel Corporation
 */

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"
#include "xe_pm.h"

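/*
 * Wait up to five seconds for @fence and flag a KUnit failure on error or
 * timeout; returns true if the caller should treat the operation as failed.
 */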
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
				const char *str, struct kunit *test)
{
	long ret;

	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
			   PTR_ERR(fence));
		return true;
	}
	if (!fence)
		return true;

	ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
	if (ret <= 0) {
		KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
		return true;
	}

	return false;
}

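/*
 * Wrap @bb in a migration job on the migrate queue, submit it and wait for
 * completion, reporting failures against @str.
 */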
static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
			  struct xe_bb *bb, u32 second_idx, const char *str,
			  struct kunit *test)
{
	u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
	struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
							      batch_base,
							      second_idx);
	struct dma_fence *fence;

	if (IS_ERR(job)) {
		KUNIT_FAIL(test, "Failed to create migration job for %s: %li\n",
			   str, PTR_ERR(job));
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (sanity_fence_failed(xe, fence, str, test))
		return -ETIMEDOUT;

	dma_fence_put(fence);
	kunit_info(test, "%s: Job completed\n", str);
	return 0;
}

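/* Compare a read-back value against the expected one and log both on mismatch. */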
#define check(_retval, _expected, str, _test) \
	do { if ((_retval) != (_expected)) { \
		KUNIT_FAIL(_test, "Sanity check failed: " str \
			   " expected %llx, got %llx\n", \
			   (u64)(_expected), (u64)(_retval)); \
	} } while (0)

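/*
 * Clear a freshly created bo in @region, then copy data between it and @bo
 * in both directions, checking the first and last u64 of each destination.
 */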
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
		      struct kunit *test, u32 region)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	u64 retval, expected = 0;
	bool big = xe_bo_size(bo) >= SZ_2M;
	struct dma_fence *fence;
	const char *str = big ? "Copying big bo" : "Copying small bo";
	int err;

	struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
						   xe_bo_size(bo),
						   ttm_bo_type_kernel,
						   region |
						   XE_BO_FLAG_NEEDS_CPU_ACCESS |
						   XE_BO_FLAG_PINNED);
	if (IS_ERR(remote)) {
		KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
			   str, remote);
		return;
	}

	err = xe_bo_validate(remote, NULL, false);
	if (err) {
		KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	err = xe_bo_vmap(remote);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
	fence = xe_migrate_clear(m, remote, remote->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
				 "Clearing remote small bo", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected, "remote first offset should be cleared",
		      test);
		retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64);
		check(retval, expected, "remote last offset should be cleared",
		      test);
	}
	dma_fence_put(fence);

	/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
	xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote));
	xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo));

	expected = 0xc0c0c0c0c0c0c0c0;
	fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
				bo->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo remote -> vram" :
				 "Copying small bo remote -> vram", test)) {
		retval = xe_map_rd(xe, &bo->vmap, 0, u64);
		check(retval, expected,
		      "remote -> vram bo first offset should be copied", test);
		retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64);
		check(retval, expected,
		      "remote -> vram bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	/* And the other way around.. slightly hacky.. */
	xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote));
	xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo));

	fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
				remote->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" :
				 "Copying small bo vram -> remote", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected,
		      "vram -> remote bo first offset should be copied", test);
		retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(bo) - 8, u64);
		check(retval, expected,
		      "vram -> remote bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	xe_bo_vunmap(remote);
out_unlock:
	xe_bo_unlock(remote);
	xe_bo_put(remote);
}

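/*
 * Copy helpers: test_copy_sysmem() uses a system-memory remote bo, while
 * test_copy_vram() targets the opposite VRAM placement (VRAM0 <-> VRAM1).
 */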
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
			     struct kunit *test)
{
	test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}

static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
			   struct kunit *test)
{
	u32 region;

	if (bo->ttm.resource->mem_type == XE_PL_SYSTEM)
		return;

	if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
		region = XE_BO_FLAG_VRAM1;
	else
		region = XE_BO_FLAG_VRAM0;
	test_copy(m, bo, test, region);
}

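/*
 * Per-tile sanity test: emit a PTE for a fake page table through the migrate
 * batch buffer, write through the new mapping, then run full clears and
 * copies on a tiny (2 * SZ_4K) and a big (SZ_4M) bo.
 */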
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
	struct xe_tile *tile = m->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
	struct xe_res_cursor src_it;
	struct dma_fence *fence;
	u64 retval, expected;
	struct xe_bb *bb;
	int err;
	u8 id = tile->id;

	err = xe_bo_vmap(bo);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
		return;
	}

	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
				   ttm_bo_type_kernel,
				   XE_BO_FLAG_VRAM_IF_DGFX(tile));
	if (IS_ERR(big)) {
		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
		goto vunmap;
	}

	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile));
	if (IS_ERR(pt)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		goto free_big;
	}

	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
				    2 * SZ_4K,
				    ttm_bo_type_kernel,
				    XE_BO_FLAG_VRAM_IF_DGFX(tile));
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}

	bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
	if (IS_ERR(bb)) {
		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
			   PTR_ERR(bb));
		goto free_tiny;
	}

	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
		   (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
		   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));

	/* First part of the test, are we updating our pagetable bo with a new entry? */
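	/*
	 * emit_pte() below writes a PTE for the fake pt bo into slot
	 * NUM_KERNEL_PDE - 1 of the migrate pagetable bo; reading that slot
	 * back should match the encoding produced by pte_encode_bo() for the
	 * same bo.
	 */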
	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
		  0xdeaddeadbeefbeef);
	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
	if (m->q->vm->flags & XE_VM_FLAG_64K)
		expected |= XE_PTE_PS64;
	if (xe_bo_is_vram(pt))
		xe_res_first(pt->ttm.resource, 0, xe_bo_size(pt), &src_it);
	else
		xe_res_first_sg(xe_bo_sg(pt), 0, xe_bo_size(pt), &src_it);

	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
		 &src_it, XE_PAGE_SIZE, pt->ttm.resource);

	run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);

	retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
			   u64);
	check(retval, expected, "PTE entry write", test);

	/* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
	bb->len = 0;
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
	expected = 0;

	emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
		   IS_DGFX(xe));
	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
		       test);

	retval = xe_map_rd(xe, &pt->vmap, 0, u32);
	check(retval, expected, "Write to PT after adding PTE", test);

	/* Sanity checks passed, try the full ones! */

	/* Clear a small bo */
	kunit_info(test, "Clearing small buffer object\n");
	xe_map_memset(xe, &tiny->vmap, 0, 0x22, xe_bo_size(tiny));
	expected = 0;
	fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
	check(retval, expected, "Command clear small first value", test);
	retval = xe_map_rd(xe, &tiny->vmap, xe_bo_size(tiny) - 4, u32);
	check(retval, expected, "Command clear small last value", test);

	kunit_info(test, "Copying small buffer object to system\n");
	test_copy_sysmem(m, tiny, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying small buffer object to other vram\n");
		test_copy_vram(m, tiny, test);
	}

	/* Clear a big bo */
	kunit_info(test, "Clearing big buffer object\n");
	xe_map_memset(xe, &big->vmap, 0, 0x11, xe_bo_size(big));
	expected = 0;
	fence = xe_migrate_clear(m, big, big->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &big->vmap, 0, u32);
	check(retval, expected, "Command clear big first value", test);
	retval = xe_map_rd(xe, &big->vmap, xe_bo_size(big) - 4, u32);
	check(retval, expected, "Command clear big last value", test);

	kunit_info(test, "Copying big buffer object to system\n");
	test_copy_sysmem(m, big, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying big buffer object to other vram\n");
		test_copy_vram(m, big, test);
	}

out:
	xe_bb_free(bb, NULL);
free_tiny:
	xe_bo_unpin(tiny);
	xe_bo_put(tiny);
free_pt:
	xe_bo_unpin(pt);
	xe_bo_put(pt);
free_big:
	xe_bo_unpin(big);
	xe_bo_put(big);
vunmap:
	xe_bo_vunmap(m->pt_bo);
}

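/*
 * Run the migrate sanity test on every tile, holding the migrate VM lock and
 * a runtime PM reference for the duration.
 */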
static int migrate_test_run_device(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_tile *tile;
	int id;

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id) {
		struct xe_migrate *m = tile->migrate;

		kunit_info(test, "Testing tile id %d.\n", id);
		xe_vm_lock(m->q->vm, false);
		xe_migrate_sanity_test(m, test);
		xe_vm_unlock(m->q->vm);
	}

	xe_pm_runtime_put(xe);

	return 0;
}

static void xe_migrate_sanity_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	migrate_test_run_device(xe);
}

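/*
 * Minimal blitter copy between two bos, split into passes that fit the page
 * tables reserved for the migrate context. With copy_only_ccs set, only the
 * CCS (compression control surface) metadata is copied instead of the main
 * surface data.
 */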
static struct dma_fence *blt_copy(struct xe_tile *tile,
				  struct xe_bo *src_bo, struct xe_bo *dst_bo,
				  bool copy_only_ccs, const char *str, struct kunit *test)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_migrate *m = tile->migrate;
	struct xe_device *xe = gt_to_xe(gt);
	struct dma_fence *fence = NULL;
	u64 size = xe_bo_size(src_bo);
	struct xe_res_cursor src_it, dst_it;
	struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
	u64 src_L0_ofs, dst_L0_ofs;
	u32 src_L0_pt, dst_L0_pt;
	u64 src_L0, dst_L0;
	int err;
	bool src_is_vram = mem_type_is_vram(src->mem_type);
	bool dst_is_vram = mem_type_is_vram(dst->mem_type);

	if (!src_is_vram)
		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
	else
		xe_res_first(src, 0, size, &src_it);

	if (!dst_is_vram)
		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
	else
		xe_res_first(dst, 0, size, &dst_it);

	while (size) {
		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
		struct xe_sched_job *job;
		struct xe_bb *bb;
		u32 flush_flags = 0;
		u32 update_idx;
		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
		u32 pte_flags;

		src_L0 = xe_migrate_res_sizes(m, &src_it);
		dst_L0 = xe_migrate_res_sizes(m, &dst_it);

		src_L0 = min(src_L0, dst_L0);

		pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
					   PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
					      &src_L0_ofs, &src_L0_pt, 0, 0,
					      avail_pts);

		pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
					   PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
		batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
					      &dst_L0_ofs, &dst_L0_pt, 0,
					      avail_pts, avail_pts);

		/* Add copy commands size here */
		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
			((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);

		bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
		if (IS_ERR(bb)) {
			err = PTR_ERR(bb);
			goto err_sync;
		}

		if (src_is_vram)
			xe_res_next(&src_it, src_L0);
		else
			emit_pte(m, bb, src_L0_pt, src_is_vram, false,
				 &src_it, src_L0, src);

		if (dst_is_vram)
			xe_res_next(&dst_it, src_L0);
		else
			emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
				 &dst_it, src_L0, dst);

		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;
		if (!copy_only_ccs)
			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);

		if (copy_only_ccs)
			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
							  src_is_vram, dst_L0_ofs,
							  dst_is_vram, src_L0, dst_L0_ofs,
							  copy_only_ccs);

		job = xe_bb_create_migration_job(m->q, bb,
						 xe_migrate_batch_base(m, xe->info.has_usm),
						 update_idx);
		if (IS_ERR(job)) {
			err = PTR_ERR(job);
			goto err;
		}

		xe_sched_job_add_migrate_flush(job, flush_flags);

		mutex_lock(&m->job_mutex);
		xe_sched_job_arm(job);
		dma_fence_put(fence);
		fence = dma_fence_get(&job->drm.s_fence->finished);
		xe_sched_job_push(job);

		dma_fence_put(m->fence);
		m->fence = dma_fence_get(fence);

		mutex_unlock(&m->job_mutex);

		xe_bb_free(bb, fence);
		size -= src_L0;
		continue;

err:
		xe_bb_free(bb, NULL);

err_sync:
		if (fence) {
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}
		return ERR_PTR(err);
	}

	return fence;
}

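/*
 * Exercise compressed VRAM contents across eviction and restore: blit data
 * into vram_bo, evict it and check the backed-up contents, verify the CCS
 * metadata reads back as zero, then validate the bo back into VRAM and
 * re-check both data and CCS state.
 */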
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
			 struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
			 struct kunit *test)
{
	struct dma_fence *fence;
	u64 expected, retval;
	long timeout;
	long ret;

	expected = 0xd0d0d0d0d0d0d0d0;
	xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));

	fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
		retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
		if (retval == expected)
			KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
	}
	dma_fence_put(fence);

	kunit_info(test, "Evict vram buffer object\n");
	ret = xe_bo_evict(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to evict bo.\n");
		return;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		return;
	}

	retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
	check(retval, expected, "Clear evicted vram data first value", test);
	retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
	check(retval, expected, "Clear evicted vram data last value", test);

	fence = blt_copy(tile, vram_bo, ccs_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
		check(retval, 0, "Clear ccs data first value", test);

		retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
		check(retval, 0, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);

	kunit_info(test, "Restore vram buffer object\n");
	ret = xe_bo_validate(vram_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate vram bo: %li\n", ret);
		return;
	}

	/* Sync all migration blits */
	timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
					DMA_RESV_USAGE_KERNEL,
					true,
					5 * HZ);
	if (timeout <= 0) {
		KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
		return;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		return;
	}

	retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
	check(retval, expected, "Restored value must be equal to initial value", test);
	retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64);
	check(retval, expected, "Restored value must be equal to initial value", test);

	fence = blt_copy(tile, vram_bo, ccs_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
		check(retval, 0, "Clear ccs data first value", test);
		retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64);
		check(retval, 0, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);
}

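/*
 * Verify that data round-trips through compressed VRAM and that a full
 * migrate clear wipes both the main surface and its CCS metadata.
 */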
static void test_clear(struct xe_device *xe, struct xe_tile *tile,
		       struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
{
	struct dma_fence *fence;
	u64 expected, retval;

	expected = 0xd0d0d0d0d0d0d0d0;
	xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo));

	fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
		retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
		if (retval == expected)
			KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
	}
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Decompressed value must be equal to initial value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
		check(retval, expected, "Decompressed value must be equal to initial value", test);
	}
	dma_fence_put(fence);

	kunit_info(test, "Clear vram buffer object\n");
	expected = 0x0000000000000000;
	fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
		return;
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo,
			 false, "Blit copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Clear main buffer first value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
		check(retval, expected, "Clear main buffer last value", test);
	}
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Clear ccs data first value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64);
		check(retval, expected, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);
}

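/*
 * Set up a system bo, a VRAM bo and a scratch bo for CCS readback, all
 * CPU-accessible and pinned, then run the clear and migrate tests on this
 * tile.
 */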
static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
				       struct kunit *test)
{
	struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
	long ret;

	sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				   DRM_XE_GEM_CPU_CACHING_WC,
				   XE_BO_FLAG_SYSTEM |
				   XE_BO_FLAG_NEEDS_CPU_ACCESS |
				   XE_BO_FLAG_PINNED);

	if (IS_ERR(sys_bo)) {
		KUNIT_FAIL(test, "xe_bo_create_user() failed with err=%ld\n",
			   PTR_ERR(sys_bo));
		return;
	}

	xe_bo_lock(sys_bo, false);
	ret = xe_bo_validate(sys_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate system bo: %li\n", ret);
		goto free_sysbo;
	}

	ret = xe_bo_vmap(sys_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
		goto free_sysbo;
	}
	xe_bo_unlock(sys_bo);

	ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				   DRM_XE_GEM_CPU_CACHING_WC,
				   bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
				   XE_BO_FLAG_PINNED);

	if (IS_ERR(ccs_bo)) {
		KUNIT_FAIL(test, "xe_bo_create_user() failed with err=%ld\n",
			   PTR_ERR(ccs_bo));
		return;
	}

	xe_bo_lock(ccs_bo, false);
	ret = xe_bo_validate(ccs_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate ccs bo: %li\n", ret);
		goto free_ccsbo;
	}

	ret = xe_bo_vmap(ccs_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap ccs bo: %li\n", ret);
		goto free_ccsbo;
	}
	xe_bo_unlock(ccs_bo);

	vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				    DRM_XE_GEM_CPU_CACHING_WC,
				    bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
				    XE_BO_FLAG_PINNED);
	if (IS_ERR(vram_bo)) {
		KUNIT_FAIL(test, "xe_bo_create_user() failed with err=%ld\n",
			   PTR_ERR(vram_bo));
		return;
	}

	xe_bo_lock(vram_bo, false);
	ret = xe_bo_validate(vram_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate vram bo: %li\n", ret);
		goto free_vrambo;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		goto free_vrambo;
	}

	test_clear(xe, tile, sys_bo, vram_bo, test);
	test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
	xe_bo_unlock(vram_bo);

	xe_bo_lock(vram_bo, false);
	xe_bo_vunmap(vram_bo);
	xe_bo_unlock(vram_bo);

	xe_bo_lock(ccs_bo, false);
	xe_bo_vunmap(ccs_bo);
	xe_bo_unlock(ccs_bo);

	xe_bo_lock(sys_bo, false);
	xe_bo_vunmap(sys_bo);
	xe_bo_unlock(sys_bo);
free_vrambo:
	xe_bo_put(vram_bo);
free_ccsbo:
	xe_bo_put(ccs_bo);
free_sysbo:
	xe_bo_put(sys_bo);
}

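/*
 * CCS validation only applies to discrete Xe2+ devices with flat CCS; skip
 * otherwise, then run the per-tile test with runtime PM held.
 */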
static int validate_ccs_test_run_device(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_tile *tile;
	int id;

	if (!xe_device_has_flat_ccs(xe)) {
		kunit_skip(test, "non-flat-ccs device\n");
		return 0;
	}

	if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
		kunit_skip(test, "non-xe2 discrete device\n");
		return 0;
	}

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id)
		validate_ccs_test_run_tile(xe, tile, test);

	xe_pm_runtime_put(xe);

	return 0;
}

static void xe_validate_ccs_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	validate_ccs_test_run_device(xe);
}

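/* Both cases are parameterized over the live Xe devices present at run time. */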
static struct kunit_case xe_migrate_tests[] = {
	KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
	KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_migrate_test_suite = {
	.name = "xe_migrate",
	.test_cases = xe_migrate_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);