// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2022 Intel Corporation
 */

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"
#include "xe_pm.h"

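/*
 * Wait for a migration fence and report problems to KUnit. Returns true if
 * the fence is an error pointer or NULL, or if it fails to signal within
 * five seconds; only the error and timeout cases log a KUNIT_FAIL.
 */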
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
				const char *str, struct kunit *test)
{
	long ret;

	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
			   PTR_ERR(fence));
		return true;
	}
	if (!fence)
		return true;

	ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
	if (ret <= 0) {
		KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
		return true;
	}

	return false;
}

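/*
 * Turn a prepared batch buffer into a job on the migrate queue, submit it
 * and wait for completion. Returns 0 on success, the job creation error,
 * or -ETIMEDOUT if the job fence never signalled.
 */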
static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
			  struct xe_bb *bb, u32 second_idx, const char *str,
			  struct kunit *test)
{
	u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
	struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
							      batch_base,
							      second_idx);
	struct dma_fence *fence;

	if (IS_ERR(job)) {
		KUNIT_FAIL(test, "Failed to create migration job for %s: %li\n",
			   str, PTR_ERR(job));
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (sanity_fence_failed(xe, fence, str, test)) {
		dma_fence_put(fence);
		return -ETIMEDOUT;
	}

	dma_fence_put(fence);
	kunit_info(test, "%s: Job completed\n", str);
	return 0;
}

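/* Fail the test (without aborting it) if _retval does not match _expected. */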
#define check(_retval, _expected, str, _test) \
	do { if ((_retval) != (_expected)) { \
		KUNIT_FAIL(_test, "Sanity check failed: " str \
			   " expected %llx, got %llx\n", \
			   (u64)(_expected), (u64)(_retval)); \
	} } while (0)

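/*
 * Copy test between @bo and a freshly allocated "remote" bo placed in
 * @region: clear the remote bo, copy remote -> @bo and then @bo -> remote,
 * verifying the first and last u64 of the destination after each step.
 */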
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
		      struct kunit *test, u32 region)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	u64 retval, expected = 0;
	bool big = bo->size >= SZ_2M;
	struct dma_fence *fence;
	const char *str = big ? "Copying big bo" : "Copying small bo";
	int err;

	struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
						   bo->size,
						   ttm_bo_type_kernel,
						   region |
						   XE_BO_FLAG_NEEDS_CPU_ACCESS |
						   XE_BO_FLAG_PINNED);
	if (IS_ERR(remote)) {
		KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
			   str, remote);
		return;
	}

	err = xe_bo_validate(remote, NULL, false);
	if (err) {
		KUNIT_FAIL(test, "Failed to validate remote bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	err = xe_bo_vmap(remote);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap remote bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	fence = xe_migrate_clear(m, remote, remote->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
				 "Clearing remote small bo", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected, "remote first offset should be cleared",
		      test);
		retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
		check(retval, expected, "remote last offset should be cleared",
		      test);
	}
	dma_fence_put(fence);

	/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
	xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);

	expected = 0xc0c0c0c0c0c0c0c0;
	fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
				bo->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo remote -> vram" :
				 "Copying small bo remote -> vram", test)) {
		retval = xe_map_rd(xe, &bo->vmap, 0, u64);
		check(retval, expected,
		      "remote -> vram bo first offset should be copied", test);
		retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "remote -> vram bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	/* And the other way around, slightly hacky. */
	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);

	fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
				remote->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" :
				 "Copying small bo vram -> remote", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected,
		      "vram -> remote bo first offset should be copied", test);
		retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "vram -> remote bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	xe_bo_vunmap(remote);
out_unlock:
	xe_bo_unlock(remote);
	xe_bo_put(remote);
}

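/*
 * Copy helpers used by the sanity test: test_copy_sysmem() exercises a
 * copy against a system memory bo, test_copy_vram() a copy against the
 * other tile's VRAM region (skipped for bos already in system memory).
 */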
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
			     struct kunit *test)
{
	test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}

static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
			   struct kunit *test)
{
	u32 region;

	if (bo->ttm.resource->mem_type == XE_PL_SYSTEM)
		return;

	if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
		region = XE_BO_FLAG_VRAM1;
	else
		region = XE_BO_FLAG_VRAM0;
	test_copy(m, bo, test, region);
}

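/*
 * Per-tile sanity test for the migrate engine: write a PTE for a fake page
 * table through the migrate vm, clear and write through that mapping, then
 * run the clear/copy tests on a small (2 * SZ_4K) and a big (SZ_4M) bo.
 */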
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
	struct xe_tile *tile = m->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
	struct xe_res_cursor src_it;
	struct dma_fence *fence;
	u64 retval, expected;
	struct xe_bb *bb;
	int err;
	u8 id = tile->id;

	err = xe_bo_vmap(bo);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
		return;
	}

	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
				   ttm_bo_type_kernel,
				   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				   XE_BO_FLAG_PINNED);
	if (IS_ERR(big)) {
		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
		goto vunmap;
	}

	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(pt)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		goto free_big;
	}

	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
				    2 * SZ_4K,
				    ttm_bo_type_kernel,
				    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				    XE_BO_FLAG_PINNED);
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}

	bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
	if (IS_ERR(bb)) {
		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
			   PTR_ERR(bb));
		goto free_tiny;
	}

	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
		   (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
		   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));

	/* First part of the test, are we updating our pagetable bo with a new entry? */
	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
		  0xdeaddeadbeefbeef);
	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
	if (m->q->vm->flags & XE_VM_FLAG_64K)
		expected |= XE_PTE_PS64;
	if (xe_bo_is_vram(pt))
		xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
	else
		xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);

	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
		 &src_it, XE_PAGE_SIZE, pt->ttm.resource);

	run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);

	retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
			   u64);
	check(retval, expected, "PTE entry write", test);

	/* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
	bb->len = 0;
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
	expected = 0;

	emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
		   IS_DGFX(xe));
	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
		       test);

	retval = xe_map_rd(xe, &pt->vmap, 0, u32);
	check(retval, expected, "Write to PT after adding PTE", test);

	/* Sanity checks passed, try the full ones! */

	/* Clear a small bo */
	kunit_info(test, "Clearing small buffer object\n");
	xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
	expected = 0;
	fence = xe_migrate_clear(m, tiny, tiny->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
	check(retval, expected, "Command clear small first value", test);
	retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
	check(retval, expected, "Command clear small last value", test);

	kunit_info(test, "Copying small buffer object to system\n");
	test_copy_sysmem(m, tiny, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying small buffer object to other vram\n");
		test_copy_vram(m, tiny, test);
	}

	/* Clear a big bo */
	kunit_info(test, "Clearing big buffer object\n");
	xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
	expected = 0;
	fence = xe_migrate_clear(m, big, big->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
		goto out;

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &big->vmap, 0, u32);
	check(retval, expected, "Command clear big first value", test);
	retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
	check(retval, expected, "Command clear big last value", test);

	kunit_info(test, "Copying big buffer object to system\n");
	test_copy_sysmem(m, big, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying big buffer object to other vram\n");
		test_copy_vram(m, big, test);
	}

out:
	xe_bb_free(bb, NULL);
free_tiny:
	xe_bo_unpin(tiny);
	xe_bo_put(tiny);
free_pt:
	xe_bo_unpin(pt);
	xe_bo_put(pt);
free_big:
	xe_bo_unpin(big);
	xe_bo_put(big);
vunmap:
	xe_bo_vunmap(m->pt_bo);
}

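/*
 * Run the migrate sanity test on every tile of the device, holding the
 * migrate vm lock and a runtime PM reference for the duration.
 */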
static int migrate_test_run_device(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_tile *tile;
	int id;

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id) {
		struct xe_migrate *m = tile->migrate;

		kunit_info(test, "Testing tile id %d.\n", id);
		xe_vm_lock(m->q->vm, false);
		xe_migrate_sanity_test(m, test);
		xe_vm_unlock(m->q->vm);
	}

	xe_pm_runtime_put(xe);

	return 0;
}

static void xe_migrate_sanity_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	migrate_test_run_device(xe);
}

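/*
 * Minimal blit copy used by the CCS tests: walk the source and destination
 * resources chunk by chunk, emit the PTE updates and either a full blit copy
 * or a CCS-only (compression metadata) copy, and chain the jobs on the
 * migrate queue. Returns the fence of the last submitted job.
 */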
static struct dma_fence *blt_copy(struct xe_tile *tile,
				  struct xe_bo *src_bo, struct xe_bo *dst_bo,
				  bool copy_only_ccs, const char *str, struct kunit *test)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_migrate *m = tile->migrate;
	struct xe_device *xe = gt_to_xe(gt);
	struct dma_fence *fence = NULL;
	u64 size = src_bo->size;
	struct xe_res_cursor src_it, dst_it;
	struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
	u64 src_L0_ofs, dst_L0_ofs;
	u32 src_L0_pt, dst_L0_pt;
	u64 src_L0, dst_L0;
	int err;
	bool src_is_vram = mem_type_is_vram(src->mem_type);
	bool dst_is_vram = mem_type_is_vram(dst->mem_type);

	if (!src_is_vram)
		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
	else
		xe_res_first(src, 0, size, &src_it);

	if (!dst_is_vram)
		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
	else
		xe_res_first(dst, 0, size, &dst_it);

	while (size) {
		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
		struct xe_sched_job *job;
		struct xe_bb *bb;
		u32 flush_flags = 0;
		u32 update_idx;
		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
		u32 pte_flags;

		src_L0 = xe_migrate_res_sizes(m, &src_it);
		dst_L0 = xe_migrate_res_sizes(m, &dst_it);

		src_L0 = min(src_L0, dst_L0);

		pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
					   PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
					      &src_L0_ofs, &src_L0_pt, 0, 0,
					      avail_pts);

		pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
					   PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
		batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
					      &dst_L0_ofs, &dst_L0_pt, 0,
					      avail_pts, avail_pts);

		/* Add copy commands size here */
		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
			((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);

		bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
		if (IS_ERR(bb)) {
			err = PTR_ERR(bb);
			goto err_sync;
		}

		if (src_is_vram)
			xe_res_next(&src_it, src_L0);
		else
			emit_pte(m, bb, src_L0_pt, src_is_vram, false,
				 &src_it, src_L0, src);

		if (dst_is_vram)
			xe_res_next(&dst_it, src_L0);
		else
			emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
				 &dst_it, src_L0, dst);

		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;
		if (!copy_only_ccs)
			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);

		if (copy_only_ccs)
			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
							  src_is_vram, dst_L0_ofs,
							  dst_is_vram, src_L0, dst_L0_ofs,
							  copy_only_ccs);

		job = xe_bb_create_migration_job(m->q, bb,
						 xe_migrate_batch_base(m, xe->info.has_usm),
						 update_idx);
		if (IS_ERR(job)) {
			err = PTR_ERR(job);
			goto err;
		}

		xe_sched_job_add_migrate_flush(job, flush_flags);

		mutex_lock(&m->job_mutex);
		xe_sched_job_arm(job);
		dma_fence_put(fence);
		fence = dma_fence_get(&job->drm.s_fence->finished);
		xe_sched_job_push(job);

		dma_fence_put(m->fence);
		m->fence = dma_fence_get(fence);

		mutex_unlock(&m->job_mutex);

		xe_bb_free(bb, fence);
		size -= src_L0;
		continue;

err:
		xe_bb_free(bb, NULL);

err_sync:
		if (fence) {
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}
		return ERR_PTR(err);
	}

	return fence;
}

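/*
 * Exercise compressed data across an eviction cycle: blit a known pattern
 * into @vram_bo with compression enabled, evict and restore the bo, check
 * that its contents survive, and use CCS-only copies into @ccs_bo to verify
 * that the compression metadata reads back as zero after migration.
 */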
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
			 struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
			 struct kunit *test)
{
	struct dma_fence *fence;
	u64 expected, retval;
	long timeout;
	long ret;

	expected = 0xd0d0d0d0d0d0d0d0;
	xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);

	fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
		retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
		if (retval == expected)
			KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
	}
	dma_fence_put(fence);

	kunit_info(test, "Evict vram buffer object\n");
	ret = xe_bo_evict(vram_bo, true);
	if (ret) {
		KUNIT_FAIL(test, "Failed to evict bo.\n");
		return;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		return;
	}

	retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
	check(retval, expected, "Clear evicted vram data first value", test);
	retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
	check(retval, expected, "Clear evicted vram data last value", test);

	fence = blt_copy(tile, vram_bo, ccs_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
		check(retval, 0, "Clear ccs data first value", test);

		retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
		check(retval, 0, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);

	kunit_info(test, "Restore vram buffer object\n");
	ret = xe_bo_validate(vram_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate vram bo: %li\n", ret);
		return;
	}

	/* Sync all migration blits */
	timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
					DMA_RESV_USAGE_KERNEL,
					true,
					5 * HZ);
	if (timeout <= 0) {
		KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
		return;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		return;
	}

	retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
	check(retval, expected, "Restored value must be equal to initial value", test);
	retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
	check(retval, expected, "Restored value must be equal to initial value", test);

	fence = blt_copy(tile, vram_bo, ccs_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
		check(retval, 0, "Clear ccs data first value", test);
		retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
		check(retval, 0, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);
}

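/*
 * Verify clearing of a compressed VRAM bo: blit a pattern in with
 * compression, read it back decompressed, clear the bo with
 * xe_migrate_clear() and check that both the main data and the CCS
 * metadata read back as zero.
 */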
static void test_clear(struct xe_device *xe, struct xe_tile *tile,
		       struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
{
	struct dma_fence *fence;
	u64 expected, retval;

	expected = 0xd0d0d0d0d0d0d0d0;
	xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);

	fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
		retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
		if (retval == expected)
			KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
	}
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Decompressed value must be equal to initial value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
		check(retval, expected, "Decompressed value must be equal to initial value", test);
	}
	dma_fence_put(fence);

	kunit_info(test, "Clear vram buffer object\n");
	expected = 0x0000000000000000;
	fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
		return;
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo,
			 false, "Blit copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Clear main buffer first value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
		check(retval, expected, "Clear main buffer last value", test);
	}
	dma_fence_put(fence);

	fence = blt_copy(tile, vram_bo, sys_bo,
			 true, "Blit surf copy from vram to sysmem", test);
	if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
		retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
		check(retval, expected, "Clear ccs data first value", test);
		retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
		check(retval, expected, "Clear ccs data last value", test);
	}
	dma_fence_put(fence);
}

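/*
 * Set up the buffer objects for the CCS tests on one tile: a system memory
 * bo, a VRAM bo and a scratch "ccs" bo, all validated and CPU mapped, then
 * run test_clear() and test_migrate() against them.
 */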
static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
				       struct kunit *test)
{
	struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
	long ret;

	sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				   DRM_XE_GEM_CPU_CACHING_WC,
				   XE_BO_FLAG_SYSTEM |
				   XE_BO_FLAG_NEEDS_CPU_ACCESS |
				   XE_BO_FLAG_PINNED);

	if (IS_ERR(sys_bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(sys_bo));
		return;
	}

	xe_bo_lock(sys_bo, false);
	ret = xe_bo_validate(sys_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate system bo: %li\n", ret);
		goto free_sysbo;
	}

	ret = xe_bo_vmap(sys_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
		goto free_sysbo;
	}
	xe_bo_unlock(sys_bo);

	ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				   DRM_XE_GEM_CPU_CACHING_WC,
				   bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
				   XE_BO_FLAG_PINNED);

	if (IS_ERR(ccs_bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(ccs_bo));
		return;
	}

	xe_bo_lock(ccs_bo, false);
	ret = xe_bo_validate(ccs_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate ccs bo: %li\n", ret);
		goto free_ccsbo;
	}

	ret = xe_bo_vmap(ccs_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap ccs bo: %li\n", ret);
		goto free_ccsbo;
	}
	xe_bo_unlock(ccs_bo);

	vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
				    DRM_XE_GEM_CPU_CACHING_WC,
				    bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
				    XE_BO_FLAG_PINNED);
	if (IS_ERR(vram_bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(vram_bo));
		return;
	}

	xe_bo_lock(vram_bo, false);
	ret = xe_bo_validate(vram_bo, NULL, false);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate vram bo: %li\n", ret);
		goto free_vrambo;
	}

	ret = xe_bo_vmap(vram_bo);
	if (ret) {
		KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
		goto free_vrambo;
	}

	test_clear(xe, tile, sys_bo, vram_bo, test);
	test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
	xe_bo_unlock(vram_bo);

	xe_bo_lock(vram_bo, false);
	xe_bo_vunmap(vram_bo);
	xe_bo_unlock(vram_bo);

	xe_bo_lock(ccs_bo, false);
	xe_bo_vunmap(ccs_bo);
	xe_bo_unlock(ccs_bo);

	xe_bo_lock(sys_bo, false);
	xe_bo_vunmap(sys_bo);
	xe_bo_unlock(sys_bo);
free_vrambo:
	xe_bo_put(vram_bo);
free_ccsbo:
	xe_bo_put(ccs_bo);
free_sysbo:
	xe_bo_put(sys_bo);
}

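/*
 * Run the flat-CCS eviction tests on each tile of the device. Skipped on
 * devices without flat CCS and on anything that is not an Xe2+ discrete
 * part.
 */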
static int validate_ccs_test_run_device(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_tile *tile;
	int id;

	if (!xe_device_has_flat_ccs(xe)) {
		kunit_skip(test, "non-flat-ccs device\n");
		return 0;
	}

	if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
		kunit_skip(test, "non-xe2 discrete device\n");
		return 0;
	}

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id)
		validate_ccs_test_run_tile(xe, tile, test);

	xe_pm_runtime_put(xe);

	return 0;
}

static void xe_validate_ccs_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	validate_ccs_test_run_device(xe);
}

static struct kunit_case xe_migrate_tests[] = {
	KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
	KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_migrate_test_suite = {
	.name = "xe_migrate",
	.test_cases = xe_migrate_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);