xref: /linux/drivers/gpu/drm/xe/tests/xe_bo.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <kunit/test.h>
7 #include <kunit/visibility.h>
8 
9 #include <linux/iosys-map.h>
10 #include <linux/math64.h>
11 #include <linux/prandom.h>
12 #include <linux/swap.h>
13 
14 #include <uapi/linux/sysinfo.h>
15 
16 #include "tests/xe_kunit_helpers.h"
17 #include "tests/xe_pci_test.h"
18 #include "tests/xe_test.h"
19 
20 #include "xe_bo_evict.h"
21 #include "xe_gt.h"
22 #include "xe_pci.h"
23 #include "xe_pm.h"
24 
/*
 * ccs_test_migrate() - Migrate @bo to VRAM and back to system memory while
 * checking its CCS (compression control surface) metadata.
 *
 * @tile: Tile whose migrate context is used for the optional clear blit.
 * @bo: The buffer object under test. Must be locked by the caller.
 * @clear: If true, submit a full clear (payload *and* CCS) while in VRAM.
 * @get_val: CCS value expected on CPU readback after eviction.
 * @assign_val: Value poked into the CCS backing pages for the next round.
 * @test: KUnit context used for failure reporting.
 * @exec: Validation exec context (these tests opt out of validation).
 *
 * Return: 0 on success, negative error code on failure. Failures are also
 * reported through KUNIT_FAIL().
 */
static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
			    bool clear, u64 get_val, u64 assign_val,
			    struct kunit *test, struct drm_exec *exec)
{
	struct dma_fence *fence;
	struct ttm_tt *ttm;
	struct page *page;
	pgoff_t ccs_page;
	long timeout;
	u64 *cpu_map;
	int ret;
	u32 offset;

	/* Move bo to VRAM if not already there. */
	ret = xe_bo_validate(bo, NULL, false, exec);
	if (ret) {
		KUNIT_FAIL(test, "Failed to validate bo.\n");
		return ret;
	}

	/* Optionally clear bo *and* CCS data in VRAM. */
	if (clear) {
		fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
					 XE_MIGRATE_CLEAR_FLAG_FULL);
		if (IS_ERR(fence)) {
			KUNIT_FAIL(test, "Failed to submit bo clear.\n");
			return PTR_ERR(fence);
		}

		/* Bounded wait so a stuck clear fails the test, not the box. */
		if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
			dma_fence_put(fence);
			KUNIT_FAIL(test, "Timeout while clearing bo.\n");
			return  -ETIME;
		}

		dma_fence_put(fence);
	}

	/* Evict to system. CCS data should be copied. */
	ret = xe_bo_evict(bo, exec);
	if (ret) {
		KUNIT_FAIL(test, "Failed to evict bo.\n");
		return ret;
	}

	/* Sync all migration blits */
	timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
					DMA_RESV_USAGE_KERNEL,
					true,
					5 * HZ);
	if (timeout <= 0) {
		KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
		return -ETIME;
	}

	/*
	 * Bo with CCS data is now in system memory. Verify backing store
	 * and data integrity. Then assign for the next testing round while
	 * we still have a CPU map.
	 */
	ttm = bo->ttm.ttm;
	if (!ttm || !ttm_tt_is_populated(ttm)) {
		KUNIT_FAIL(test, "Bo was not in expected placement.\n");
		return -EINVAL;
	}

	/* CCS metadata lives in TTM pages past the payload pages. */
	ccs_page = xe_bo_ccs_pages_start(bo) >> PAGE_SHIFT;
	if (ccs_page >= ttm->num_pages) {
		KUNIT_FAIL(test, "No TTM CCS pages present.\n");
		return -EINVAL;
	}

	page = ttm->pages[ccs_page];
	cpu_map = kmap_local_page(page);

	/* Check first CCS value */
	if (cpu_map[0] != get_val) {
		KUNIT_FAIL(test,
			   "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
			   (unsigned long long)get_val,
			   (unsigned long long)cpu_map[0]);
		ret = -EINVAL;
	}

	/* Check last CCS value, or at least last value in page. */
	offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo));
	offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
	if (cpu_map[offset] != get_val) {
		KUNIT_FAIL(test,
			   "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
			   (unsigned long long)get_val,
			   (unsigned long long)cpu_map[offset]);
		ret = -EINVAL;
	}

	/* Seed the next round's expected values while the map is live. */
	cpu_map[0] = assign_val;
	cpu_map[offset] = assign_val;
	kunmap_local(cpu_map);

	/* ret is 0 here unless one of the readback checks flagged -EINVAL. */
	return ret;
}
126 
ccs_test_run_tile(struct xe_device * xe,struct xe_tile * tile,struct kunit * test)127 static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
128 			      struct kunit *test)
129 {
130 	struct xe_bo *bo;
131 
132 	int ret;
133 
134 	/* TODO: Sanity check */
135 	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
136 	struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
137 
138 	if (IS_DGFX(xe))
139 		kunit_info(test, "Testing vram id %u\n", tile->id);
140 	else
141 		kunit_info(test, "Testing system memory\n");
142 
143 	bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
144 			       bo_flags, exec);
145 	if (IS_ERR(bo)) {
146 		KUNIT_FAIL(test, "Failed to create bo.\n");
147 		return;
148 	}
149 
150 	xe_bo_lock(bo, false);
151 
152 	kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
153 	ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
154 			       test, exec);
155 	if (ret)
156 		goto out_unlock;
157 
158 	kunit_info(test, "Verifying that CCS data survives migration.\n");
159 	ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
160 			       0xdeadbeefdeadbeefULL, test, exec);
161 	if (ret)
162 		goto out_unlock;
163 
164 	kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
165 	ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test, exec);
166 
167 out_unlock:
168 	xe_bo_unlock(bo);
169 	xe_bo_put(bo);
170 }
171 
ccs_test_run_device(struct xe_device * xe)172 static int ccs_test_run_device(struct xe_device *xe)
173 {
174 	struct kunit *test = kunit_get_current_test();
175 	struct xe_tile *tile;
176 	int id;
177 
178 	if (!xe_device_has_flat_ccs(xe)) {
179 		kunit_skip(test, "non-flat-ccs device\n");
180 		return 0;
181 	}
182 
183 	/* For xe2+ dgfx, we don't handle ccs metadata */
184 	if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) {
185 		kunit_skip(test, "xe2+ dgfx device\n");
186 		return 0;
187 	}
188 
189 	guard(xe_pm_runtime)(xe);
190 	for_each_tile(tile, xe, id) {
191 		/* For igfx run only for primary tile */
192 		if (!IS_DGFX(xe) && id > 0)
193 			continue;
194 		ccs_test_run_tile(xe, tile, test);
195 	}
196 
197 	return 0;
198 }
199 
xe_ccs_migrate_kunit(struct kunit * test)200 static void xe_ccs_migrate_kunit(struct kunit *test)
201 {
202 	struct xe_device *xe = test->priv;
203 
204 	ccs_test_run_device(xe);
205 }
206 
/*
 * evict_test_run_tile() - Exercise full device eviction and restore on @tile.
 *
 * Creates a vm-private bo and an external pinned bo, evicts all of VRAM,
 * sanitizes/resets the GTs, restores, and then checks that the pinned
 * external bo came back to VRAM while the unpinned vm-private bo did not.
 * The second loop iteration additionally revalidates both bos after restore.
 *
 * Return: always 0; failures are reported through KUNIT_FAIL().
 */
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
	struct xe_bo *bo, *external;
	unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
	struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
	struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
	struct xe_gt *__gt;
	int err, i, id;

	kunit_info(test, "Testing device %s vram id %u\n",
		   dev_name(xe->drm.dev), tile->id);

	for (i = 0; i < 2; ++i) {
		/* vm-private bo: created under the vm lock. */
		xe_vm_lock(vm, false);
		bo = xe_bo_create_user(xe, vm, 0x10000,
				       DRM_XE_GEM_CPU_CACHING_WC,
				       bo_flags, exec);
		xe_vm_unlock(vm);
		if (IS_ERR(bo)) {
			KUNIT_FAIL(test, "bo create err=%pe\n", bo);
			break;
		}

		/* External bo: not tied to a vm, pinned below. */
		external = xe_bo_create_user(xe, NULL, 0x10000,
					     DRM_XE_GEM_CPU_CACHING_WC,
					     bo_flags, NULL);
		if (IS_ERR(external)) {
			KUNIT_FAIL(test, "external bo create err=%pe\n", external);
			goto cleanup_bo;
		}

		xe_bo_lock(external, false);
		err = xe_bo_pin_external(external, false, exec);
		xe_bo_unlock(external);
		if (err) {
			KUNIT_FAIL(test, "external bo pin err=%pe\n",
				   ERR_PTR(err));
			goto cleanup_external;
		}

		/* Simulate a suspend-style full VRAM eviction. */
		err = xe_bo_evict_all(xe);
		if (err) {
			KUNIT_FAIL(test, "evict err=%pe\n", ERR_PTR(err));
			goto cleanup_all;
		}

		for_each_gt(__gt, xe, id)
			xe_gt_sanitize(__gt);
		err = xe_bo_restore_early(xe);
		/*
		 * Snapshotting the CTB and copying back a potentially old
		 * version seems risky, depending on what might have been
		 * inflight. Also it seems snapshotting the ADS object and
		 * copying back results in serious breakage. Normally when
		 * calling xe_bo_restore_kernel() we always fully restart the
		 * GT, which re-intializes such things.  We could potentially
		 * skip saving and restoring such objects in xe_bo_evict_all()
		 * however seems quite fragile not to also restart the GT. Try
		 * to do that here by triggering a GT reset.
		 */
		for_each_gt(__gt, xe, id)
			xe_gt_reset(__gt);

		/* Check the restore_early result only after the GT resets. */
		if (err) {
			KUNIT_FAIL(test, "restore kernel err=%pe\n",
				   ERR_PTR(err));
			goto cleanup_all;
		}

		err = xe_bo_restore_late(xe);
		if (err) {
			KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
			goto cleanup_all;
		}

		/* Pinned external bos must have been restored to VRAM. */
		if (!xe_bo_is_vram(external)) {
			KUNIT_FAIL(test, "external bo is not vram\n");
			err = -EPROTO;
			goto cleanup_all;
		}

		/* The unpinned vm-private bo must still be in system memory. */
		if (xe_bo_is_vram(bo)) {
			KUNIT_FAIL(test, "bo is vram\n");
			err = -EPROTO;
			goto cleanup_all;
		}

		/* Second pass only: revalidate both bos after restore. */
		if (i) {
			down_read(&vm->lock);
			xe_vm_lock(vm, false);
			err = xe_bo_validate(bo, bo->vm, false, exec);
			xe_vm_unlock(vm);
			up_read(&vm->lock);
			if (err) {
				KUNIT_FAIL(test, "bo valid err=%pe\n",
					   ERR_PTR(err));
				goto cleanup_all;
			}
			xe_bo_lock(external, false);
			err = xe_bo_validate(external, NULL, false, exec);
			xe_bo_unlock(external);
			if (err) {
				KUNIT_FAIL(test, "external bo valid err=%pe\n",
					   ERR_PTR(err));
				goto cleanup_all;
			}
		}

		/* Success path teardown mirrors the cleanup ladder below. */
		xe_bo_lock(external, false);
		xe_bo_unpin_external(external);
		xe_bo_unlock(external);

		xe_bo_put(external);

		xe_bo_lock(bo, false);
		__xe_bo_unset_bulk_move(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		continue;

cleanup_all:
		xe_bo_lock(external, false);
		xe_bo_unpin_external(external);
		xe_bo_unlock(external);
cleanup_external:
		xe_bo_put(external);
cleanup_bo:
		xe_bo_lock(bo, false);
		__xe_bo_unset_bulk_move(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		break;
	}

	xe_vm_put(vm);

	return 0;
}
345 
evict_test_run_device(struct xe_device * xe)346 static int evict_test_run_device(struct xe_device *xe)
347 {
348 	struct kunit *test = kunit_get_current_test();
349 	struct xe_tile *tile;
350 	int id;
351 
352 	if (!IS_DGFX(xe)) {
353 		kunit_skip(test, "non-discrete device\n");
354 		return 0;
355 	}
356 
357 	guard(xe_pm_runtime)(xe);
358 	for_each_tile(tile, xe, id)
359 		evict_test_run_tile(xe, tile, test);
360 
361 	return 0;
362 }
363 
xe_bo_evict_kunit(struct kunit * test)364 static void xe_bo_evict_kunit(struct kunit *test)
365 {
366 	struct xe_device *xe = test->priv;
367 
368 	evict_test_run_device(xe);
369 }
370 
/*
 * Per-bo bookkeeping for the shrink test: list membership, the bo itself,
 * and the first pseudo-random u32 written to the bo (used to detect PRNG
 * de-synchronization during readback verification).
 */
struct xe_bo_link {
	struct list_head link;	/* Entry in the test's local bo list. */
	struct xe_bo *bo;	/* The allocated buffer object. */
	u32 val;		/* First random word written to the bo. */
};

/* Size of each bo allocated by the shrink test. */
#define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M)
378 
shrink_test_fill_random(struct xe_bo * bo,struct rnd_state * state,struct xe_bo_link * link)379 static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state,
380 				   struct xe_bo_link *link)
381 {
382 	struct iosys_map map;
383 	int ret = ttm_bo_vmap(&bo->ttm, &map);
384 	size_t __maybe_unused i;
385 
386 	if (ret)
387 		return ret;
388 
389 	for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
390 		u32 val = prandom_u32_state(state);
391 
392 		iosys_map_wr(&map, i, u32, val);
393 		if (i == 0)
394 			link->val = val;
395 	}
396 
397 	ttm_bo_vunmap(&bo->ttm, &map);
398 	return 0;
399 }
400 
shrink_test_verify(struct kunit * test,struct xe_bo * bo,unsigned int bo_nr,struct rnd_state * state,struct xe_bo_link * link)401 static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo,
402 			       unsigned int bo_nr, struct rnd_state *state,
403 			       struct xe_bo_link *link)
404 {
405 	struct iosys_map map;
406 	int ret = ttm_bo_vmap(&bo->ttm, &map);
407 	size_t i;
408 	bool failed = false;
409 
410 	if (ret) {
411 		KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr);
412 		return true;
413 	}
414 
415 	for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
416 		u32 val = prandom_u32_state(state);
417 
418 		if (iosys_map_rd(&map, i, u32) != val) {
419 			KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx",
420 				   bo_nr, (unsigned long long)i);
421 			kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n",
422 				   (unsigned int)iosys_map_rd(&map, i, u32), val);
423 			if (i == 0 && val != link->val)
424 				kunit_info(test, "Looks like PRNG is out of sync.\n");
425 			failed = true;
426 			break;
427 		}
428 	}
429 
430 	ttm_bo_vunmap(&bo->ttm, &map);
431 
432 	return failed;
433 }
434 
435 /*
436  * Try to create system bos corresponding to twice the amount
437  * of available system memory to test shrinker functionality.
438  * If no swap space is available to accommodate the
439  * memory overcommit, mark bos purgeable.
440  */
shrink_test_run_device(struct xe_device * xe)441 static int shrink_test_run_device(struct xe_device *xe)
442 {
443 	struct kunit *test = kunit_get_current_test();
444 	LIST_HEAD(bos);
445 	struct xe_bo_link *link, *next;
446 	struct sysinfo si;
447 	u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit;
448 	unsigned int interrupted = 0, successful = 0, count = 0;
449 	struct rnd_state prng;
450 	u64 rand_seed;
451 	bool failed = false;
452 
453 	rand_seed = get_random_u64();
454 	prandom_seed_state(&prng, rand_seed);
455 	kunit_info(test, "Random seed is 0x%016llx.\n",
456 		   (unsigned long long)rand_seed);
457 
458 	/* Skip if execution time is expected to be too long. */
459 
460 	limit = SZ_32G;
461 	/* IGFX with flat CCS needs to copy when swapping / shrinking */
462 	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
463 		limit = SZ_16G;
464 
465 	si_meminfo(&si);
466 	ram = (size_t)si.freeram * si.mem_unit;
467 	if (ram > limit) {
468 		kunit_skip(test, "Too long expected execution time.\n");
469 		return 0;
470 	}
471 	to_alloc = ram * 2;
472 
473 	ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE;
474 	if (to_alloc > ram_and_swap)
475 		purgeable = to_alloc - ram_and_swap;
476 	purgeable += div64_u64(purgeable, 5);
477 
478 	kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n",
479 		   (unsigned long)ram);
480 	for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) {
481 		struct xe_bo *bo;
482 		unsigned int mem_type;
483 		struct xe_ttm_tt *xe_tt;
484 
485 		link = kzalloc_obj(*link);
486 		if (!link) {
487 			KUNIT_FAIL(test, "Unexpected link allocation failure\n");
488 			failed = true;
489 			break;
490 		}
491 
492 		INIT_LIST_HEAD(&link->link);
493 
494 		/* We can create bos using WC caching here. But it is slower. */
495 		bo = xe_bo_create_user(xe, NULL, XE_BO_SHRINK_SIZE,
496 				       DRM_XE_GEM_CPU_CACHING_WB,
497 				       XE_BO_FLAG_SYSTEM, NULL);
498 		if (IS_ERR(bo)) {
499 			if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
500 			    bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
501 				KUNIT_FAIL(test, "Error creating bo: %pe\n", bo);
502 			kfree(link);
503 			failed = true;
504 			break;
505 		}
506 		xe_bo_lock(bo, false);
507 		xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
508 
509 		/*
510 		 * Allocate purgeable bos first, because if we do it the
511 		 * other way around, they may not be subject to swapping...
512 		 */
513 		if (alloced < purgeable) {
514 			xe_ttm_tt_account_subtract(xe, &xe_tt->ttm);
515 			xe_tt->purgeable = true;
516 			xe_ttm_tt_account_add(xe, &xe_tt->ttm);
517 			bo->ttm.priority = 0;
518 			spin_lock(&bo->ttm.bdev->lru_lock);
519 			ttm_bo_move_to_lru_tail(&bo->ttm);
520 			spin_unlock(&bo->ttm.bdev->lru_lock);
521 		} else {
522 			int ret = shrink_test_fill_random(bo, &prng, link);
523 
524 			if (ret) {
525 				xe_bo_unlock(bo);
526 				xe_bo_put(bo);
527 				KUNIT_FAIL(test, "Error filling bo with random data: %pe\n",
528 					   ERR_PTR(ret));
529 				kfree(link);
530 				failed = true;
531 				break;
532 			}
533 		}
534 
535 		mem_type = bo->ttm.resource->mem_type;
536 		xe_bo_unlock(bo);
537 		link->bo = bo;
538 		list_add_tail(&link->link, &bos);
539 
540 		if (mem_type != XE_PL_TT) {
541 			KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n",
542 				   bo->ttm.resource->mem_type);
543 			failed = true;
544 		}
545 		cond_resched();
546 		if (signal_pending(current))
547 			break;
548 	}
549 
550 	/*
551 	 * Read back and destroy bos. Reset the pseudo-random seed to get an
552 	 * identical pseudo-random number sequence for readback.
553 	 */
554 	prandom_seed_state(&prng, rand_seed);
555 	list_for_each_entry_safe(link, next, &bos, link) {
556 		static struct ttm_operation_ctx ctx = {.interruptible = true};
557 		struct xe_bo *bo = link->bo;
558 		struct xe_ttm_tt *xe_tt;
559 		int ret;
560 
561 		count++;
562 		if (!signal_pending(current) && !failed) {
563 			bool purgeable, intr = false;
564 
565 			xe_bo_lock(bo, NULL);
566 
567 			/* xe_tt->purgeable is cleared on validate. */
568 			xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
569 			purgeable = xe_tt->purgeable;
570 			do {
571 				ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx);
572 				if (ret == -EINTR)
573 					intr = true;
574 			} while (ret == -EINTR && !signal_pending(current));
575 			if (!ret && !purgeable)
576 				failed = shrink_test_verify(test, bo, count, &prng, link);
577 
578 			xe_bo_unlock(bo);
579 			if (ret) {
580 				KUNIT_FAIL(test, "Validation failed: %pe\n",
581 					   ERR_PTR(ret));
582 				failed = true;
583 			} else if (intr) {
584 				interrupted++;
585 			} else {
586 				successful++;
587 			}
588 		}
589 		xe_bo_put(link->bo);
590 		list_del(&link->link);
591 		kfree(link);
592 	}
593 	kunit_info(test, "Readbacks interrupted: %u successful: %u\n",
594 		   interrupted, successful);
595 
596 	return 0;
597 }
598 
xe_bo_shrink_kunit(struct kunit * test)599 static void xe_bo_shrink_kunit(struct kunit *test)
600 {
601 	struct xe_device *xe = test->priv;
602 
603 	shrink_test_run_device(xe);
604 }
605 
/* Live test cases for general bo functionality, parameterized per device. */
static struct kunit_case xe_bo_tests[] = {
	KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
	KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_bo_test_suite = {
	.name = "xe_bo",
	.test_cases = xe_bo_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);

/*
 * The shrink test allocates twice the free system RAM, so it is marked
 * slow and kept in its own suite so it can be excluded separately.
 */
static struct kunit_case xe_bo_shrink_test[] = {
	KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
			      {.speed = KUNIT_SPEED_SLOW}),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_bo_shrink_test_suite = {
	.name = "xe_bo_shrink",
	.test_cases = xe_bo_shrink_test,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite);
633