// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2022 Intel Corporation
 */

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_migrate_test.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"
#include "xe_pm.h"

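/*
 * Wait on @fence and flag a KUnit failure on error or timeout. Returns
 * true if the fence is missing, carries an error or does not signal
 * within five seconds.
 */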
static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
				const char *str, struct kunit *test)
{
	long ret;

	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
			   PTR_ERR(fence));
		return true;
	}
	if (!fence)
		return true;

	ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
	if (ret <= 0) {
		KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
		return true;
	}

	return false;
}

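/*
 * Wrap @bb in a migration job on the migrate exec queue, run it and wait
 * for completion. Returns 0 on success, a negative error code if the job
 * could not be created or did not finish in time.
 */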
static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
			  struct xe_bb *bb, u32 second_idx, const char *str,
			  struct kunit *test)
{
	u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
	struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
							      batch_base,
							      second_idx);
	struct dma_fence *fence;

	if (IS_ERR(job)) {
		KUNIT_FAIL(test, "Failed to create sanity job for %s: %li\n",
			   str, PTR_ERR(job));
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (sanity_fence_failed(xe, fence, str, test)) {
		dma_fence_put(fence);
		return -ETIMEDOUT;
	}

	dma_fence_put(fence);
	kunit_info(test, "%s: Job completed\n", str);
	return 0;
}

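/* Compare a result with the expected value and log a KUnit failure on mismatch. */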
#define check(_retval, _expected, str, _test)				\
	do { if ((_retval) != (_expected)) {				\
			KUNIT_FAIL(_test, "Sanity check failed: " str	\
				   " expected %llx, got %llx\n",	\
				   (u64)(_expected), (u64)(_retval));	\
		} } while (0)

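/*
 * Copy-test @bo against a freshly allocated bo in @region: clear the
 * remote bo, then copy known byte patterns in both directions, checking
 * the first and last u64 of each destination through its CPU mapping.
 */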
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
		      struct kunit *test, u32 region)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	u64 retval, expected = 0;
	bool big = bo->size >= SZ_2M;
	struct dma_fence *fence;
	const char *str = big ? "Copying big bo" : "Copying small bo";
	int err;

	struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
						   bo->size,
						   ttm_bo_type_kernel,
						   region |
						   XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(remote)) {
		KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
			   str, remote);
		return;
	}

	err = xe_bo_validate(remote, NULL, false);
	if (err) {
		KUNIT_FAIL(test, "Failed to validate remote bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

	err = xe_bo_vmap(remote);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap remote bo for %s: %i\n",
			   str, err);
		goto out_unlock;
	}

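	/*
	 * Clear the remote bo via the migrate engine and verify both ends
	 * of the buffer through the CPU mapping.
	 */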
	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	fence = xe_migrate_clear(m, remote, remote->ttm.resource);
	if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
				 "Clearing remote small bo", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected, "remote first offset should be cleared",
		      test);
		retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
		check(retval, expected, "remote last offset should be cleared",
		      test);
	}
	dma_fence_put(fence);

	/* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */
	xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);

	expected = 0xc0c0c0c0c0c0c0c0;
	fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
				bo->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo remote -> vram" :
				 "Copying small bo remote -> vram", test)) {
		retval = xe_map_rd(xe, &bo->vmap, 0, u64);
		check(retval, expected,
		      "remote -> vram bo first offset should be copied", test);
		retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
		check(retval, expected,
		      "remote -> vram bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	/* And the other way around.. slightly hacky.. */
	xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
	xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);

	fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
				remote->ttm.resource, false);
	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" :
				 "Copying small bo vram -> remote", test)) {
		retval = xe_map_rd(xe, &remote->vmap, 0, u64);
		check(retval, expected,
		      "vram -> remote bo first offset should be copied", test);
		retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
		check(retval, expected,
		      "vram -> remote bo last offset should be copied", test);
	}
	dma_fence_put(fence);

	xe_bo_vunmap(remote);
out_unlock:
	xe_bo_unlock(remote);
	xe_bo_put(remote);
}

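/* Exercise test_copy() against a remote bo in system memory. */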
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
			     struct kunit *test)
{
	test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}

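/*
 * Exercise test_copy() against the other VRAM region; bos currently in
 * system memory are skipped.
 */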
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
			   struct kunit *test)
{
	u32 region;

	if (bo->ttm.resource->mem_type == XE_PL_SYSTEM)
		return;

	if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
		region = XE_BO_FLAG_VRAM1;
	else
		region = XE_BO_FLAG_VRAM0;
	test_copy(m, bo, test, region);
}

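/*
 * Core sanity test: write a PTE for a fake page table through the
 * migrate ring, write through the new mapping, then clear and copy
 * small and big bos, verifying the results via CPU mappings.
 */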
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
	struct xe_tile *tile = m->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
	struct xe_res_cursor src_it;
	struct dma_fence *fence;
	u64 retval, expected;
	struct xe_bb *bb;
	int err;
	u8 id = tile->id;

	err = xe_bo_vmap(bo);
	if (err) {
		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
		return;
	}

	big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
				   ttm_bo_type_kernel,
				   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				   XE_BO_FLAG_PINNED);
	if (IS_ERR(big)) {
		KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
		goto vunmap;
	}

	pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(pt)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		goto free_big;
	}

	tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
				    2 * SZ_4K,
				    ttm_bo_type_kernel,
				    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				    XE_BO_FLAG_PINNED);
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}

	bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
	if (IS_ERR(bb)) {
		KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
			   PTR_ERR(bb));
		goto free_tiny;
	}

	kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
		   (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
		   (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));

	/* First part of the test, are we updating our pagetable bo with a new entry? */
	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
		  0xdeaddeadbeefbeef);
	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
	if (m->q->vm->flags & XE_VM_FLAG_64K)
		expected |= XE_PTE_PS64;
	if (xe_bo_is_vram(pt))
		xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
	else
		xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);

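	/*
	 * Emit commands that write the new PTE into slot NUM_KERNEL_PDE - 1
	 * of the migrate page-table bo; the read-back below checks that the
	 * write actually landed.
	 */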
	emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
		 &src_it, XE_PAGE_SIZE, pt->ttm.resource);

	run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);

	retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
			   u64);
	check(retval, expected, "PTE entry write", test);

	/* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
	bb->len = 0;
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
	expected = 0;

	emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
		   IS_DGFX(xe));
	run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
		       test);

	retval = xe_map_rd(xe, &pt->vmap, 0, u32);
	check(retval, expected, "Write to PT after adding PTE", test);

	/* Sanity checks passed, try the full ones! */

	/* Clear a small bo */
	kunit_info(test, "Clearing small buffer object\n");
	xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
	expected = 0;
	fence = xe_migrate_clear(m, tiny, tiny->ttm.resource);
	if (sanity_fence_failed(xe, fence, "Clearing small bo", test)) {
		if (!IS_ERR_OR_NULL(fence))
			dma_fence_put(fence);
		goto out;
	}

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
	check(retval, expected, "Command clear small first value", test);
	retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
	check(retval, expected, "Command clear small last value", test);

	kunit_info(test, "Copying small buffer object to system\n");
	test_copy_sysmem(m, tiny, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying small buffer object to other vram\n");
		test_copy_vram(m, tiny, test);
	}

	/* Clear a big bo */
	kunit_info(test, "Clearing big buffer object\n");
	xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
	expected = 0;
	fence = xe_migrate_clear(m, big, big->ttm.resource);
	if (sanity_fence_failed(xe, fence, "Clearing big bo", test)) {
		if (!IS_ERR_OR_NULL(fence))
			dma_fence_put(fence);
		goto out;
	}

	dma_fence_put(fence);
	retval = xe_map_rd(xe, &big->vmap, 0, u32);
	check(retval, expected, "Command clear big first value", test);
	retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
	check(retval, expected, "Command clear big last value", test);

	kunit_info(test, "Copying big buffer object to system\n");
	test_copy_sysmem(m, big, test);
	if (xe->info.tile_count > 1) {
		kunit_info(test, "Copying big buffer object to other vram\n");
		test_copy_vram(m, big, test);
	}

out:
	xe_bb_free(bb, NULL);
free_tiny:
	xe_bo_unpin(tiny);
	xe_bo_put(tiny);
free_pt:
	xe_bo_unpin(pt);
	xe_bo_put(pt);
free_big:
	xe_bo_unpin(big);
	xe_bo_put(big);
vunmap:
	xe_bo_vunmap(m->pt_bo);
}

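/*
 * Run the migrate sanity test on every tile of @xe with the migrate VM
 * lock held, taking a runtime PM reference for the duration.
 */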
static int migrate_test_run_device(struct xe_device *xe)
{
	struct kunit *test = xe_cur_kunit();
	struct xe_tile *tile;
	int id;

	xe_pm_runtime_get(xe);

	for_each_tile(tile, xe, id) {
		struct xe_migrate *m = tile->migrate;

		kunit_info(test, "Testing tile id %d.\n", id);
		xe_vm_lock(m->q->vm, false);
		xe_migrate_sanity_test(m, test);
		xe_vm_unlock(m->q->vm);
	}

	xe_pm_runtime_put(xe);

	return 0;
}

void xe_migrate_sanity_kunit(struct kunit *test)
{
	xe_call_for_each_device(migrate_test_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);