// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>

#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_guc_ct.h"
#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"

#define DUT_GGTT_START		SZ_1M
#define DUT_GGTT_SIZE		SZ_2M

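/*
 * Stub for xe_managed_bo_create_pin_map(): backs the BO with plain kernel
 * memory instead of a real TTM allocation, so the GuC buffer cache can be
 * exercised without hardware. A GGTT node is only allocated and inserted
 * when the caller asked for XE_BO_FLAG_GGTT.
 */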
static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
							      struct xe_tile *tile,
							      size_t size, u32 flags)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_bo *bo;
	void *buf;

	bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);

	buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);

	bo->tile = tile;
	bo->ttm.bdev = &xe->ttm;
	bo->ttm.base.size = size;
	iosys_map_set_vaddr(&bo->vmap, buf);

	if (flags & XE_BO_FLAG_GGTT) {
		struct xe_ggtt *ggtt = tile->mem.ggtt;

		bo->ggtt_node[tile->id] = xe_ggtt_node_init(ggtt);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);

		KUNIT_ASSERT_EQ(test, 0,
				xe_ggtt_node_insert(bo->ggtt_node[tile->id],
						    xe_bo_size(bo), SZ_4K));
	}

	return bo;
}

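/*
 * Per-test setup: fakes a PF-mode Tigerlake device, limits the GGTT to the
 * [DUT_GGTT_START, DUT_GGTT_START + DUT_GGTT_SIZE) range, redirects BO
 * creation to the replacement above, then initializes the GuC buffer cache
 * and hands it to the test case via test->priv.
 */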
static int guc_buf_test_init(struct kunit *test)
{
	struct xe_pci_fake_data fake = {
		.sriov_mode = XE_SRIOV_MODE_PF,
		.platform = XE_TIGERLAKE, /* some random platform */
		.subplatform = XE_SUBPLATFORM_NONE,
	};
	struct xe_ggtt *ggtt;
	struct xe_guc *guc;

	test->priv = &fake;
	xe_kunit_helper_xe_device_test_init(test);

	ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
	guc = &xe_device_get_gt(test->priv, 0)->uc.guc;

	KUNIT_ASSERT_EQ(test, 0,
			xe_ggtt_init_kunit(ggtt, DUT_GGTT_START,
					   DUT_GGTT_START + DUT_GGTT_SIZE));

	kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
				   replacement_xe_managed_bo_create_pin_map);

	KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf));

	test->priv = &guc->buf;
	return 0;
}

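/* A minimal 1-dword reservation must yield a valid buffer inside the DUT GGTT range. */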
static void test_smallest(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, 1);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	xe_guc_buf_release(buf);
}

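/* Reserving the cache's full capacity in a single buffer must also succeed. */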
static void test_largest(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	xe_guc_buf_release(buf);
}

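/* The whole cache must be reservable as individual 1-dword buffers. */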
static void test_granular(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf *bufs;
	int n, dwords;

	dwords = xe_guc_buf_cache_dwords(cache);
	bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, bufs);

	for (n = 0; n < dwords; n++)
		bufs[n] = xe_guc_buf_reserve(cache, 1);

	for (n = 0; n < dwords; n++)
		KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n);

	for (n = 0; n < dwords; n++)
		xe_guc_buf_release(bufs[n]);
}

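/* Simultaneously reserved buffers must have distinct CPU pointers and GPU addresses. */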
static void test_unique(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf *bufs;
	int n, m, dwords;

	dwords = xe_guc_buf_cache_dwords(cache);
	bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, bufs);

	for (n = 0; n < dwords; n++)
		bufs[n] = xe_guc_buf_reserve(cache, 1);

	for (n = 0; n < dwords; n++) {
		for (m = n + 1; m < dwords; m++) {
			KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]),
						xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m);
			KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]),
					    xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m);
		}
	}

	for (n = 0; n < dwords; n++)
		xe_guc_buf_release(bufs[n]);
}

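/* Two half-cache reservations must not overlap, on either the CPU or the GPU side. */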
static void test_overlap(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf b1, b2;
	u32 dwords = xe_guc_buf_cache_dwords(cache) / 2;
	u32 bytes = dwords * sizeof(u32);
	void *p1, *p2;
	u64 a1, a2;

	b1 = xe_guc_buf_reserve(cache, dwords);
	b2 = xe_guc_buf_reserve(cache, dwords);

	p1 = xe_guc_buf_cpu_ptr(b1);
	p2 = xe_guc_buf_cpu_ptr(b2);

	a1 = xe_guc_buf_gpu_addr(b1);
	a2 = xe_guc_buf_gpu_addr(b2);

	KUNIT_EXPECT_PTR_NE(test, p1, p2);
	if (p1 < p2)
		KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2);
	else
		KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1);

	KUNIT_EXPECT_NE(test, a1, a2);
	if (a1 < a2)
		KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2);
	else
		KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1);

	xe_guc_buf_release(b1);
	xe_guc_buf_release(b2);
}

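/* After a full-size buffer is released, the same cache space must be handed out again. */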
static void test_reusable(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf b1, b2;
	void *p1;
	u64 a1;

	b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
	KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
	KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
	xe_guc_buf_release(b1);

	b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2));
	KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2));
	xe_guc_buf_release(b2);
}

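/* An oversized reservation must fail gracefully; releasing the invalid buffer must be a no-op. */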
static void test_too_big(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1);
	KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf));
	xe_guc_buf_release(buf); /* shouldn't crash */
}

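/*
 * Data written through the buffer's CPU pointer must be visible in the
 * cache's backing BO after xe_guc_buf_flush().
 */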
static void test_flush(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	const u32 dwords = xe_guc_buf_cache_dwords(cache);
	const u32 bytes = dwords * sizeof(u32);
	u32 *s, *p, *d;
	int n;

	KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
	KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));

	for (n = 0; n < dwords; n++)
		s[n] = n;

	buf = xe_guc_buf_reserve(cache, dwords);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_PTR_NE(test, p, s);
	KUNIT_EXPECT_PTR_NE(test, p, d);

	memcpy(p, s, bytes);
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf));

	iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);
	KUNIT_EXPECT_MEMEQ(test, s, d, bytes);

	xe_guc_buf_release(buf);
}

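/*
 * xe_guc_cache_gpu_addr_from_ptr() must translate any CPU pointer within a
 * reserved buffer to the matching GGTT address, and return 0 for pointers
 * outside of it.
 */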
static void test_lookup(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	u32 dwords;
	u64 addr;
	u32 *p;
	int n;

	dwords = xe_guc_buf_cache_dwords(cache);
	buf = xe_guc_buf_reserve(cache, dwords);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf));

	KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32)));
	KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32)));

	for (n = 0; n < dwords; n++)
		KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)),
				    addr + n * sizeof(u32), "n=%d", n);

	xe_guc_buf_release(buf);
}

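/* xe_guc_buf_from_data() must return a buffer pre-filled with the given data. */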
static void test_data(struct kunit *test)
{
	static const u32 data[] = { 1, 2, 3, 4, 5, 6 };
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	void *p;

	buf = xe_guc_buf_from_data(cache, data, sizeof(data));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data));

	xe_guc_buf_release(buf);
}

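/*
 * The scope-based CLASS(xe_guc_buf) helper must auto-release on scope exit:
 * since each scope reserves the cache's full capacity, the second scope can
 * only succeed if the first reservation was released.
 */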
static void test_class(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	u32 dwords = xe_guc_buf_cache_dwords(cache);

	{
		CLASS(xe_guc_buf, buf)(cache, dwords);
		KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	}

	{
		CLASS(xe_guc_buf, buf)(cache, dwords);
		KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	}
}

static struct kunit_case guc_buf_test_cases[] = {
	KUNIT_CASE(test_smallest),
	KUNIT_CASE(test_largest),
	KUNIT_CASE(test_granular),
	KUNIT_CASE(test_unique),
	KUNIT_CASE(test_overlap),
	KUNIT_CASE(test_reusable),
	KUNIT_CASE(test_too_big),
	KUNIT_CASE(test_flush),
	KUNIT_CASE(test_lookup),
	KUNIT_CASE(test_data),
	KUNIT_CASE(test_class),
	{}
};

static struct kunit_suite guc_buf_suite = {
	.name = "guc_buf",
	.test_cases = guc_buf_test_cases,
	.init = guc_buf_test_init,
};

kunit_test_suites(&guc_buf_suite);