/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
 *
 * Test the format API directly.
 *
 */
#include "kunit_iommu.h"
#include "pt_iter.h"

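/*
 * Map an IOVA range through the kunit IOMMU domain with RW permission and
 * assert that the map succeeded.
 */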
static void do_map(struct kunit *test, pt_vaddr_t va, pt_oaddr_t pa,
		   pt_vaddr_t len)
{
	struct kunit_iommu_priv *priv = test->priv;
	int ret;

	KUNIT_ASSERT_EQ(test, len, (size_t)len);

	ret = iommu_map(&priv->domain, va, pa, len, IOMMU_READ | IOMMU_WRITE,
			GFP_KERNEL);
	KUNIT_ASSERT_NO_ERRNO_FN(test, "map_pages", ret);
}

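/* Re-load the entry under pts and assert it decodes to the expected type */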
#define KUNIT_ASSERT_PT_LOAD(test, pts, entry)             \
	({                                                 \
		pt_load_entry(pts);                        \
		KUNIT_ASSERT_EQ(test, (pts)->type, entry); \
	})

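/* Context handed to the per-level callback by check_all_levels() */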
struct check_levels_arg {
	struct kunit *test;
	void *fn_arg;
	void (*fn)(struct kunit *test, struct pt_state *pts, void *arg);
};

static int __check_all_levels(struct pt_range *range, void *arg,
			      unsigned int level, struct pt_table_p *table)
{
	struct pt_state pts = pt_init(range, level, table);
	struct check_levels_arg *chk = arg;
	struct kunit *test = chk->test;
	int ret;

	_pt_iter_first(&pts);

	/*
	 * If we were able to use the full VA space, this should always be the
	 * last index in each table.
	 */
	if (!(IS_32BIT && range->max_vasz_lg2 > 32)) {
		if (pt_feature(range->common, PT_FEAT_SIGN_EXTEND) &&
		    pts.level == pts.range->top_level)
			KUNIT_ASSERT_EQ(test, pts.index,
					log2_to_int(range->max_vasz_lg2 - 1 -
						    pt_table_item_lg2sz(&pts)) -
						1);
		else
			KUNIT_ASSERT_EQ(test, pts.index,
					log2_to_int(pt_table_oa_lg2sz(&pts) -
						    pt_table_item_lg2sz(&pts)) -
						1);
	}

	if (pt_can_have_table(&pts)) {
		pt_load_single_entry(&pts);
		KUNIT_ASSERT_EQ(test, pts.type, PT_ENTRY_TABLE);
		ret = pt_descend(&pts, arg, __check_all_levels);
		KUNIT_ASSERT_EQ(test, ret, 0);

		/* Index 0 is used by the test */
		if (IS_32BIT && !pts.index)
			return 0;
		KUNIT_ASSERT_NE(chk->test, pts.index, 0);
	}

	/*
	 * A format should not create a table with only one entry; if it does,
	 * this test approach won't work.
	 */
	KUNIT_ASSERT_GT(chk->test, pts.end_index, 1);

	/*
	 * When increasing the top, index 0 ends up holding the original top's
	 * tree, so use index 1 for testing instead.
	 */
	pts.index = 0;
	pt_index_to_va(&pts);
	pt_load_single_entry(&pts);
	if (pts.type == PT_ENTRY_TABLE && pts.end_index > 2) {
		pts.index = 1;
		pt_index_to_va(&pts);
	}
	(*chk->fn)(chk->test, &pts, chk->fn_arg);
	return 0;
}

/*
 * Call fn for each level in the table, with a pts set up to index 0 in a
 * table for that level. This allows writing tests that run on every level.
 * The test can use every index in the table except the last one.
 */
static void check_all_levels(struct kunit *test,
			     void (*fn)(struct kunit *test,
					struct pt_state *pts, void *arg),
			     void *fn_arg)
{
	struct kunit_iommu_priv *priv = test->priv;
	struct pt_range range = pt_top_range(priv->common);
	struct check_levels_arg chk = {
		.test = test,
		.fn = fn,
		.fn_arg = fn_arg,
	};
	int ret;

	if (pt_feature(priv->common, PT_FEAT_DYNAMIC_TOP) &&
	    priv->common->max_vasz_lg2 > range.max_vasz_lg2)
		range.last_va = fvalog2_set_mod_max(range.va,
						    priv->common->max_vasz_lg2);

	/*
	 * Map a page at the highest VA; this will populate all the levels so
	 * we can then iterate over them. Index 0 will be used for testing.
	 */
	if (IS_32BIT && range.max_vasz_lg2 > 32)
		range.last_va = (u32)range.last_va;
	range.va = range.last_va - (priv->smallest_pgsz - 1);
	do_map(test, range.va, 0, priv->smallest_pgsz);

	range = pt_make_range(priv->common, range.va, range.last_va);
	ret = pt_walk_range(&range, __check_all_levels, &chk);
	KUNIT_ASSERT_EQ(test, ret, 0);
}

static void test_init(struct kunit *test)
{
	struct kunit_iommu_priv *priv = test->priv;

	/* Fixture does the setup */
	KUNIT_ASSERT_NE(test, priv->info.pgsize_bitmap, 0);
}

/*
 * Basic check that the log2_* functions are working, especially at the integer
 * limits.
 */
static void test_bitops(struct kunit *test)
{
	int i;

	KUNIT_ASSERT_EQ(test, fls_t(u32, 0), 0);
	KUNIT_ASSERT_EQ(test, fls_t(u32, 1), 1);
	KUNIT_ASSERT_EQ(test, fls_t(u32, BIT(2)), 3);
	KUNIT_ASSERT_EQ(test, fls_t(u32, U32_MAX), 32);

	KUNIT_ASSERT_EQ(test, fls_t(u64, 0), 0);
	KUNIT_ASSERT_EQ(test, fls_t(u64, 1), 1);
	KUNIT_ASSERT_EQ(test, fls_t(u64, BIT(2)), 3);
	KUNIT_ASSERT_EQ(test, fls_t(u64, U64_MAX), 64);

	KUNIT_ASSERT_EQ(test, ffs_t(u32, 1), 0);
	KUNIT_ASSERT_EQ(test, ffs_t(u32, BIT(2)), 2);
	KUNIT_ASSERT_EQ(test, ffs_t(u32, BIT(31)), 31);

	KUNIT_ASSERT_EQ(test, ffs_t(u64, 1), 0);
	KUNIT_ASSERT_EQ(test, ffs_t(u64, BIT(2)), 2);
	KUNIT_ASSERT_EQ(test, ffs_t(u64, BIT_ULL(63)), 63);

	for (i = 0; i != 31; i++)
		KUNIT_ASSERT_EQ(test, ffz_t(u32, BIT_ULL(i) - 1), i);

	for (i = 0; i != 63; i++)
		KUNIT_ASSERT_EQ(test, ffz_t(u64, BIT_ULL(i) - 1), i);

	for (i = 0; i != 32; i++) {
		u64 val = get_random_u64();

		KUNIT_ASSERT_EQ(test, log2_mod_t(u32, val, ffs_t(u32, val)), 0);
		KUNIT_ASSERT_EQ(test, log2_mod_t(u64, val, ffs_t(u64, val)), 0);

		KUNIT_ASSERT_EQ(test, log2_mod_t(u32, val, ffz_t(u32, val)),
				log2_to_max_int_t(u32, ffz_t(u32, val)));
		KUNIT_ASSERT_EQ(test, log2_mod_t(u64, val, ffz_t(u64, val)),
				log2_to_max_int_t(u64, ffz_t(u64, val)));
	}
}

static unsigned int ref_best_pgsize(pt_vaddr_t pgsz_bitmap, pt_vaddr_t va,
				    pt_vaddr_t last_va, pt_oaddr_t oa)
{
	pt_vaddr_t pgsz_lg2;

	/* Brute force the constraints described in pt_compute_best_pgsize() */
	for (pgsz_lg2 = PT_VADDR_MAX_LG2 - 1; pgsz_lg2 != 0; pgsz_lg2--) {
		if ((pgsz_bitmap & log2_to_int(pgsz_lg2)) &&
		    log2_mod(va, pgsz_lg2) == 0 &&
		    oalog2_mod(oa, pgsz_lg2) == 0 &&
		    va + log2_to_int(pgsz_lg2) - 1 <= last_va &&
		    log2_div_eq(va, va + log2_to_int(pgsz_lg2) - 1, pgsz_lg2) &&
		    oalog2_div_eq(oa, oa + log2_to_int(pgsz_lg2) - 1, pgsz_lg2))
			return pgsz_lg2;
	}
	return 0;
}

/* Check that the bit logic in pt_compute_best_pgsize() works. */
static void test_best_pgsize(struct kunit *test)
{
	unsigned int a_lg2;
	unsigned int b_lg2;
	unsigned int c_lg2;

	/* Try random prefixes with every suffix combination */
	for (a_lg2 = 1; a_lg2 != 10; a_lg2++) {
		for (b_lg2 = 1; b_lg2 != 10; b_lg2++) {
			for (c_lg2 = 1; c_lg2 != 10; c_lg2++) {
				pt_vaddr_t pgsz_bitmap = get_random_u64();
				pt_vaddr_t va = get_random_u64() << a_lg2;
				pt_oaddr_t oa = get_random_u64() << b_lg2;
				pt_vaddr_t last_va = log2_set_mod_max(
					get_random_u64(), c_lg2);

				if (va > last_va)
					swap(va, last_va);
				KUNIT_ASSERT_EQ(
					test,
					pt_compute_best_pgsize(pgsz_bitmap, va,
							       last_va, oa),
					ref_best_pgsize(pgsz_bitmap, va,
							last_va, oa));
			}
		}
	}

	/* 0 prefix, every suffix */
	for (c_lg2 = 1; c_lg2 != PT_VADDR_MAX_LG2 - 1; c_lg2++) {
		pt_vaddr_t pgsz_bitmap = get_random_u64();
		pt_vaddr_t va = 0;
		pt_oaddr_t oa = 0;
		pt_vaddr_t last_va = log2_set_mod_max(0, c_lg2);

		KUNIT_ASSERT_EQ(test,
				pt_compute_best_pgsize(pgsz_bitmap, va, last_va,
						       oa),
				ref_best_pgsize(pgsz_bitmap, va, last_va, oa));
	}

	/* 1's prefix, every suffix */
	for (a_lg2 = 1; a_lg2 != 10; a_lg2++) {
		for (b_lg2 = 1; b_lg2 != 10; b_lg2++) {
			for (c_lg2 = 1; c_lg2 != 10; c_lg2++) {
				pt_vaddr_t pgsz_bitmap = get_random_u64();
				pt_vaddr_t va = PT_VADDR_MAX << a_lg2;
				pt_oaddr_t oa = PT_VADDR_MAX << b_lg2;
				pt_vaddr_t last_va = PT_VADDR_MAX;

				KUNIT_ASSERT_EQ(
					test,
					pt_compute_best_pgsize(pgsz_bitmap, va,
							       last_va, oa),
					ref_best_pgsize(pgsz_bitmap, va,
							last_va, oa));
			}
		}
	}

	/* pgsize_bitmap is always 0 */
	for (a_lg2 = 1; a_lg2 != 10; a_lg2++) {
		for (b_lg2 = 1; b_lg2 != 10; b_lg2++) {
			for (c_lg2 = 1; c_lg2 != 10; c_lg2++) {
				pt_vaddr_t pgsz_bitmap = 0;
				pt_vaddr_t va = get_random_u64() << a_lg2;
				pt_oaddr_t oa = get_random_u64() << b_lg2;
				pt_vaddr_t last_va = log2_set_mod_max(
					get_random_u64(), c_lg2);

				if (va > last_va)
					swap(va, last_va);
				KUNIT_ASSERT_EQ(
					test,
					pt_compute_best_pgsize(pgsz_bitmap, va,
							       last_va, oa),
					0);
			}
		}
	}

	if (sizeof(pt_vaddr_t) <= 4)
		return;

	/* over 32 bit page sizes */
	for (a_lg2 = 32; a_lg2 != 42; a_lg2++) {
		for (b_lg2 = 32; b_lg2 != 42; b_lg2++) {
			for (c_lg2 = 32; c_lg2 != 42; c_lg2++) {
				pt_vaddr_t pgsz_bitmap = get_random_u64();
				pt_vaddr_t va = get_random_u64() << a_lg2;
				pt_oaddr_t oa = get_random_u64() << b_lg2;
				pt_vaddr_t last_va = log2_set_mod_max(
					get_random_u64(), c_lg2);

				if (va > last_va)
					swap(va, last_va);
				KUNIT_ASSERT_EQ(
					test,
					pt_compute_best_pgsize(pgsz_bitmap, va,
							       last_va, oa),
					ref_best_pgsize(pgsz_bitmap, va,
							last_va, oa));
			}
		}
	}
}

/*
 * Check that pt_install_table() and pt_table_pa() match
 */
static void test_lvl_table_ptr(struct kunit *test, struct pt_state *pts,
			       void *arg)
{
	struct kunit_iommu_priv *priv = test->priv;
	pt_oaddr_t paddr =
		log2_set_mod(priv->test_oa, 0, priv->smallest_pgsz_lg2);
	struct pt_write_attrs attrs = {};

	if (!pt_can_have_table(pts))
		return;

	KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
				 pt_iommu_set_prot(pts->range->common, &attrs,
						   IOMMU_READ));

	pt_load_single_entry(pts);
	KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);

	KUNIT_ASSERT_TRUE(test, pt_install_table(pts, paddr, &attrs));

	/* A second install should pass because install updates pts->entry. */
	KUNIT_ASSERT_EQ(test, pt_install_table(pts, paddr, &attrs), true);

	KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_TABLE);
	KUNIT_ASSERT_EQ(test, pt_table_pa(pts), paddr);

	pt_clear_entries(pts, ilog2(1));
	KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
}

static void test_table_ptr(struct kunit *test)
{
	check_all_levels(test, test_lvl_table_ptr, NULL);
}

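/* VA bits that have been decoded by the levels checked so far */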
struct lvl_radix_arg {
	pt_vaddr_t vbits;
};

/*
 * Check pt_table_oa_lg2sz() and pt_table_item_lg2sz(); together the levels
 * need to decode a contiguous run of VA bits that covers the entire
 * advertised VA space.
 */
static void test_lvl_radix(struct kunit *test, struct pt_state *pts, void *arg)
{
	unsigned int table_lg2sz = pt_table_oa_lg2sz(pts);
	unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
	struct lvl_radix_arg *radix = arg;

	/* Every bit below us is decoded */
	KUNIT_ASSERT_EQ(test, log2_set_mod_max(0, isz_lg2), radix->vbits);

	/* We are not decoding bits someone else is */
	KUNIT_ASSERT_EQ(test, log2_div(radix->vbits, isz_lg2), 0);

	/* Can't decode past the pt_vaddr_t size */
	KUNIT_ASSERT_LE(test, table_lg2sz, PT_VADDR_MAX_LG2);
	KUNIT_ASSERT_EQ(test, fvalog2_div(table_lg2sz, PT_MAX_VA_ADDRESS_LG2),
			0);

	radix->vbits = fvalog2_set_mod_max(0, table_lg2sz);
}

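/* The current top range must not advertise more VA than the format allows */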
static void test_max_va(struct kunit *test)
{
	struct kunit_iommu_priv *priv = test->priv;
	struct pt_range range = pt_top_range(priv->common);

	KUNIT_ASSERT_GE(test, priv->common->max_vasz_lg2, range.max_vasz_lg2);
}

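/*
 * Walk every level and check that the decoded VA bits accumulate to exactly
 * cover the advertised VA space.
 */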
static void test_table_radix(struct kunit *test)
{
	struct kunit_iommu_priv *priv = test->priv;
	struct lvl_radix_arg radix = { .vbits = priv->smallest_pgsz - 1 };
	struct pt_range range;

	check_all_levels(test, test_lvl_radix, &radix);

	range = pt_top_range(priv->common);
	if (range.max_vasz_lg2 == PT_VADDR_MAX_LG2) {
		KUNIT_ASSERT_EQ(test, radix.vbits, PT_VADDR_MAX);
	} else {
		if (!IS_32BIT)
			KUNIT_ASSERT_EQ(test,
					log2_set_mod_max(0, range.max_vasz_lg2),
					radix.vbits);
		KUNIT_ASSERT_EQ(test, log2_div(radix.vbits, range.max_vasz_lg2),
				0);
	}
}

static unsigned int safe_pt_num_items_lg2(const struct pt_state *pts)
{
	struct pt_range top_range = pt_top_range(pts->range->common);
	struct pt_state top_pts = pt_init_top(&top_range);

	/*
	 * Avoid calling pt_num_items_lg2() on the top; instead derive the size
	 * of the top table from the top range.
	 */
	if (pts->level == top_range.top_level)
		return ilog2(pt_range_to_end_index(&top_pts));
	return pt_num_items_lg2(pts);
}

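/*
 * Check that pt_possible_sizes() reports a sensible page size bitmap for
 * this level.
 */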
static void test_lvl_possible_sizes(struct kunit *test, struct pt_state *pts,
				    void *arg)
{
	unsigned int num_items_lg2 = safe_pt_num_items_lg2(pts);
	pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
	unsigned int isz_lg2 = pt_table_item_lg2sz(pts);

	if (!pt_can_have_leaf(pts)) {
		KUNIT_ASSERT_EQ(test, pgsize_bitmap, 0);
		return;
	}

	/* No bits for sizes that would be outside this table */
	KUNIT_ASSERT_EQ(test, log2_mod(pgsize_bitmap, isz_lg2), 0);
	KUNIT_ASSERT_EQ(
		test, fvalog2_div(pgsize_bitmap, num_items_lg2 + isz_lg2), 0);

	/*
	 * The non-contiguous (single item) size must be supported. AMDv1 has a
	 * HW bug where one of the levels does not support it.
	 */
	if ((u64)pgsize_bitmap != 0xff0000000000ULL ||
	    strcmp(__stringify(PTPFX_RAW), "amdv1") != 0)
		KUNIT_ASSERT_TRUE(test, pgsize_bitmap & log2_to_int(isz_lg2));
	else
		KUNIT_ASSERT_NE(test, pgsize_bitmap, 0);

	/* A contiguous entry should not span the whole table */
	if (num_items_lg2 + isz_lg2 != PT_VADDR_MAX_LG2)
		KUNIT_ASSERT_FALSE(
			test,
			pgsize_bitmap & log2_to_int(num_items_lg2 + isz_lg2));
}

static void test_entry_possible_sizes(struct kunit *test)
{
	check_all_levels(test, test_lvl_possible_sizes, NULL);
}

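/*
 * Install a leaf entry of every supported size at index 0, check that every
 * contiguous item translates back to the expected OA, then clear it again.
 */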
static void sweep_all_pgsizes(struct kunit *test, struct pt_state *pts,
			      struct pt_write_attrs *attrs,
			      pt_oaddr_t test_oaddr)
{
	pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
	unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
	unsigned int len_lg2;

	if (pts->index != 0)
		return;

	for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2 - 1; len_lg2++) {
		struct pt_state sub_pts = *pts;
		pt_oaddr_t oaddr;

		if (!(pgsize_bitmap & log2_to_int(len_lg2)))
			continue;

		oaddr = log2_set_mod(test_oaddr, 0, len_lg2);
		pt_install_leaf_entry(pts, oaddr, len_lg2, attrs);
		/* Verify that every contiguous item translates correctly */
		for (sub_pts.index = 0;
		     sub_pts.index != log2_to_int(len_lg2 - isz_lg2);
		     sub_pts.index++) {
			KUNIT_ASSERT_PT_LOAD(test, &sub_pts, PT_ENTRY_OA);
			KUNIT_ASSERT_EQ(test, pt_item_oa(&sub_pts),
					oaddr + sub_pts.index *
							oalog2_mul(1, isz_lg2));
			KUNIT_ASSERT_EQ(test, pt_entry_oa(&sub_pts), oaddr);
			KUNIT_ASSERT_EQ(test, pt_entry_num_contig_lg2(&sub_pts),
					len_lg2 - isz_lg2);
		}

		pt_clear_entries(pts, len_lg2 - isz_lg2);
		KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
	}
}

/*
 * Check that pt_install_leaf_entry() and pt_entry_oa() match.
 * Check that pt_clear_entries() works.
 */
static void test_lvl_entry_oa(struct kunit *test, struct pt_state *pts,
			      void *arg)
{
	unsigned int max_oa_lg2 = pts->range->common->max_oasz_lg2;
	struct kunit_iommu_priv *priv = test->priv;
	struct pt_write_attrs attrs = {};

	if (!pt_can_have_leaf(pts))
		return;

	KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
				 pt_iommu_set_prot(pts->range->common, &attrs,
						   IOMMU_READ));

	sweep_all_pgsizes(test, pts, &attrs, priv->test_oa);

	/* Check that the table can store the boundary OAs */
	sweep_all_pgsizes(test, pts, &attrs, 0);
	if (max_oa_lg2 == PT_OADDR_MAX_LG2)
		sweep_all_pgsizes(test, pts, &attrs, PT_OADDR_MAX);
	else
		sweep_all_pgsizes(test, pts, &attrs,
				  oalog2_to_max_int(max_oa_lg2));
}

static void test_entry_oa(struct kunit *test)
{
	check_all_levels(test, test_lvl_entry_oa, NULL);
}

/* Test pt_attr_from_entry() */
static void test_lvl_attr_from_entry(struct kunit *test, struct pt_state *pts,
				     void *arg)
{
	pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
	unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
	struct kunit_iommu_priv *priv = test->priv;
	unsigned int len_lg2;
	unsigned int prot;

	if (!pt_can_have_leaf(pts))
		return;

	for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2; len_lg2++) {
		if (!(pgsize_bitmap & log2_to_int(len_lg2)))
			continue;
		for (prot = 0; prot <= (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE |
					IOMMU_NOEXEC | IOMMU_MMIO);
		     prot++) {
			pt_oaddr_t oaddr;
			struct pt_write_attrs attrs = {};
			u64 good_entry;

			/*
			 * If the format doesn't support this combination of
			 * prot bits, skip it.
			 */
			if (pt_iommu_set_prot(pts->range->common, &attrs,
					      prot)) {
				/* But RW has to be supported */
				KUNIT_ASSERT_NE(test, prot,
						IOMMU_READ | IOMMU_WRITE);
				continue;
			}

			oaddr = log2_set_mod(priv->test_oa, 0, len_lg2);
			pt_install_leaf_entry(pts, oaddr, len_lg2, &attrs);
			KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_OA);

			good_entry = pts->entry;

			memset(&attrs, 0, sizeof(attrs));
			pt_attr_from_entry(pts, &attrs);

			pt_clear_entries(pts, len_lg2 - isz_lg2);
			KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);

			pt_install_leaf_entry(pts, oaddr, len_lg2, &attrs);
			KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_OA);

			/*
			 * The descriptor produced by pt_attr_from_entry()
			 * should produce an identical entry value when
			 * re-written.
			 */
			KUNIT_ASSERT_EQ(test, good_entry, pts->entry);

			pt_clear_entries(pts, len_lg2 - isz_lg2);
		}
	}
}

static void test_attr_from_entry(struct kunit *test)
{
	check_all_levels(test, test_lvl_attr_from_entry, NULL);
}

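/*
 * Check the dirty tracking helpers: making any item of a contiguous entry
 * write-dirty must be visible through pt_entry_is_write_dirty(), and
 * pt_entry_make_write_clean() must clear it again.
 */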
static void test_lvl_dirty(struct kunit *test, struct pt_state *pts, void *arg)
{
	pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
	unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
	struct kunit_iommu_priv *priv = test->priv;
	unsigned int start_idx = pts->index;
	struct pt_write_attrs attrs = {};
	unsigned int len_lg2;

	if (!pt_can_have_leaf(pts))
		return;

	KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
				 pt_iommu_set_prot(pts->range->common, &attrs,
						   IOMMU_READ | IOMMU_WRITE));

	for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2; len_lg2++) {
		pt_oaddr_t oaddr;
		unsigned int i;

		if (!(pgsize_bitmap & log2_to_int(len_lg2)))
			continue;

		oaddr = log2_set_mod(priv->test_oa, 0, len_lg2);
		pt_install_leaf_entry(pts, oaddr, len_lg2, &attrs);
		KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_OA);

		pt_load_entry(pts);
		pt_entry_make_write_clean(pts);
		pt_load_entry(pts);
		KUNIT_ASSERT_FALSE(test, pt_entry_is_write_dirty(pts));

		for (i = 0; i != log2_to_int(len_lg2 - isz_lg2); i++) {
			/* dirty every contiguous entry */
			pts->index = start_idx + i;
			pt_load_entry(pts);
			KUNIT_ASSERT_TRUE(test, pt_entry_make_write_dirty(pts));
			pts->index = start_idx;
			pt_load_entry(pts);
			KUNIT_ASSERT_TRUE(test, pt_entry_is_write_dirty(pts));

			pt_entry_make_write_clean(pts);
			pt_load_entry(pts);
			KUNIT_ASSERT_FALSE(test, pt_entry_is_write_dirty(pts));
		}

		pt_clear_entries(pts, len_lg2 - isz_lg2);
	}
}

static __maybe_unused void test_dirty(struct kunit *test)
{
	struct kunit_iommu_priv *priv = test->priv;

	if (!pt_dirty_supported(priv->common))
		kunit_skip(test,
			   "Page table features do not support dirty tracking");

	check_all_levels(test, test_lvl_dirty, NULL);
}

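/*
 * Check that every SW bit of a leaf entry can be set and read back without
 * disturbing the OA or the attrs.
 */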
static void test_lvl_sw_bit_leaf(struct kunit *test, struct pt_state *pts,
				 void *arg)
{
	struct kunit_iommu_priv *priv = test->priv;
	pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
	unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
	struct pt_write_attrs attrs = {};
	unsigned int len_lg2;

	if (!pt_can_have_leaf(pts))
		return;
	if (pts->index != 0)
		return;

	KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
				 pt_iommu_set_prot(pts->range->common, &attrs,
						   IOMMU_READ));

	for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2 - 1; len_lg2++) {
		pt_oaddr_t paddr = log2_set_mod(priv->test_oa, 0, len_lg2);
		struct pt_write_attrs new_attrs = {};
		unsigned int bitnr;

		if (!(pgsize_bitmap & log2_to_int(len_lg2)))
			continue;

		pt_install_leaf_entry(pts, paddr, len_lg2, &attrs);

		for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
		     bitnr++)
			KUNIT_ASSERT_FALSE(test,
					   pt_test_sw_bit_acquire(pts, bitnr));

		for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
		     bitnr++) {
			KUNIT_ASSERT_FALSE(test,
					   pt_test_sw_bit_acquire(pts, bitnr));
			pt_set_sw_bit_release(pts, bitnr);
			KUNIT_ASSERT_TRUE(test,
					  pt_test_sw_bit_acquire(pts, bitnr));
		}

		for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
		     bitnr++)
			KUNIT_ASSERT_TRUE(test,
					  pt_test_sw_bit_acquire(pts, bitnr));

		KUNIT_ASSERT_EQ(test, pt_item_oa(pts), paddr);

		/* SW bits didn't leak into the attrs */
		pt_attr_from_entry(pts, &new_attrs);
		KUNIT_ASSERT_MEMEQ(test, &new_attrs, &attrs, sizeof(attrs));

		pt_clear_entries(pts, len_lg2 - isz_lg2);
		KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
	}
}

static __maybe_unused void test_sw_bit_leaf(struct kunit *test)
{
	check_all_levels(test, test_lvl_sw_bit_leaf, NULL);
}

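/*
 * Same as test_lvl_sw_bit_leaf, but exercising the SW bits on a table entry
 * installed with pt_install_table().
 */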
static void test_lvl_sw_bit_table(struct kunit *test, struct pt_state *pts,
				  void *arg)
{
	struct kunit_iommu_priv *priv = test->priv;
	struct pt_write_attrs attrs = {};
	pt_oaddr_t paddr =
		log2_set_mod(priv->test_oa, 0, priv->smallest_pgsz_lg2);
	unsigned int bitnr;

	if (!pt_can_have_table(pts))
		return;
	if (pts->index != 0)
		return;

	KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
				 pt_iommu_set_prot(pts->range->common, &attrs,
						   IOMMU_READ));

	KUNIT_ASSERT_TRUE(test, pt_install_table(pts, paddr, &attrs));

	for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++)
		KUNIT_ASSERT_FALSE(test, pt_test_sw_bit_acquire(pts, bitnr));

	for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++) {
		KUNIT_ASSERT_FALSE(test, pt_test_sw_bit_acquire(pts, bitnr));
		pt_set_sw_bit_release(pts, bitnr);
		KUNIT_ASSERT_TRUE(test, pt_test_sw_bit_acquire(pts, bitnr));
	}

	for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++)
		KUNIT_ASSERT_TRUE(test, pt_test_sw_bit_acquire(pts, bitnr));

	KUNIT_ASSERT_EQ(test, pt_table_pa(pts), paddr);

	pt_clear_entries(pts, ilog2(1));
	KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
}

static __maybe_unused void test_sw_bit_table(struct kunit *test)
{
	check_all_levels(test, test_lvl_sw_bit_table, NULL);
}

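/*
 * Dirty tracking and SW bit test cases are only registered for formats that
 * define the optional pt_entry_is_write_dirty() and pt_sw_bit helpers.
 */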
static struct kunit_case generic_pt_test_cases[] = {
	KUNIT_CASE_FMT(test_init),
	KUNIT_CASE_FMT(test_bitops),
	KUNIT_CASE_FMT(test_best_pgsize),
	KUNIT_CASE_FMT(test_table_ptr),
	KUNIT_CASE_FMT(test_max_va),
	KUNIT_CASE_FMT(test_table_radix),
	KUNIT_CASE_FMT(test_entry_possible_sizes),
	KUNIT_CASE_FMT(test_entry_oa),
	KUNIT_CASE_FMT(test_attr_from_entry),
#ifdef pt_entry_is_write_dirty
	KUNIT_CASE_FMT(test_dirty),
#endif
#ifdef pt_sw_bit
	KUNIT_CASE_FMT(test_sw_bit_leaf),
	KUNIT_CASE_FMT(test_sw_bit_table),
#endif
	{},
};

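/*
 * Per-test fixture: allocate a kunit_iommu_priv and initialize it with
 * pt_kunit_priv_init(); pt_kunit_generic_pt_exit() tears it down again.
 */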
static int pt_kunit_generic_pt_init(struct kunit *test)
{
	struct kunit_iommu_priv *priv;
	int ret;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	ret = pt_kunit_priv_init(test, priv);
	if (ret) {
		kunit_kfree(test, priv);
		return ret;
	}
	test->priv = priv;
	return 0;
}

static void pt_kunit_generic_pt_exit(struct kunit *test)
{
	struct kunit_iommu_priv *priv = test->priv;

	if (!test->priv)
		return;

	pt_iommu_deinit(priv->iommu);
	kunit_kfree(test, test->priv);
}

static struct kunit_suite NS(generic_pt_suite) = {
	.name = __stringify(NS(fmt_test)),
	.init = pt_kunit_generic_pt_init,
	.exit = pt_kunit_generic_pt_exit,
	.test_cases = generic_pt_test_cases,
};
kunit_test_suites(&NS(generic_pt_suite));