// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2024 Google LLC.
 */
#include <kunit/test.h>
#include <linux/io-pgtable.h>

#include "arm-smmu-v3.h"

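/*
 * A fake entry writer for driving arm_smmu_write_entry() without hardware:
 * every sync the algorithm requests is recorded, and the entry under
 * construction is snapshotted so the hitless invariant can be checked at
 * each sync point.
 */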
struct arm_smmu_test_writer {
	struct arm_smmu_entry_writer writer;
	struct kunit *test;
	const __le64 *init_entry;
	const __le64 *target_entry;
	__le64 *entry;

	bool invalid_entry_written;
	unsigned int num_syncs;
};

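/*
 * STEs and CDs are both 64 bytes, i.e. eight 64-bit quadwords.
 * NUM_EXPECTED_SYNCS() is an identity macro that merely labels the expected
 * sync count at call sites, e.g.:
 *
 *	arm_smmu_v3_test_ste_expect_hitless_transition(
 *		test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
 */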
#define NUM_ENTRY_QWORDS 8
#define NUM_EXPECTED_SYNCS(x) x

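/*
 * Shared fixtures: bypass_ste and abort_ste are filled in once by
 * suite_init(), the fake SMMU advertises the features consulted by the
 * entry builders, and sva_mm carries an arbitrary fake pgd value.
 */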
static struct arm_smmu_ste bypass_ste;
static struct arm_smmu_ste abort_ste;
static struct arm_smmu_device smmu = {
	.features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
};
static struct mm_struct sva_mm = {
	.pgd = (void *)0xdaedbeefdeadbeefULL,
};

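/* Check whether @entry, masked down to @used_bits, still matches @target. */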
static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
						const __le64 *used_bits,
						const __le64 *target,
						unsigned int length)
{
	bool differs = false;
	unsigned int i;

	for (i = 0; i < length; i++) {
		if ((entry[i] & used_bits[i]) != target[i])
			differs = true;
	}
	return differs;
}

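/*
 * Called at each point where the live driver would sync the entry to the
 * hardware: count the sync, dump the in-flight entry, and verify the
 * hitless invariant.
 */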
static void
arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
{
	struct arm_smmu_test_writer *test_writer =
		container_of(writer, struct arm_smmu_test_writer, writer);
	__le64 *entry_used_bits;

	entry_used_bits = kunit_kzalloc(
		test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
		GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);

	pr_debug("Entry value is now set to: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8,
			     test_writer->entry,
			     NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
			     false);

	test_writer->num_syncs += 1;
	/* An all-zero first qword means V=0, i.e. the entry went invalid. */
	if (!test_writer->entry[0]) {
		test_writer->invalid_entry_written = true;
	} else {
		/*
		 * At any stage in a hitless transition, the entry must be
		 * equivalent to either the initial entry or the target entry
		 * when only considering the bits used by the current
		 * configuration.
		 */
		writer->ops->get_used(test_writer->entry, entry_used_bits);
		KUNIT_EXPECT_FALSE(
			test_writer->test,
			arm_smmu_entry_differs_in_used_bits(
				test_writer->entry, entry_used_bits,
				test_writer->init_entry, NUM_ENTRY_QWORDS) &&
				arm_smmu_entry_differs_in_used_bits(
					test_writer->entry, entry_used_bits,
					test_writer->target_entry,
					NUM_ENTRY_QWORDS));
	}
}

static void
arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
				       const __le64 *ste)
{
	__le64 used_bits[NUM_ENTRY_QWORDS] = {};

	/* Use the writer's own helper so CD entries are decoded correctly. */
	writer->ops->get_used(ste, used_bits);
	pr_debug("Entry used bits: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, used_bits,
			     sizeof(used_bits), false);
}

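/*
 * Plug the recorder above into the shared programming algorithm, reusing
 * the driver's own used-bit calculators for STEs and CDs.
 */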
static const struct arm_smmu_entry_writer_ops test_ste_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_ste_used,
};

static const struct arm_smmu_entry_writer_ops test_cd_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_cd_used,
};

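/*
 * Run arm_smmu_write_entry() on a scratch copy of @cur and verify that it
 * reaches @target with the expected number of syncs, writing an invalid
 * entry along the way only if the transition is not expected to be hitless.
 */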
static void arm_smmu_v3_test_ste_expect_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
	bool hitless)
{
	struct arm_smmu_ste cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_ste_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,
	};

	pr_debug("STE initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("STE target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}

static void arm_smmu_v3_test_ste_expect_hitless_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
					       num_syncs_expected, true);
}

static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;

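/*
 * Build a CD-table STE around a fake master whose context-descriptor table
 * sits at @dma_addr; the remaining fields hold arbitrary test values.
 */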
static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
					   const dma_addr_t dma_addr)
{
	struct arm_smmu_master master = {
		.cd_table.cdtab_dma = dma_addr,
		.cd_table.s1cdmax = 0xFF,
		.cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
		.smmu = &smmu,
	};

	arm_smmu_make_cdtable_ste(ste, &master);
}

static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
{
	/*
	 * Bypass STEs have used bits in the first two qwords, while abort
	 * STEs only have used bits in the first qword. Transitioning from
	 * bypass to abort requires two syncs: the first to set the first
	 * qword and make the STE into an abort, the second to clean up the
	 * second qword.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
{
	/*
	 * Transitioning from abort to bypass also requires two syncs: the
	 * first to set the second qword data required by the bypass STE, and
	 * the second to set the first qword and switch to bypass.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
						       NUM_EXPECTED_SYNCS(3));
}

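/*
 * Build a stage-2 STE from a fake io_pgtable configuration whose VTTBR and
 * VTCR fields are filled with distinct arbitrary values.
 */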
static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
				      bool ats_enabled)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
		.ats_enabled = ats_enabled,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
	};

	io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;

	arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain);
}

static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, true);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, true);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, true);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, true);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

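/*
 * CD analogue of the STE harness above: program a scratch copy of @cur
 * towards @target and check the sync count, hitless-ness and final value.
 */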
static void arm_smmu_v3_test_cd_expect_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
	bool hitless)
{
	struct arm_smmu_cd cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_cd_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,
	};

	pr_debug("CD initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("CD target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}

static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
					      num_syncs_expected, false);
}

static void arm_smmu_v3_test_cd_expect_hitless_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
					      num_syncs_expected, true);
}

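/*
 * Build a stage-1 CD for @asid from a fake io_pgtable configuration with
 * arbitrary TTBR/TCR/MAIR test values.
 */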
static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
		.cd = {
			.asid = asid,
		},
	};

	io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
	io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;

	arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
}

static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_s1_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_s1_cd(&cd, 778);
	arm_smmu_test_make_s1_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
						      NUM_EXPECTED_SYNCS(1));
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
						      NUM_EXPECTED_SYNCS(1));
}

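/*
 * An SVA CD shares the mm's page tables; passing a NULL mm instead yields
 * the quiescent CD used while the address space is being torn down.
 */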
static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
}

static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
					      unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, NULL, asid);
}

static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_sva_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
{
	struct arm_smmu_cd cd;
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_sva_cd(&cd, 1997);
	arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
						      NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
						      NUM_EXPECTED_SYNCS(2));
}

static struct kunit_case arm_smmu_v3_test_cases[] = {
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
	{},
};

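/* Build the shared bypass and abort STE fixtures once for the whole suite. */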
static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
{
	arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
	arm_smmu_make_abort_ste(&abort_ste);
	return 0;
}

static struct kunit_suite arm_smmu_v3_test_module = {
	.name = "arm-smmu-v3-kunit-test",
	.suite_init = arm_smmu_v3_test_suite_init,
	.test_cases = arm_smmu_v3_test_cases,
};
kunit_test_suites(&arm_smmu_v3_test_module);

MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
MODULE_DESCRIPTION("KUnit tests for the arm-smmu-v3 STE/CD writer");
MODULE_LICENSE("GPL v2");