xref: /linux/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c (revision d40981350844c2cfa437abfc80596e10ea8f1149)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2024 Google LLC.
4  */
5 #include <kunit/test.h>
6 #include <linux/io-pgtable.h>
7 
8 #include "arm-smmu-v3.h"
9 
/*
 * Wraps an arm_smmu_entry_writer so each ->sync() callback can be intercepted
 * and the in-flight entry inspected mid-transition.
 */
struct arm_smmu_test_writer {
	struct arm_smmu_entry_writer writer;
	struct kunit *test;
	/* Contents of the entry before the transition under test. */
	const __le64 *init_entry;
	/* Contents the transition is expected to finish on. */
	const __le64 *target_entry;
	/* The live copy that arm_smmu_write_entry() rewrites in place. */
	__le64 *entry;

	/* Set when a sync observed qword 0 cleared (entry made invalid). */
	bool invalid_entry_written;
	/* Number of ->sync() callbacks issued during the transition. */
	unsigned int num_syncs;
};
20 
/* Size, in 64-bit words, of the entries handled here (STEs and CDs alike). */
#define NUM_ENTRY_QWORDS 8
/* Purely documentary wrapper for expected sync counts at call sites. */
#define NUM_EXPECTED_SYNCS(x) x

/* Template STEs built once in suite_init and shared by every test case. */
static struct arm_smmu_ste bypass_ste;
static struct arm_smmu_ste abort_ste;
static struct arm_smmu_device smmu = {
	.features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
};
/* Fake mm for SVA CDs; only the pgd pointer value matters to the test. */
static struct mm_struct sva_mm = {
	.pgd = (void *)0xdaedbeefdeadbeefULL,
};
32 
33 static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
34 						const __le64 *used_bits,
35 						const __le64 *target,
36 						unsigned int length)
37 {
38 	bool differs = false;
39 	unsigned int i;
40 
41 	for (i = 0; i < length; i++) {
42 		if ((entry[i] & used_bits[i]) != target[i])
43 			differs = true;
44 	}
45 	return differs;
46 }
47 
/*
 * ->sync() hook for the test writer.  Rather than issuing a real CMD_SYNC,
 * count the sync and validate the entry's intermediate state: at every sync
 * point of a hitless update the entry must be valid and, in its
 * currently-used bits, equivalent to either the initial or the target entry.
 */
static void
arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
{
	struct arm_smmu_test_writer *test_writer =
		container_of(writer, struct arm_smmu_test_writer, writer);
	__le64 *entry_used_bits;

	/* kunit-managed allocation: freed automatically at end of test. */
	entry_used_bits = kunit_kzalloc(
		test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
		GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);

	pr_debug("STE value is now set to: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8,
			     test_writer->entry,
			     NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
			     false);

	test_writer->num_syncs += 1;
	/* A cleared qword 0 means the entry was made invalid at this step. */
	if (!test_writer->entry[0]) {
		test_writer->invalid_entry_written = true;
	} else {
		/*
		 * At any stage in a hitless transition, the entry must be
		 * equivalent to either the initial entry or the target entry
		 * when only considering the bits used by the current
		 * configuration.
		 */
		writer->ops->get_used(test_writer->entry, entry_used_bits);
		KUNIT_EXPECT_FALSE(
			test_writer->test,
			arm_smmu_entry_differs_in_used_bits(
				test_writer->entry, entry_used_bits,
				test_writer->init_entry, NUM_ENTRY_QWORDS) &&
				arm_smmu_entry_differs_in_used_bits(
					test_writer->entry, entry_used_bits,
					test_writer->target_entry,
					NUM_ENTRY_QWORDS));
	}
}
88 
/*
 * Dump the used-bits mask of @ste via pr_debug.  @writer is unused; it is
 * kept so the helper mirrors the entry-writer callback shape.
 */
static void
arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
				       const __le64 *ste)
{
	__le64 used_bits[NUM_ENTRY_QWORDS] = {};

	arm_smmu_get_ste_used(ste, used_bits);
	pr_debug("STE used bits: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, used_bits,
			     sizeof(used_bits), false);
}
100 
/* Entry-writer ops for STE transitions, with syncs recorded not issued. */
static const struct arm_smmu_entry_writer_ops test_ste_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_ste_used,
};
105 
/* Entry-writer ops for CD transitions, with syncs recorded not issued. */
static const struct arm_smmu_entry_writer_ops test_cd_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_cd_used,
};
110 
/*
 * Drive arm_smmu_write_entry() from @cur to @target through the recording
 * writer, then verify three things: whether any intermediate state was
 * invalid (i.e. the update was hitless iff @hitless), that exactly
 * @num_syncs_expected syncs were required, and that the final entry matches
 * @target bit-for-bit.
 */
static void arm_smmu_v3_test_ste_expect_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
	bool hitless)
{
	/* Work on a copy so the caller's @cur template stays pristine. */
	struct arm_smmu_ste cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_ste_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,

	};

	pr_debug("STE initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("STE target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}
146 
147 static void arm_smmu_v3_test_ste_expect_non_hitless_transition(
148 	struct kunit *test, const struct arm_smmu_ste *cur,
149 	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
150 {
151 	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
152 					       num_syncs_expected, false);
153 }
154 
155 static void arm_smmu_v3_test_ste_expect_hitless_transition(
156 	struct kunit *test, const struct arm_smmu_ste *cur,
157 	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
158 {
159 	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
160 					       num_syncs_expected, true);
161 }
162 
/* Arbitrary but recognisable DMA address for the fake CD table pointer. */
static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
164 
/*
 * Build a stage-1 CD-table STE into @ste using a synthetic master whose CD
 * table lives at @dma_addr, with the given @s1dss behaviour.
 */
static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
					   unsigned int s1dss,
					   const dma_addr_t dma_addr)
{
	struct arm_smmu_master master = {
		.cd_table.cdtab_dma = dma_addr,
		.cd_table.s1cdmax = 0xFF,
		.cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
		.smmu = &smmu,
	};

	arm_smmu_make_cdtable_ste(ste, &master, true, s1dss);
}
178 
static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
{
	/*
	 * Bypass STEs have used bits in the first two qwords, while abort STEs
	 * only have used bits in the first qword. Transitioning from bypass to
	 * abort requires two syncs: the first to set the first qword and make
	 * the STE into an abort, the second to clean up the second qword.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
}
190 
static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
{
	/*
	 * Transitioning from abort to bypass also requires two syncs: the
	 * first to set the second qword data required by the bypass STE, and
	 * the second to set the first qword and switch to bypass.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}
201 
202 static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
203 {
204 	struct arm_smmu_ste ste;
205 
206 	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
207 				       fake_cdtab_dma_addr);
208 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
209 						       NUM_EXPECTED_SYNCS(2));
210 }
211 
212 static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
213 {
214 	struct arm_smmu_ste ste;
215 
216 	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
217 				       fake_cdtab_dma_addr);
218 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
219 						       NUM_EXPECTED_SYNCS(2));
220 }
221 
222 static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
223 {
224 	struct arm_smmu_ste ste;
225 
226 	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
227 				       fake_cdtab_dma_addr);
228 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
229 						       NUM_EXPECTED_SYNCS(3));
230 }
231 
232 static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
233 {
234 	struct arm_smmu_ste ste;
235 
236 	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
237 				       fake_cdtab_dma_addr);
238 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
239 						       NUM_EXPECTED_SYNCS(3));
240 }
241 
static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test)
{
	struct arm_smmu_ste ste;
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr);
	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr);

	/*
	 * Flipping s1dss on a CD table STE only involves changes to the second
	 * qword of an STE and can be done in a single write.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1));
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1));
}
261 
262 static void
263 arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test)
264 {
265 	struct arm_smmu_ste s1dss_bypass;
266 
267 	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
268 				       fake_cdtab_dma_addr);
269 	arm_smmu_v3_test_ste_expect_hitless_transition(
270 		test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2));
271 }
272 
273 static void
274 arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test)
275 {
276 	struct arm_smmu_ste s1dss_bypass;
277 
278 	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
279 				       fake_cdtab_dma_addr);
280 	arm_smmu_v3_test_ste_expect_hitless_transition(
281 		test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2));
282 }
283 
284 static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
285 				      bool ats_enabled)
286 {
287 	struct arm_smmu_master master = {
288 		.smmu = &smmu,
289 	};
290 	struct io_pgtable io_pgtable = {};
291 	struct arm_smmu_domain smmu_domain = {
292 		.pgtbl_ops = &io_pgtable.ops,
293 	};
294 
295 	io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
296 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
297 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
298 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
299 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
300 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
301 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
302 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;
303 
304 	arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled);
305 }
306 
307 static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
308 {
309 	struct arm_smmu_ste ste;
310 
311 	arm_smmu_test_make_s2_ste(&ste, true);
312 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
313 						       NUM_EXPECTED_SYNCS(2));
314 }
315 
316 static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
317 {
318 	struct arm_smmu_ste ste;
319 
320 	arm_smmu_test_make_s2_ste(&ste, true);
321 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
322 						       NUM_EXPECTED_SYNCS(2));
323 }
324 
325 static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
326 {
327 	struct arm_smmu_ste ste;
328 
329 	arm_smmu_test_make_s2_ste(&ste, true);
330 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
331 						       NUM_EXPECTED_SYNCS(2));
332 }
333 
334 static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
335 {
336 	struct arm_smmu_ste ste;
337 
338 	arm_smmu_test_make_s2_ste(&ste, true);
339 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
340 						       NUM_EXPECTED_SYNCS(2));
341 }
342 
343 static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test)
344 {
345 	struct arm_smmu_ste s1_ste;
346 	struct arm_smmu_ste s2_ste;
347 
348 	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
349 				       fake_cdtab_dma_addr);
350 	arm_smmu_test_make_s2_ste(&s2_ste, true);
351 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
352 						       NUM_EXPECTED_SYNCS(3));
353 }
354 
355 static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test)
356 {
357 	struct arm_smmu_ste s1_ste;
358 	struct arm_smmu_ste s2_ste;
359 
360 	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
361 				       fake_cdtab_dma_addr);
362 	arm_smmu_test_make_s2_ste(&s2_ste, true);
363 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
364 						       NUM_EXPECTED_SYNCS(3));
365 }
366 
static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test)
{
	struct arm_smmu_ste ste;
	struct arm_smmu_ste ste_2;

	/*
	 * Although no flow resembles this in practice, one way to force an STE
	 * update to be non-hitless is to change its CD table pointer as well as
	 * s1 dss field in the same update.
	 */
	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr);
	/* Different S1DSS *and* a different CD table address than @ste. */
	arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS,
				       0x4B4B4b4B4B);
	arm_smmu_v3_test_ste_expect_non_hitless_transition(
		test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3));
}
384 
/*
 * CD counterpart of arm_smmu_v3_test_ste_expect_transition(): drive
 * arm_smmu_write_entry() from @cur to @target with the recording writer and
 * check hitlessness, the sync count and the final contents.
 */
static void arm_smmu_v3_test_cd_expect_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
	bool hitless)
{
	/* Work on a copy so the caller's @cur template stays pristine. */
	struct arm_smmu_cd cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_cd_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,

	};

	pr_debug("CD initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("CD target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}
420 
421 static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
422 	struct kunit *test, const struct arm_smmu_cd *cur,
423 	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
424 {
425 	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
426 					      num_syncs_expected, false);
427 }
428 
429 static void arm_smmu_v3_test_cd_expect_hitless_transition(
430 	struct kunit *test, const struct arm_smmu_cd *cur,
431 	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
432 {
433 	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
434 					      num_syncs_expected, true);
435 }
436 
/*
 * Build a stage-1 CD into @cd for @asid, from a synthetic io_pgtable
 * configuration whose TTBR/TCR/MAIR fields carry distinctive non-zero
 * values so that bit packing mistakes show up in the comparisons.
 */
static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
		.cd = {
			.asid = asid,
		},
	};

	io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
	io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;

	arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
}
461 
462 static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
463 {
464 	struct arm_smmu_cd cd = {};
465 	struct arm_smmu_cd cd_2;
466 
467 	arm_smmu_test_make_s1_cd(&cd_2, 1997);
468 	arm_smmu_v3_test_cd_expect_non_hitless_transition(
469 		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
470 	arm_smmu_v3_test_cd_expect_non_hitless_transition(
471 		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
472 }
473 
474 static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
475 {
476 	struct arm_smmu_cd cd = {};
477 	struct arm_smmu_cd cd_2;
478 
479 	arm_smmu_test_make_s1_cd(&cd, 778);
480 	arm_smmu_test_make_s1_cd(&cd_2, 1997);
481 	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
482 						      NUM_EXPECTED_SYNCS(1));
483 	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
484 						      NUM_EXPECTED_SYNCS(1));
485 }
486 
/* Build an SVA CD for @asid backed by the fake sva_mm above. */
static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
}
495 
/* Build the CD used when an SVA mm is released (NULL mm) for @asid. */
static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
					      unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, NULL, asid);
}
505 
506 static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
507 {
508 	struct arm_smmu_cd cd = {};
509 	struct arm_smmu_cd cd_2;
510 
511 	arm_smmu_test_make_sva_cd(&cd_2, 1997);
512 	arm_smmu_v3_test_cd_expect_non_hitless_transition(
513 		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
514 	arm_smmu_v3_test_cd_expect_non_hitless_transition(
515 		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
516 }
517 
518 static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
519 {
520 	struct arm_smmu_cd cd;
521 	struct arm_smmu_cd cd_2;
522 
523 	arm_smmu_test_make_sva_cd(&cd, 1997);
524 	arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
525 	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
526 						      NUM_EXPECTED_SYNCS(2));
527 	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
528 						      NUM_EXPECTED_SYNCS(2));
529 }
530 
/* STE transition tests first, then the CD transition tests. */
static struct kunit_case arm_smmu_v3_test_cases[] = {
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
	{},
};
554 
/* Build the shared bypass/abort STE templates once for the whole suite. */
static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
{
	arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
	arm_smmu_make_abort_ste(&abort_ste);
	return 0;
}
561 
static struct kunit_suite arm_smmu_v3_test_module = {
	.name = "arm-smmu-v3-kunit-test",
	.suite_init = arm_smmu_v3_test_suite_init,
	.test_cases = arm_smmu_v3_test_cases,
};
kunit_test_suites(&arm_smmu_v3_test_module);

/* The driver exports its internals to this namespace for kunit use only. */
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
MODULE_DESCRIPTION("KUnit tests for arm-smmu-v3 driver");
MODULE_LICENSE("GPL v2");
572