// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2024 Google LLC.
 */
#include <kunit/test.h>
#include <linux/io-pgtable.h>

#include "arm-smmu-v3.h"

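/*
 * Wraps the driver's arm_smmu_entry_writer so the programming algorithm can
 * run against purely in-memory entries: instead of issuing a real CMD_SYNC,
 * each sync is recorded and the entry is checked against the hitless
 * invariant. invalid_entry_written notes whether an invalid (V=0) entry was
 * ever observable during the transition.
 */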
struct arm_smmu_test_writer {
	struct arm_smmu_entry_writer writer;
	struct kunit *test;
	const __le64 *init_entry;
	const __le64 *target_entry;
	__le64 *entry;

	bool invalid_entry_written;
	unsigned int num_syncs;
};

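/*
 * Both STEs and CDs are 64 bytes, i.e. eight 64-bit qwords, on SMMUv3.
 * NUM_EXPECTED_SYNCS() is an identity macro; it only labels the otherwise
 * bare sync counts at the call sites below.
 */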
#define NUM_ENTRY_QWORDS 8
#define NUM_EXPECTED_SYNCS(x) x

static struct arm_smmu_ste bypass_ste;
static struct arm_smmu_ste abort_ste;
static struct arm_smmu_device smmu = {
	.features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
};
static struct mm_struct sva_mm = {
	.pgd = (void *)0xdaedbeefdeadbeefULL,
};

enum arm_smmu_test_master_feat {
	ARM_SMMU_MASTER_TEST_ATS = BIT(0),
	ARM_SMMU_MASTER_TEST_STALL = BIT(1),
};

static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
						const __le64 *used_bits,
						const __le64 *target,
						unsigned int length)
{
	bool differs = false;
	unsigned int i;

	for (i = 0; i < length; i++) {
		if ((entry[i] & used_bits[i]) != target[i])
			differs = true;
	}
	return differs;
}

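/*
 * Sync callback installed in place of the real CMD_SYNC path. The entry
 * writer invokes it after each qword update, so at every point where
 * hardware could observe the entry we can count the sync and assert the
 * hitless invariant described in the comment below.
 */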
static void
arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
{
	struct arm_smmu_test_writer *test_writer =
		container_of(writer, struct arm_smmu_test_writer, writer);
	__le64 *entry_used_bits;

	entry_used_bits = kunit_kzalloc(
		test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
		GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);

	pr_debug("STE value is now set to: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8,
			     test_writer->entry,
			     NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
			     false);

	test_writer->num_syncs += 1;
	if (!test_writer->entry[0]) {
		test_writer->invalid_entry_written = true;
	} else {
		/*
		 * At any stage in a hitless transition, the entry must be
		 * equivalent to either the initial entry or the target entry
		 * when only considering the bits used by the current
		 * configuration.
		 */
		writer->ops->get_used(test_writer->entry, entry_used_bits);
		KUNIT_EXPECT_FALSE(
			test_writer->test,
			arm_smmu_entry_differs_in_used_bits(
				test_writer->entry, entry_used_bits,
				test_writer->init_entry, NUM_ENTRY_QWORDS) &&
				arm_smmu_entry_differs_in_used_bits(
					test_writer->entry, entry_used_bits,
					test_writer->target_entry,
					NUM_ENTRY_QWORDS));
	}
}

static void
arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
				       const __le64 *ste)
{
	__le64 used_bits[NUM_ENTRY_QWORDS] = {};

	arm_smmu_get_ste_used(ste, used_bits);
	pr_debug("STE used bits: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, used_bits,
			     sizeof(used_bits), false);
}

static const struct arm_smmu_entry_writer_ops test_ste_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_ste_used,
};

static const struct arm_smmu_entry_writer_ops test_cd_ops = {
	.sync = arm_smmu_test_writer_record_syncs,
	.get_used = arm_smmu_get_cd_used,
};

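/*
 * Core STE check: run arm_smmu_write_entry() on a copy of @cur and verify
 * that it reaches @target, that it issued exactly @num_syncs_expected syncs,
 * and that an invalid (V=0) intermediate entry was visible if and only if
 * the transition is expected to be non-hitless.
 */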
static void arm_smmu_v3_test_ste_expect_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
	bool hitless)
{
	struct arm_smmu_ste cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_ste_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,
	};

	pr_debug("STE initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("STE target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}

static void arm_smmu_v3_test_ste_expect_non_hitless_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
					       num_syncs_expected, false);
}

static void arm_smmu_v3_test_ste_expect_hitless_transition(
	struct kunit *test, const struct arm_smmu_ste *cur,
	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
					       num_syncs_expected, true);
}

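/*
 * Arbitrary dummy address: the tests only care that the CD table pointer
 * bits move between configurations, not that the address is mappable.
 */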
static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;

static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
					   unsigned int s1dss,
					   const dma_addr_t dma_addr,
					   enum arm_smmu_test_master_feat feat)
{
	bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS;
	bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL;

	struct arm_smmu_master master = {
		.ats_enabled = ats_enabled,
		.cd_table.cdtab_dma = dma_addr,
		.cd_table.s1cdmax = 0xFF,
		.cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
		.smmu = &smmu,
		.stall_enabled = stall_enabled,
	};

	arm_smmu_make_cdtable_ste(ste, &master, ats_enabled, s1dss);
}

static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
{
	/*
	 * Bypass STEs have used bits in the first two qwords, while abort
	 * STEs only have used bits in the first qword. Transitioning from
	 * bypass to abort requires two syncs: the first to set the first
	 * qword and make the STE into an abort, the second to clean up the
	 * second qword.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
{
	/*
	 * Transitioning from abort to bypass also requires two syncs: the first
	 * to set the second qword data required by the bypass STE, and the
	 * second to set the first qword and switch to bypass.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test)
{
	struct arm_smmu_ste ste;
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);

	/*
	 * Flipping s1dss on a CD table STE only involves changes to the second
	 * qword of an STE and can be done in a single write.
	 */
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1));
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1));
}

static void
arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test)
{
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2));
}

static void
arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test)
{
	struct arm_smmu_ste s1dss_bypass;

	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(
		test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2));
}

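/*
 * The VTTBR and VTCR fields below are arbitrary, mutually distinct nonzero
 * values; presumably any change to how the STE encodes them would show up
 * as a mismatch in the transition checks.
 */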
static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
				      enum arm_smmu_test_master_feat feat)
{
	bool ats_enabled = feat & ARM_SMMU_MASTER_TEST_ATS;
	bool stall_enabled = feat & ARM_SMMU_MASTER_TEST_STALL;
	struct arm_smmu_master master = {
		.ats_enabled = ats_enabled,
		.smmu = &smmu,
		.stall_enabled = stall_enabled,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
	};

	io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;

	arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled);
}

static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
{
	struct arm_smmu_ste ste;

	arm_smmu_test_make_s2_ste(&ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
						       NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test)
{
	struct arm_smmu_ste ste;
	struct arm_smmu_ste ste_2;

	/*
	 * Although no flow resembles this in practice, one way to force an STE
	 * update to be non-hitless is to change its CD table pointer as well
	 * as its S1DSS field in the same update.
	 */
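	/*
	 * (The pointer and S1DSS live in different qwords and both are used
	 * by both configurations, so no single-qword write order can keep
	 * every intermediate state matching one of the two entries; the
	 * writer has to pass through V=0 instead.)
	 */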
	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS,
				       0x4B4B4B4B4B, ARM_SMMU_MASTER_TEST_ATS);
	arm_smmu_v3_test_ste_expect_non_hitless_transition(
		test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_test_cd_expect_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
	bool hitless)
{
	struct arm_smmu_cd cur_copy = *cur;
	struct arm_smmu_test_writer test_writer = {
		.writer = {
			.ops = &test_cd_ops,
		},
		.test = test,
		.init_entry = cur->data,
		.target_entry = target->data,
		.entry = cur_copy.data,
		.num_syncs = 0,
		.invalid_entry_written = false,
	};

	pr_debug("CD initial value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
	pr_debug("CD target value: ");
	print_hex_dump_debug("    ", DUMP_PREFIX_NONE, 16, 8, target->data,
			     sizeof(cur_copy), false);
	arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
					       target->data);

	arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);

	KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
	KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
}

static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
					      num_syncs_expected, false);
}

static void arm_smmu_v3_test_cd_expect_hitless_transition(
	struct kunit *test, const struct arm_smmu_cd *cur,
	const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
{
	arm_smmu_v3_test_cd_expect_transition(test, cur, target,
					      num_syncs_expected, true);
}

static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};
	struct io_pgtable io_pgtable = {};
	struct arm_smmu_domain smmu_domain = {
		.pgtbl_ops = &io_pgtable.ops,
		.cd = {
			.asid = asid,
		},
	};

	io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
	io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
	io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;

	arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
}

static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_s1_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_s1_cd(&cd, 778);
	arm_smmu_test_make_s1_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
						      NUM_EXPECTED_SYNCS(1));
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
						      NUM_EXPECTED_SYNCS(1));
}

static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
}

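/*
 * Passing a NULL mm to arm_smmu_make_sva_cd() appears to build the
 * "released" CD installed while an mm is being torn down; the sva_release
 * test below checks that swapping between the live and released CD for the
 * same ASID stays hitless.
 */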
static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
					      unsigned int asid)
{
	struct arm_smmu_master master = {
		.smmu = &smmu,
	};

	arm_smmu_make_sva_cd(cd, &master, NULL, asid);
}

static void arm_smmu_v3_write_ste_test_s1_to_s2_stall(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_ste_test_s2_to_s1_stall(struct kunit *test)
{
	struct arm_smmu_ste s1_ste;
	struct arm_smmu_ste s2_ste;

	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
				       fake_cdtab_dma_addr, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_test_make_s2_ste(&s2_ste, ARM_SMMU_MASTER_TEST_STALL);
	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
						       NUM_EXPECTED_SYNCS(3));
}

static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
{
	struct arm_smmu_cd cd = {};
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_sva_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_non_hitless_transition(
		test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
}

static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
{
	struct arm_smmu_cd cd;
	struct arm_smmu_cd cd_2;

	arm_smmu_test_make_sva_cd(&cd, 1997);
	arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
						      NUM_EXPECTED_SYNCS(2));
	arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
						      NUM_EXPECTED_SYNCS(2));
}

static struct kunit_case arm_smmu_v3_test_cases[] = {
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2_stall),
	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1_stall),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
	{},
};

static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
{
	arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
	arm_smmu_make_abort_ste(&abort_ste);
	return 0;
}

static struct kunit_suite arm_smmu_v3_test_module = {
	.name = "arm-smmu-v3-kunit-test",
	.suite_init = arm_smmu_v3_test_suite_init,
	.test_cases = arm_smmu_v3_test_cases,
};
kunit_test_suites(&arm_smmu_v3_test_module);

MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
MODULE_DESCRIPTION("KUnit tests for arm-smmu-v3 driver");
MODULE_LICENSE("GPL v2");