// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

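/* Base regmap_config shared by the integer tests; each test copies and adjusts it */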
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

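/* Pair each cache type under test with a human readable parameter name */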
struct regcache_types {
	enum regcache_type type;
	const char *name;
};

static void case_to_desc(const struct regcache_types *t, char *desc)
{
	strcpy(desc, t->name);
}

static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);

static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);

static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);

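/*
 * Create a RAM backed regmap with random initial register contents. If
 * register defaults are requested they are generated to mirror the
 * initial contents so that cached and uncached reads agree.
 */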
static struct regmap *gen_regmap(struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	unsigned int *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * sizeof(unsigned int);
	int i;
	struct reg_default *defaults;

	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data)) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kcalloc(config->num_reg_defaults,
				   sizeof(struct reg_default),
				   GFP_KERNEL);
		if (!defaults) {
			kfree(buf);
			kfree(*data);
			return ERR_PTR(-ENOMEM);
		}
		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = i * config->reg_stride;
			defaults[i].def = buf[i * config->reg_stride];
		}
	}

	ret = regmap_init_ram(config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	}

	return ret;
}

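/* Access callback reporting every register except 5 as accessible */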
static bool reg_5_false(struct device *context, unsigned int reg)
{
	return reg != 5;
}

static void basic_read_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);

	regmap_exit(map);
}

static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}

static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.readable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}

static void reg_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}

static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}

static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}

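/*
 * Indirectly accessed range: registers 20-40 are reached through a 10
 * register window starting at register 4, with the page selected via
 * register 1.
 */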
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.max_register = 300;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	vals = kunit_kcalloc(test, config.max_register, sizeof(*vals),
			     GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);
	buf_sz = sizeof(*vals) * config.max_register;

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
	}

	regmap_exit(map);
}

static void cache_bypass(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[0], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}

static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}

static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}

static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}

static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}

static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}

static void cache_present(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));

	regmap_exit(map);
}

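/* Parameters for the raw I/O tests: cache type plus value endianness */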
struct raw_test_types {
	const char *name;

	enum regcache_type cache_type;
	enum regmap_endian val_endian;
};

static void raw_to_desc(const struct raw_test_types *t, char *desc)
{
	strcpy(desc, t->name);
}

static const struct raw_test_types raw_types_list[] = {
	{ "none-little",   REGCACHE_NONE,   REGMAP_ENDIAN_LITTLE },
	{ "none-big",      REGCACHE_NONE,   REGMAP_ENDIAN_BIG },
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);

static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);

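/*
 * Base config for the raw tests: 16 bit registers and values. The value
 * endianness and cache type are filled in per test case by gen_raw_regmap().
 */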
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};

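/*
 * Create a raw, RAM backed regmap with random initial contents. Register
 * defaults are generated to mirror those contents, converted to CPU
 * endian from the configured value endianness.
 */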
static struct regmap *gen_raw_regmap(struct regmap_config *config,
				     struct raw_test_types *test_type,
				     struct regmap_ram_data **data)
{
	u16 *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * config->reg_bits / 8;
	int i;
	struct reg_default *defaults;

	config->cache_type = test_type->cache_type;
	config->val_format_endian = test_type->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data)) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kcalloc(config->num_reg_defaults,
			   sizeof(struct reg_default),
			   GFP_KERNEL);
	if (!defaults) {
		kfree(buf);
		kfree(*data);
		return ERR_PTR(-ENOMEM);
	}
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (test_type->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	}

	return ret;
}

static void raw_read_defaults_single(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}

	regmap_exit(map);
}

static void raw_read_defaults(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kmalloc(val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
		}
	}

	kfree(rval);
	regmap_exit(map);
}

static void raw_write_read_single(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}

static void raw_write(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));

	regmap_exit(map);
}

static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[2];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      be16_to_cpu(val[0])));
	else
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      le16_to_cpu(val[0])));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
		case 6:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));

	regmap_exit(map);
}

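/*
 * The cache_* cases need a real cache, and cache_drop/cache_present are
 * further restricted to the sparse cache types which can track which
 * registers are present.
 */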
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	{}
};

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");