// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"

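/* Number of contiguous registers exercised by most of the tests below */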
#define BLOCK_TEST_SIZE 12

KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);

struct regmap_test_priv {
	struct device *dev;
};

struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;
	bool fast_io;
};

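/*
 * Fill @new with random bytes, ensuring that every byte differs from
 * the corresponding byte in @orig.
 */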
static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	int i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * super care.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}

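/*
 * Base configuration shared by the non-raw tests; gen_regmap() fills in
 * the cache type and fast_io from the test parameters.
 */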
static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

static const char *regcache_type_name(enum regcache_type type)
{
	switch (type) {
	case REGCACHE_NONE:
		return "none";
	case REGCACHE_FLAT:
		return "flat";
	case REGCACHE_FLAT_S:
		return "flat-sparse";
	case REGCACHE_RBTREE:
		return "rbtree";
	case REGCACHE_MAPLE:
		return "maple";
	default:
		return NULL;
	}
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
	switch (endian) {
	case REGMAP_ENDIAN_BIG:
		return "big";
	case REGMAP_ENDIAN_LITTLE:
		return "little";
	case REGMAP_ENDIAN_DEFAULT:
		return "default";
	case REGMAP_ENDIAN_NATIVE:
		return "native";
	default:
		return NULL;
	}
}

static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->fast_io ? " fast I/O" : "",
		 param->from_reg);
}

static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_NONE, .fast_io = true },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_FLAT, .fast_io = true },
	{ .cache = REGCACHE_FLAT_S },
	{ .cache = REGCACHE_FLAT_S, .fast_io = true },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_RBTREE, .fast_io = true },
	{ .cache = REGCACHE_MAPLE },
	{ .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);

static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_FLAT, .fast_io = true },
	{ .cache = REGCACHE_FLAT_S },
	{ .cache = REGCACHE_FLAT_S, .fast_io = true },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_RBTREE, .fast_io = true },
	{ .cache = REGCACHE_MAPLE },
	{ .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT,   .from_reg = 0 },
	{ .cache = REGCACHE_FLAT,   .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_FLAT,   .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT,   .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT,   .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT,   .from_reg = 0x2004 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE,  .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);

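/*
 * Create a RAM-backed regmap for the current test parameters, with
 * random initial register contents.  If the config has reg_defaults,
 * they are seeded from the backing store.  Cleanup happens via a KUnit
 * deferred action, so callers never need to regmap_exit() by hand.
 *
 * Typical use, as in the tests below:
 *
 *	config = test_regmap_config;
 *	map = gen_regmap(test, &config, &data);
 *	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
 */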
static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	size_t size;
	int i, error;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->fast_io = param->fast_io;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	size = array_size(config->max_register + 1, sizeof(*buf));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kunit_kcalloc(test,
					 config->num_reg_defaults,
					 sizeof(struct reg_default),
					 GFP_KERNEL);
		if (!defaults)
			goto out_free;

		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

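/*
 * Register access predicate that returns false only for register
 * from_reg + 5.  Plugged into writeable_reg/readable_reg/volatile_reg
 * by tests that want to single out one register.
 */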
static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}

static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache, the cache should have satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence sequence[BLOCK_TEST_SIZE];
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the multi API can be read back with single
	 * reads.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		sequence[i].reg = i;
		sequence[i].def = val[i];
		sequence[i].delay_us = 0;
	}
	KUNIT_EXPECT_EQ(test, 0,
			regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int regs[BLOCK_TEST_SIZE];
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the multi API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		regs[i] = i;
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	}
	KUNIT_EXPECT_EQ(test, 0,
			regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only mode, regmap_read_bypassed() should return
	 * the register value and leave the map in cache-only mode.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values, not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only mode, regmap_read_bypassed() should return
	 * the register value and leave the map in cache-only mode.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values, not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers; writing the read-only one should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers; the write-only one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}

static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}

static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	/*
	 * Allow one extra register so that the read/written arrays
	 * are sized big enough to include an entry for the odd
	 * address past the final reg_default register.
	 */
	config.max_register = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even addresses can be accessed; try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}

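/*
 * Indirectly-addressed range used by the range tests: registers 20-40
 * are accessed through a 10-register window starting at register 4,
 * with the page selected via the selector register at address 1.
 */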
static const struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
	if (test_range_window_volatile(dev, reg))
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.max_register = 300;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	buf_sz = array_size(sizeof(*vals), config.max_register);
	vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}

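/*
 * regcache_cache_bypass() sends reads and writes straight to the
 * hardware while leaving the existing cache contents untouched; the
 * cached value should reappear once bypass is turned off again.
 */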
static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass; the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

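/*
 * regcache_mark_dirty() flags the whole cache as out of sync with the
 * device, so a following regcache_sync() writes the cached values back
 * out (skipping registers still at their default values).
 */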
static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}

static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}

static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at their default value.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}

static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync; this should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write the register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers; writing the read-only one should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}

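/*
 * regcache_drop_region() discards cached values for a register range,
 * so subsequent reads of the dropped registers must go to the device.
 */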
static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}

static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty; the cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync the cache without marking it dirty. All registers were
	 * dropped, so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync the cache without marking it dirty. All registers were
	 * dropped, so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_present(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

static void cache_write_zero(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Write a zero value */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));

	/* Read that zero value back */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_EXPECT_EQ(test, 0, val);

	/* From the cache? */
	KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));

	/* Try to throw it away */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
	KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
}

/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again; the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again; the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}

static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

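/*
 * Base configuration for the raw I/O tests: 16-bit registers and values,
 * with the value endianness supplied per test parameter.
 */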
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};

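/*
 * Build a regmap backed by raw RAM for the current test parameter.
 * The backing store is filled with random bytes and a matching
 * reg_defaults table is generated from it, so both cached and
 * uncached reads can be checked against known values.  A cleanup
 * action is registered so the map is released automatically when the
 * test finishes.
 *
 * A typical caller looks roughly like:
 *
 *	config = raw_regmap_config;
 *	map = gen_raw_regmap(test, &config, &data);
 *	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
 */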
static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	int i, error;
	struct reg_default *defaults;
	size_t size;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	/* The buffer holds register values, so size it by val_bits */
	size = array_size(config->max_register + 1, BITS_TO_BYTES(config->val_bits));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kunit_kcalloc(test,
				 config->num_reg_defaults,
				 sizeof(struct reg_default),
				 GFP_KERNEL);
	if (!defaults)
		goto out_free;
	config->reg_defaults = defaults;

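	/*
	 * The random backing store is in wire endianness, so convert
	 * each value to CPU endianness when recording it as a register
	 * default.
	 */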
	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu((__force __le16)buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu((__force __be16)buf[i]);
			break;
		default:
			ret = ERR_PTR(-EINVAL);
			goto out_free;
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* On failure this calls regmap_exit(), which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

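/*
 * Reading each register individually should return the generated
 * default, whether it comes from the cache or from the random
 * initial contents of the backing store.
 */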
static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

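/*
 * A single regmap_raw_read() of the whole block should return every
 * default, with the data left in wire endianness rather than being
 * converted for the CPU.
 */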
static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = array_size(sizeof(*rval), config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, rval);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

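/*
 * regmap_write() and regmap_read() take CPU-endian values, so a
 * value written through the API should read back unchanged whatever
 * the configured wire endianness.
 */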
static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

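/*
 * Raw writes bypass endian conversion: the data should land
 * byte-for-byte in the backing store, while regmap_read() returns
 * the CPU-endian conversion of it.
 */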
static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}

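/*
 * Helpers that single out register 0; used below to mark it volatile
 * and non-incrementing in both the regmap config and the RAM
 * backend.
 */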
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}

static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}

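/*
 * Writing a block to a noinc register should send every value to the
 * same register rather than incrementing the address: only the last
 * value in the block is visible afterwards and the following
 * register is untouched.
 */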
static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu((__force __be16)val_array[1]) + 100;
		val_last = be16_to_cpu((__force __be16)val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu((__force __le16)val_array[1]) + 100;
		val_last = le16_to_cpu((__force __le16)val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}

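/*
 * Writes done in cache-only mode must not reach the hardware until
 * regcache_sync() pushes them out.  Both a raw write and a normal
 * write are made so the sync covers both paths.
 */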
static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via regmap_write() was converted to wire
	 * endianness by the core; convert our copy the same way for
	 * the memory comparisons below.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = (__force u16)cpu_to_be16(val[2]);
	else
		val[2] = (__force u16)cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}

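/*
 * Accesses to the virtual range should be steered through the
 * window: the selector register is written to pick the page and the
 * physical access lands in the window registers, never at the
 * virtual address itself.
 */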
static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Set the page to a non-zero value so the next access must change it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access should hit the virtual range itself */
	for (i = test_range.range_min; i <= test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

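/*
 * The parameter generator named in each entry selects which
 * cache/endianness combinations the test runs with; tests that need
 * a real cache, a sparse cache or raw I/O use the narrower parameter
 * lists defined above.
 */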
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};

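/*
 * Allocate the per-test private data and register a dummy struct
 * device to own the regmaps.  An extra reference is taken on the
 * device here and dropped again in regmap_test_exit() once the tests
 * are done with it.
 */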
static int regmap_test_init(struct kunit *test)
{
	struct regmap_test_priv *priv;
	struct device *dev;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	test->priv = priv;

	dev = kunit_device_register(test, "regmap_test");
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	priv->dev = get_device(dev);
	dev_set_drvdata(dev, test);

	return 0;
}

static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Drop the reference taken on the dummy struct device in init */
	if (priv && priv->dev)
		put_device(priv->dev);
}

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");