// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

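/*
 * Generate regmap_exit_action() so regmap_exit() can be registered as a
 * KUnit deferred action and run automatically when a test case finishes.
 */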
KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);

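/*
 * Per-test private data; callbacks retrieve the struct kunit from the
 * device's drvdata and find this via test->priv.
 */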
struct regmap_test_priv {
        struct device *dev;
        bool *reg_default_called;
        unsigned int reg_default_max;
};

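/*
 * One entry in the parameterised test matrix: which cache implementation
 * to use, value endianness, the first register of the test block, and
 * whether fast_io locking is enabled.
 */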
struct regmap_test_param {
        enum regcache_type cache;
        enum regmap_endian val_endian;

        unsigned int from_reg;
        bool fast_io;
};

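/* Fill @new with random data while guaranteeing every byte differs from @orig. */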
static void get_changed_bytes(void *orig, void *new, size_t size)
{
        char *o = orig;
        char *n = new;
        int i;

        get_random_bytes(new, size);

        /*
         * This could be nicer and more efficient but we shouldn't
         * super care.
         */
        for (i = 0; i < size; i++)
                while (n[i] == o[i])
                        get_random_bytes(&n[i], 1);
}

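/*
 * Base config cloned by each test. val_bits matches unsigned int, so
 * values map 1:1 onto the RAM backend's buffer.
 */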
static const struct regmap_config test_regmap_config = {
        .reg_stride = 1,
        .val_bits = sizeof(unsigned int) * 8,
};

static const char *regcache_type_name(enum regcache_type type)
{
        switch (type) {
        case REGCACHE_NONE:
                return "none";
        case REGCACHE_FLAT:
                return "flat";
        case REGCACHE_FLAT_S:
                return "flat-sparse";
        case REGCACHE_RBTREE:
                return "rbtree";
        case REGCACHE_MAPLE:
                return "maple";
        default:
                return NULL;
        }
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
        switch (endian) {
        case REGMAP_ENDIAN_BIG:
                return "big";
        case REGMAP_ENDIAN_LITTLE:
                return "little";
        case REGMAP_ENDIAN_DEFAULT:
                return "default";
        case REGMAP_ENDIAN_NATIVE:
                return "native";
        default:
                return NULL;
        }
}

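/*
 * Build the human-readable KUnit case description, for example
 * "rbtree-default @0x2001" or "maple-default fast I/O @0".
 */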
static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
        snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
                 regcache_type_name(param->cache),
                 regmap_endian_name(param->val_endian),
                 param->fast_io ? " fast I/O" : "",
                 param->from_reg);
}

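/*
 * Every cache type, with and without fast_io (which switches the map
 * from mutex to spinlock locking).
 */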
static const struct regmap_test_param regcache_types_list[] = {
        { .cache = REGCACHE_NONE },
        { .cache = REGCACHE_NONE, .fast_io = true },
        { .cache = REGCACHE_FLAT },
        { .cache = REGCACHE_FLAT, .fast_io = true },
        { .cache = REGCACHE_FLAT_S },
        { .cache = REGCACHE_FLAT_S, .fast_io = true },
        { .cache = REGCACHE_RBTREE },
        { .cache = REGCACHE_RBTREE, .fast_io = true },
        { .cache = REGCACHE_MAPLE },
        { .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);

static const struct regmap_test_param real_cache_types_only_list[] = {
        { .cache = REGCACHE_FLAT },
        { .cache = REGCACHE_FLAT, .fast_io = true },
        { .cache = REGCACHE_FLAT_S },
        { .cache = REGCACHE_FLAT_S, .fast_io = true },
        { .cache = REGCACHE_RBTREE },
        { .cache = REGCACHE_RBTREE, .fast_io = true },
        { .cache = REGCACHE_MAPLE },
        { .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

static const struct regmap_test_param flat_cache_types_list[] = {
        { .cache = REGCACHE_FLAT, .from_reg = 0 },
        { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
};

KUNIT_ARRAY_PARAM(flat_cache_types, flat_cache_types_list, param_to_desc);

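/*
 * Each real cache type, with the test block based at zero and at a few
 * high odd addresses to exercise blocks that don't start at register 0.
 */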
static const struct regmap_test_param real_cache_types_list[] = {
        { .cache = REGCACHE_FLAT, .from_reg = 0 },
        { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
        { .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
        { .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
        { .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

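/* As above, but only the cache types that store registers sparsely. */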
static const struct regmap_test_param sparse_cache_types_list[] = {
        { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
        { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
        { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
        { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);

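/*
 * Build a RAM-backed regmap for the current test parameters. The backing
 * store is seeded with random values and *data exposes per-register
 * read/written flags for the tests to inspect.
 */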
static struct regmap *gen_regmap(struct kunit *test,
                                 struct regmap_config *config,
                                 struct regmap_ram_data **data)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap_test_priv *priv = test->priv;
        unsigned int *buf;
        struct regmap *ret = ERR_PTR(-ENOMEM);
        size_t size;
        int i, error;
        struct reg_default *defaults;

        config->cache_type = param->cache;
        config->fast_io = param->fast_io;

        if (config->max_register == 0) {
                config->max_register = param->from_reg;
                if (config->num_reg_defaults)
                        config->max_register += (config->num_reg_defaults - 1) *
                                                config->reg_stride;
                else
                        config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
        }

        size = array_size(config->max_register + 1, sizeof(*buf));
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        get_random_bytes(buf, size);

        *data = kzalloc_obj(**data);
        if (!(*data))
                goto out_free;
        (*data)->vals = buf;

        if (config->num_reg_defaults) {
                defaults = kunit_kcalloc(test,
                                         config->num_reg_defaults,
                                         sizeof(struct reg_default),
                                         GFP_KERNEL);
                if (!defaults)
                        goto out_free;

                config->reg_defaults = defaults;

                for (i = 0; i < config->num_reg_defaults; i++) {
                        defaults[i].reg = param->from_reg + (i * config->reg_stride);
                        defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
                }
        }

        ret = regmap_init_ram(priv->dev, config, *data);
        if (IS_ERR(ret))
                goto out_free;

        /* This calls regmap_exit() on failure, which frees buf and *data */
        error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
        if (error)
                ret = ERR_PTR(error);

        return ret;

out_free:
        kfree(buf);
        kfree(*data);

        return ret;
}

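/* Access predicate: true for every register except from_reg + 5. */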
static bool reg_5_false(struct device *dev, unsigned int reg)
{
        struct kunit *test = dev_get_drvdata(dev);
        const struct regmap_test_param *param = test->param_value;

        return reg != (param->from_reg + 5);
}

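/* Default value pattern: 0x5a5a in the top 16 bits, register number below. */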
static unsigned int reg_default_expected(unsigned int reg)
{
        return 0x5a5a0000 | (reg & 0xffff);
}

static int reg_default_test_cb(struct device *dev, unsigned int reg,
                               unsigned int *def)
{
        struct kunit *test = dev_get_drvdata(dev);
        struct regmap_test_priv *priv = test->priv;

        if (priv && priv->reg_default_called && reg <= priv->reg_default_max)
                priv->reg_default_called[reg] = true;

        *def = reg_default_expected(reg);
        return 0;
}

static void expect_reg_default_value(struct kunit *test, struct regmap *map,
                                     struct regmap_ram_data *data,
                                     struct regmap_test_priv *priv,
                                     unsigned int reg)
{
        unsigned int val;

        KUNIT_EXPECT_TRUE(test, priv->reg_default_called[reg]);
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, reg, &val));
        KUNIT_EXPECT_EQ(test, reg_default_expected(reg), val);
        KUNIT_EXPECT_FALSE(test, data->read[reg]);
}

static void basic_read_write(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val, rval;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /* If we write a value to a register we can read it back */
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
        KUNIT_EXPECT_EQ(test, val, rval);

        /* If using a cache, the cache satisfied the read */
        KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /*
         * Data written via the bulk API can be read back with single
         * reads.
         */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
                                                   BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

        KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

        /* If using a cache, the cache satisfied the reads */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /* Data written as single writes can be read via the bulk API */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
                                                  BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

        /* If using a cache, the cache satisfied the reads */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_write(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        struct reg_sequence sequence[BLOCK_TEST_SIZE];
        unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /*
         * Data written via the multi API can be read back with single
         * reads.
         */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                sequence[i].reg = i;
                sequence[i].def = val[i];
                sequence[i].delay_us = 0;
        }
        KUNIT_EXPECT_EQ(test, 0,
                        regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

        KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

        /* If using a cache, the cache satisfied the reads */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_read(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int regs[BLOCK_TEST_SIZE];
        unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /* Data written as single writes can be read via the multi API */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                regs[i] = i;
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
        }
        KUNIT_EXPECT_EQ(test, 0,
                        regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

        /* If using a cache, the cache satisfied the reads */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void read_bypassed(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[BLOCK_TEST_SIZE], rval;
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        KUNIT_EXPECT_FALSE(test, map->cache_bypass);

        get_random_bytes(&val, sizeof(val));

        /* Write some test values */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

        regcache_cache_only(map, true);

        /*
         * While in cache-only, regmap_read_bypassed() should return the
         * register value and leave the map in cache-only mode.
         */
        for (i = 0; i < ARRAY_SIZE(val); i++) {
                /* Put inverted bits in rval to prove we really read the value */
                rval = ~val[i];
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
                KUNIT_EXPECT_EQ(test, val[i], rval);

                rval = ~val[i];
                KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
                KUNIT_EXPECT_EQ(test, val[i], rval);
                KUNIT_EXPECT_TRUE(test, map->cache_only);
                KUNIT_EXPECT_FALSE(test, map->cache_bypass);
        }

        /*
         * Change the underlying register values to prove it is returning
         * real values, not cached values.
         */
        for (i = 0; i < ARRAY_SIZE(val); i++) {
                val[i] = ~val[i];
                data->vals[param->from_reg + i] = val[i];
        }

        for (i = 0; i < ARRAY_SIZE(val); i++) {
                rval = ~val[i];
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
                KUNIT_EXPECT_NE(test, val[i], rval);

                rval = ~val[i];
                KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
                KUNIT_EXPECT_EQ(test, val[i], rval);
                KUNIT_EXPECT_TRUE(test, map->cache_only);
                KUNIT_EXPECT_FALSE(test, map->cache_bypass);
        }
}

static void read_bypassed_volatile(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[BLOCK_TEST_SIZE], rval;
        int i;

        config = test_regmap_config;
        /* All registers except #5 volatile */
        config.volatile_reg = reg_5_false;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        KUNIT_EXPECT_FALSE(test, map->cache_bypass);

        get_random_bytes(&val, sizeof(val));

        /* Write some test values */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

        regcache_cache_only(map, true);

        /*
         * While in cache-only, regmap_read_bypassed() should return the
         * register value and leave the map in cache-only mode.
         */
        for (i = 0; i < ARRAY_SIZE(val); i++) {
                /* Register #5 is non-volatile, so it should read from the cache */
                KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
                                regmap_read(map, param->from_reg + i, &rval));

                /* Put inverted bits in rval to prove we really read the value */
                rval = ~val[i];
                KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
                KUNIT_EXPECT_EQ(test, val[i], rval);
                KUNIT_EXPECT_TRUE(test, map->cache_only);
                KUNIT_EXPECT_FALSE(test, map->cache_bypass);
        }

        /*
         * Change the underlying register values to prove it is returning
         * real values, not cached values.
         */
        for (i = 0; i < ARRAY_SIZE(val); i++) {
                val[i] = ~val[i];
                data->vals[param->from_reg + i] = val[i];
        }

        for (i = 0; i < ARRAY_SIZE(val); i++) {
                if (i == 5)
                        continue;

                rval = ~val[i];
                KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
                KUNIT_EXPECT_EQ(test, val[i], rval);
                KUNIT_EXPECT_TRUE(test, map->cache_only);
                KUNIT_EXPECT_FALSE(test, map->cache_bypass);
        }
}

static void write_readonly(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;
        config.writeable_reg = reg_5_false;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[i] = false;

        /* Change the value of all registers; writing the read-only one should fail */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

        /* Did that match what we see on the device? */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;
        config.readable_reg = reg_5_false;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[i] = false;

        /*
         * Try to read all the registers; the write-only one should
         * fail unless we're using the flat cache.
         */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                if (config.cache_type != REGCACHE_FLAT) {
                        KUNIT_EXPECT_EQ(test, i != 5,
                                        regmap_read(map, i, &val) == 0);
                } else {
                        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
                }
        }

        /* Did we trigger a hardware access? */
        KUNIT_EXPECT_FALSE(test, data->read[5]);
}

static void reg_defaults(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Read back the expected default data */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
                                                  BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

        /* The data should have been read from cache if there was one */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_default_callback_populates_flat_cache(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap_test_priv *priv = test->priv;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int reg, val;
        unsigned int defaults_end;

        config = test_regmap_config;
        config.num_reg_defaults = 3;
        config.max_register = param->from_reg + BLOCK_TEST_SIZE - 1;
        config.reg_default_cb = reg_default_test_cb;

        priv->reg_default_max = config.max_register;
        priv->reg_default_called = kunit_kcalloc(test, config.max_register + 1,
                                                 sizeof(*priv->reg_default_called),
                                                 GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, priv->reg_default_called);

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        for (reg = 0; reg <= config.max_register; reg++)
                data->read[reg] = false;

        defaults_end = param->from_reg + config.num_reg_defaults - 1;

        for (reg = param->from_reg; reg <= defaults_end; reg++) {
                KUNIT_EXPECT_FALSE(test, priv->reg_default_called[reg]);
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, reg, &val));
                KUNIT_EXPECT_EQ(test, data->vals[reg], val);
                KUNIT_EXPECT_FALSE(test, data->read[reg]);
        }

        if (param->from_reg > 0)
                expect_reg_default_value(test, map, data, priv, 0);

        if (defaults_end + 1 <= config.max_register)
                expect_reg_default_value(test, map, data, priv, defaults_end + 1);

        if (config.max_register > defaults_end + 1)
                expect_reg_default_value(test, map, data, priv, config.max_register);
}

static void reg_defaults_read_dev(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;
        config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* We should have read the cache defaults back from the map */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
                data->read[i] = false;
        }

        /* Read back the expected default data */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
                                                  BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

        /* The data should have been read from cache if there was one */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        struct reg_sequence patch[2];
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        /* We need defaults so readback works */
        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Stash the original values */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
                                                  BLOCK_TEST_SIZE));

        /* Patch a couple of values */
        patch[0].reg = 2;
        patch[0].def = rval[2] + 1;
        patch[0].delay_us = 0;
        patch[1].reg = 5;
        patch[1].def = rval[5] + 1;
        patch[1].delay_us = 0;
        KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
                                                       ARRAY_SIZE(patch)));

        /* Only the patched registers are written */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                switch (i) {
                case 2:
                case 5:
                        KUNIT_EXPECT_TRUE(test, data->written[i]);
                        KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
                        break;
                default:
                        KUNIT_EXPECT_FALSE(test, data->written[i]);
                        KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
                        break;
                }
        }
}

static void stride(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval;
        int i;

        config = test_regmap_config;
        config.reg_stride = 2;
        config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

        /*
         * Allow one extra register so that the read/written arrays
         * are sized big enough to include an entry for the odd
         * address past the final reg_default register.
         */
        config.max_register = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Only even addresses can be accessed; try both read and write */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                data->read[i] = false;
                data->written[i] = false;

                if (i % 2) {
                        KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
                        KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
                        KUNIT_EXPECT_FALSE(test, data->read[i]);
                        KUNIT_EXPECT_FALSE(test, data->written[i]);
                } else {
                        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
                        KUNIT_EXPECT_EQ(test, data->vals[i], rval);
                        KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
                                        data->read[i]);

                        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
                        KUNIT_EXPECT_TRUE(test, data->written[i]);
                }
        }
}

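/*
 * Windowed (paged) range: virtual registers 20-40 are reached through a
 * 10-register window at registers 4-13, with the page number
 * (reg - range_min) / window_len written to the selector at register 1.
 */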
static const struct regmap_range_cfg test_range = {
        .selector_reg = 1,
        .selector_mask = 0xff,

        .window_start = 4,
        .window_len = 10,

        .range_min = 20,
        .range_max = 40,
};

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
        if (reg >= test_range.window_start &&
            reg <= test_range.window_start + test_range.window_len)
                return true;

        return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
        if (test_range_window_volatile(dev, reg))
                return true;

        if (reg >= test_range.range_min && reg <= test_range.range_max)
                return true;

        return false;
}

static void basic_ranges(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;
        config.volatile_reg = test_range_all_volatile;
        config.ranges = &test_range;
        config.num_ranges = 1;
        config.max_register = test_range.range_max;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        for (i = test_range.range_min; i < test_range.range_max; i++) {
                data->read[i] = false;
                data->written[i] = false;
        }

        /* Reset the page to a non-zero value to trigger a change */
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
                                              test_range.range_max));

        /* Check we set the page and use the window for writes */
        data->written[test_range.selector_reg] = false;
        data->written[test_range.window_start] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
        KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
        KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

        data->written[test_range.selector_reg] = false;
        data->written[test_range.window_start] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
                                              test_range.range_min +
                                              test_range.window_len,
                                              0));
        KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
        KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

        /* Same for reads */
        data->written[test_range.selector_reg] = false;
        data->read[test_range.window_start] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
        KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
        KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

        data->written[test_range.selector_reg] = false;
        data->read[test_range.window_start] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
                                             test_range.range_min +
                                             test_range.window_len,
                                             &val));
        KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
        KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

        /* No physical access triggered in the virtual range */
        for (i = test_range.range_min; i < test_range.range_max; i++) {
                KUNIT_EXPECT_FALSE(test, data->read[i]);
                KUNIT_EXPECT_FALSE(test, data->written[i]);
        }
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval, *vals;
        size_t buf_sz;
        int i;

        config = test_regmap_config;
        config.max_register = 300;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        buf_sz = array_size(sizeof(*vals), config.max_register);
        vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
        KUNIT_ASSERT_FALSE(test, vals == NULL);

        get_random_bytes(vals, buf_sz);

        /* Write data into the map/cache in ever decreasing strides */
        for (i = 0; i < config.max_register; i += 100)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i += 50)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i += 25)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i += 10)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i += 5)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i += 3)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i += 2)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
        for (i = 0; i < config.max_register; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

        /* Do reads from the cache (if there is one) match? */
        for (i = 0; i < config.max_register; i++) {
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
                KUNIT_EXPECT_EQ(test, rval, vals[i]);
                KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
        }
}

static void cache_bypass(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val, rval;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /* Ensure the cache has a value in it */
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

        /* Bypass then write a different value */
        regcache_cache_bypass(map, true);
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

        /* Read the bypassed value */
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
        KUNIT_EXPECT_EQ(test, val + 1, rval);
        KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

        /* Disable bypass, the cache should still return the original value */
        regcache_cache_bypass(map, false);
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
        KUNIT_EXPECT_EQ(test, val, rval);
}

static void cache_sync_marked_dirty(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /* Put some data into the cache */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
                                                   BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;

        /* Trash the data on the device itself then resync */
        regcache_mark_dirty(map);
        memset(data->vals, 0, sizeof(val));
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* Did we just write the correct data out? */
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}

static void cache_sync_after_cache_only(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[BLOCK_TEST_SIZE];
        unsigned int val_mask;
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        val_mask = GENMASK(config.val_bits - 1, 0);
        get_random_bytes(&val, sizeof(val));

        /* Put some data into the cache */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
                                                   BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;

        /* Set cache-only and change the values */
        regcache_cache_only(map, true);
        for (i = 0; i < ARRAY_SIZE(val); ++i)
                val[i] = ~val[i] & val_mask;

        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
                                                   BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

        KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

        /* Exit cache-only and sync the cache without marking hardware registers dirty */
        regcache_cache_only(map, false);

        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* Did we just write the correct data out? */
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}

static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        get_random_bytes(&val, sizeof(val));

        /* Change the value of one register */
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

        /* Resync */
        regcache_mark_dirty(map);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* Did we just sync the one register we touched? */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

        /* Rewrite registers back to their defaults */
        for (i = 0; i < config.num_reg_defaults; ++i)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
                                                      config.reg_defaults[i].def));

        /*
         * Resync after regcache_mark_dirty() should not write out registers
         * that are at their default value
         */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;
        regcache_mark_dirty(map);
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}

static void cache_sync_default_after_cache_only(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int orig_val;
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

        /* Enter cache-only and change the value of one register */
        regcache_cache_only(map, true);
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

        /* Exit cache-only and resync, should write out the changed register */
        regcache_cache_only(map, false);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* Was the register written out? */
        KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
        KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

        /* Enter cache-only and write register back to its default value */
        regcache_cache_only(map, true);
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

        /* Resync should write out the new value */
        regcache_cache_only(map, false);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;

        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
        KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
        KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;
        config.writeable_reg = reg_5_false;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Read all registers to fill the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

        /* Change the value of all registers; writing the read-only one should fail */
        get_random_bytes(&val, sizeof(val));
        regcache_cache_only(map, true);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
        regcache_cache_only(map, false);

        /* Resync */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* Did that match what we see on the device? */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        struct reg_sequence patch[2];
        unsigned int rval[BLOCK_TEST_SIZE], val;
        int i;

        /* We need defaults so readback works */
        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Stash the original values */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
                                                  BLOCK_TEST_SIZE));

        /* Patch a couple of values */
        patch[0].reg = param->from_reg + 2;
        patch[0].def = rval[2] + 1;
        patch[0].delay_us = 0;
        patch[1].reg = param->from_reg + 5;
        patch[1].def = rval[5] + 1;
        patch[1].delay_us = 0;
        KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
                                                       ARRAY_SIZE(patch)));

        /* Sync the cache */
        regcache_mark_dirty(map);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* The patch should be on the device but not in the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
                KUNIT_EXPECT_EQ(test, val, rval[i]);

                switch (i) {
                case 2:
                case 5:
                        KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
                        KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
                        break;
                default:
                        KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
                        KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
                        break;
                }
        }
}

static void cache_drop(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Ensure the data is read from the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
                                                  BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++) {
                KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
                data->read[param->from_reg + i] = false;
        }
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

        /* Drop some registers */
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
                                                      param->from_reg + 5));

        /* Reread and check only the dropped registers hit the device. */
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
                                                  BLOCK_TEST_SIZE));
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}

static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val[4][BLOCK_TEST_SIZE];
        unsigned int reg;
        const int num_ranges = ARRAY_SIZE(val) * 2;
        int rangeidx, i;

        static_assert(ARRAY_SIZE(val) == 4);

        config = test_regmap_config;
        config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        for (i = 0; i < config.max_register + 1; i++)
                data->written[i] = false;

        /* Create non-contiguous cache blocks by writing every other range */
        get_random_bytes(&val, sizeof(val));
        for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
                reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
                KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
                                                           &val[rangeidx / 2],
                                                           BLOCK_TEST_SIZE));
                KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
                                   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
        }

        /* Check that odd ranges weren't written */
        for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
                reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
                for (i = 0; i < BLOCK_TEST_SIZE; i++)
                        KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
        }

        /* Drop range 2 */
        reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

        /* Drop part of range 4 */
        reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

        /* Mark dirty and reset mock registers to 0 */
        regcache_mark_dirty(map);
        for (i = 0; i < config.max_register + 1; i++) {
                data->vals[i] = 0;
                data->written[i] = false;
        }

        /* The registers that were dropped from range 4 should now remain at 0 */
        val[4 / 2][3] = 0;
        val[4 / 2][4] = 0;
        val[4 / 2][5] = 0;

        /* Sync and check that the expected register ranges were written */
        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

        /* Check that odd ranges weren't written */
        for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
                reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
                for (i = 0; i < BLOCK_TEST_SIZE; i++)
                        KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
        }

        /* Check that even ranges (except 2 and 4) were written */
        for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
                if ((rangeidx == 2) || (rangeidx == 4))
                        continue;

                reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
                for (i = 0; i < BLOCK_TEST_SIZE; i++)
                        KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

                KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
                                   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
        }

        /* Check that range 2 wasn't written */
        reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

        /* Check that range 4 was partially written */
        reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

        KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

        /* Nothing before param->from_reg should have been written */
        for (i = 0; i < param->from_reg; i++)
                KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Ensure the data is read from the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
                                                  BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

        /* Change all values in cache from defaults */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

        /* Drop all registers */
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

        /* Mark dirty and cache sync should not write anything. */
        regcache_mark_dirty(map);
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;

        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
        for (i = 0; i <= config.max_register; i++)
                KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Ensure the data is read from the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
                                                  BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

        /* Change all values in cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

        /* Drop all registers */
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

        /*
         * Sync cache without marking it dirty. All registers were dropped
         * so the cache should not have any entries to write out.
         */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;

        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
        for (i = 0; i <= config.max_register; i++)
                KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int rval[BLOCK_TEST_SIZE];
        int i;

        config = test_regmap_config;
        config.num_reg_defaults = BLOCK_TEST_SIZE;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        /* Ensure the data is read from the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[param->from_reg + i] = false;
        KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
                                                  BLOCK_TEST_SIZE));
        KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

        /* Change all values in cache from defaults */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

        /* Drop all registers */
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

        /*
         * Sync cache without marking it dirty. All registers were dropped
         * so the cache should not have any entries to write out.
         */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->written[param->from_reg + i] = false;

        KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
        for (i = 0; i <= config.max_register; i++)
                KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_present(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[param->from_reg + i] = false;

        /* No defaults so no registers cached. */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

        /* We didn't trigger any reads */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

        /* Fill the cache */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

        /* Now everything should be cached */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

static void cache_write_zero(struct kunit *test)
{
        const struct regmap_test_param *param = test->param_value;
        struct regmap *map;
        struct regmap_config config;
        struct regmap_ram_data *data;
        unsigned int val;
        int i;

        config = test_regmap_config;

        map = gen_regmap(test, &config, &data);
        KUNIT_ASSERT_FALSE(test, IS_ERR(map));
        if (IS_ERR(map))
                return;

        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                data->read[param->from_reg + i] = false;

        /* No defaults so no registers cached. */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

        /* We didn't trigger any reads */
        for (i = 0; i < BLOCK_TEST_SIZE; i++)
                KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

        /* Write a zero value */
        KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));

        /* Read that zero value back */
        KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
        KUNIT_EXPECT_EQ(test, 0, val);

        /* From the cache? */
        KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));

        /* Try to throw it away */
        KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
        KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
}

1651 /* Check that caching the window register works with sync */
cache_range_window_reg(struct kunit * test)1652 static void cache_range_window_reg(struct kunit *test)
1653 {
1654 struct regmap *map;
1655 struct regmap_config config;
1656 struct regmap_ram_data *data;
1657 unsigned int val;
1658 int i;
1659
1660 config = test_regmap_config;
1661 config.volatile_reg = test_range_window_volatile;
1662 config.ranges = &test_range;
1663 config.num_ranges = 1;
1664 config.max_register = test_range.range_max;
1665
1666 map = gen_regmap(test, &config, &data);
1667 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1668 if (IS_ERR(map))
1669 return;
1670
1671 /* Write new values to the entire range */
1672 for (i = test_range.range_min; i <= test_range.range_max; i++)
1673 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
1674
1675 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1676 KUNIT_ASSERT_EQ(test, val, 2);
1677
1678 /* Write to the first register in the range to reset the page */
1679 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1680 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1681 KUNIT_ASSERT_EQ(test, val, 0);
1682
1683 /* Trigger a cache sync */
1684 regcache_mark_dirty(map);
1685 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
1686
1687 /* Write to the first register again, the page should be reset */
1688 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1689 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
1690 KUNIT_ASSERT_EQ(test, val, 0);
1691
1692 /* Trigger another cache sync */
1693 regcache_mark_dirty(map);
1694 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
1695
	/* Write to the last register, the page should be updated */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}

static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};

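/*
 * Create a raw format regmap backed by test RAM, generating a random
 * default value for every register.
 */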
static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	int i, error;
	struct reg_default *defaults;
	size_t size;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
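	/*
	 * The rbtree and maple caches can allocate when registers are
	 * written, which is presumably why the core locking (a spinlock
	 * for fast_io maps) is skipped for those cache types.
	 */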
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
				  config->cache_type == REGCACHE_MAPLE;

	size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kunit_kcalloc(test,
				 config->num_reg_defaults,
				 sizeof(struct reg_default),
				 GFP_KERNEL);
	if (!defaults)
		goto out_free;
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			ret = ERR_PTR(-EINVAL);
			goto out_free;
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

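/* Check that the generated defaults read back one register at a time */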
static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

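/* Check that a bulk raw read returns all the defaults in device endianness */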
static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = array_size(sizeof(*rval), config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

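/* Check that a normal write to a single register on a raw map reads back */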
static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

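/* Check that a raw write updates both the map and the "hardware" */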
static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}

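/* Register 0 serves as the volatile, no-increment register below */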
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}

static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}

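/* Check that noinc writes only ever touch the noinc register */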
static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}

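/* Check that values written in cache only mode are synced to the "hardware" */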
static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}

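/* Check that windowed range accesses set the page selector correctly */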
static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_default_callback_populates_flat_cache,
			 flat_cache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};

static int regmap_test_init(struct kunit *test)
{
	struct regmap_test_priv *priv;
	struct device *dev;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	test->priv = priv;

	dev = kunit_device_register(test, "regmap_test");
	if (IS_ERR(dev))
		return PTR_ERR(dev);

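	/* Hold a reference on the device until regmap_test_exit() drops it */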
	priv->dev = get_device(dev);
	dev_set_drvdata(dev, test);

	return 0;
}

static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Destroy the dummy struct device */
	if (priv && priv->dev)
		put_device(priv->dev);
}

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");