// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);

struct regmap_test_priv {
	struct device *dev;
};

struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;
	bool fast_io;
};

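/* Fill @new with @size random bytes, ensuring every byte differs from @orig */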
static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	int i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * super care.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}

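/*
 * Base config shared by the tests; gen_regmap() fills in the cache type
 * and fast_io settings from the test parameters.
 */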
static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

static const char *regcache_type_name(enum regcache_type type)
{
	switch (type) {
	case REGCACHE_NONE:
		return "none";
	case REGCACHE_FLAT:
		return "flat";
	case REGCACHE_RBTREE:
		return "rbtree";
	case REGCACHE_MAPLE:
		return "maple";
	default:
		return NULL;
	}
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
	switch (endian) {
	case REGMAP_ENDIAN_BIG:
		return "big";
	case REGMAP_ENDIAN_LITTLE:
		return "little";
	case REGMAP_ENDIAN_DEFAULT:
		return "default";
	case REGMAP_ENDIAN_NATIVE:
		return "native";
	default:
		return NULL;
	}
}

static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->fast_io ? " fast I/O" : "",
		 param->from_reg);
}

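/* Every cache type, with and without fast I/O */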
static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_NONE, .fast_io = true },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_FLAT, .fast_io = true },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_RBTREE, .fast_io = true },
	{ .cache = REGCACHE_MAPLE },
	{ .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);

static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_FLAT, .fast_io = true },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_RBTREE, .fast_io = true },
	{ .cache = REGCACHE_MAPLE },
	{ .cache = REGCACHE_MAPLE, .fast_io = true },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

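/* Each real cache type, at base address zero and at several odd offsets */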
static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);

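/*
 * Build a RAM-backed regmap whose registers hold random initial values.
 * If the config requests register defaults they are captured from that
 * random data, and regmap_exit() is registered as a deferred KUnit
 * action so the map is torn down automatically at the end of the test.
 */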
static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	size_t size;
	int i, error;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->fast_io = param->fast_io;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	size = array_size(config->max_register + 1, sizeof(*buf));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kunit_kcalloc(test,
					 config->num_reg_defaults,
					 sizeof(struct reg_default),
					 GFP_KERNEL);
		if (!defaults)
			goto out_free;

		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

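/* Report every register except (from_reg + 5) as accessible */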
static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}

static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence sequence[BLOCK_TEST_SIZE];
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the multi API can be read back with single
	 * reads.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		sequence[i].reg = i;
		sequence[i].def = val[i];
		sequence[i].delay_us = 0;
	}
	KUNIT_EXPECT_EQ(test, 0,
			regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int regs[BLOCK_TEST_SIZE];
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the multi API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		regs[i] = i;
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	}
	KUNIT_EXPECT_EQ(test, 0,
			regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}

static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}

static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	/*
	 * Allow one extra register so that the read/written arrays
	 * are sized big enough to include an entry for the odd
	 * address past the final reg_default register.
	 */
	config.max_register = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even addresses can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}

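/*
 * An indirectly accessed (paged) range: virtual registers 20-40 are
 * reached through a 10 register window at address 4, with the page
 * selected via register 1.
 */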
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
	if (test_range_window_volatile(dev, reg))
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.max_register = 300;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	buf_sz = array_size(sizeof(*vals), config.max_register);
	vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}

static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}

static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}

static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}

static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}

static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}

static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_present(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

static void cache_write_zero(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Write a zero value */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));

	/* Read that zero value back */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_EXPECT_EQ(test, 0, val);

	/* From the cache? */
	KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));

	/* Try to throw it away */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
	KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
}

/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}

static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};

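/*
 * Like gen_regmap() but for raw, byte-formatted 16-bit I/O.  Register
 * defaults are always generated for the whole map, then discarded again
 * when there is no cache to consume them.
 */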
static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	int i, error;
	struct reg_default *defaults;
	size_t size;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kunit_kcalloc(test,
				 config->num_reg_defaults,
				 sizeof(struct reg_default),
				 GFP_KERNEL);
	if (!defaults)
		goto out_free;
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			ret = ERR_PTR(-EINVAL);
			goto out_free;
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/*
	 * Defer regmap_exit() to test teardown; if registering the
	 * action fails it runs immediately, freeing buf and *data.
	 */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

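/* Read back each of the random defaults one register at a time */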
static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

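/* Read all the defaults in one raw read and check the wire byte order */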
static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = array_size(sizeof(*rval), config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

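/* Round-trip a random value through a single write and read */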
static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

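/*
 * Raw-write two registers, then check that they read back correctly,
 * that the other registers keep their defaults and that the new
 * values landed in the "hardware" buffer.
 */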
static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}

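/* Helpers marking register 0 volatile and non-incrementing for raw_noinc_write() */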
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}

static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}

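/*
 * A noinc write streams the whole buffer to a single register: only
 * the last value should be readable afterwards, and the register
 * following the noinc register must be left untouched.
 */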
static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}

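/*
 * Writes done in cache only mode should be held in the cache and only
 * reach the "hardware" once regcache_sync() runs.
 */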
static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}

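/*
 * Exercise paged access through a register window: accesses within
 * the virtual range should update the selector and go via the window,
 * never touching the virtual registers directly.
 */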
static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

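/*
 * The raw tests only run with an explicit value endianness; raw_sync
 * and raw_ranges additionally need a real cache, so they skip
 * REGCACHE_NONE.
 */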
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};

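/*
 * Allocate the per-test state and register a dummy struct device for
 * the regmaps to bind to; a reference is held until regmap_test_exit().
 */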
static int regmap_test_init(struct kunit *test)
{
	struct regmap_test_priv *priv;
	struct device *dev;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	test->priv = priv;

	dev = kunit_device_register(test, "regmap_test");
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	priv->dev = get_device(dev);
	dev_set_drvdata(dev, test);

	return 0;
}

static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Drop the reference taken on the dummy device in regmap_test_init() */
	if (priv && priv->dev)
		put_device(priv->dev);
}

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_DESCRIPTION("Regmap KUnit tests");
MODULE_LICENSE("GPL v2");