1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // regmap KUnit tests 4 // 5 // Copyright 2023 Arm Ltd 6 7 #include <kunit/device.h> 8 #include <kunit/resource.h> 9 #include <kunit/test.h> 10 #include "internal.h" 11 12 #define BLOCK_TEST_SIZE 12 13 14 KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *); 15 16 struct regmap_test_priv { 17 struct device *dev; 18 }; 19 20 struct regmap_test_param { 21 enum regcache_type cache; 22 enum regmap_endian val_endian; 23 24 unsigned int from_reg; 25 bool fast_io; 26 }; 27 28 static void get_changed_bytes(void *orig, void *new, size_t size) 29 { 30 char *o = orig; 31 char *n = new; 32 int i; 33 34 get_random_bytes(new, size); 35 36 /* 37 * This could be nicer and more efficient but we shouldn't 38 * super care. 39 */ 40 for (i = 0; i < size; i++) 41 while (n[i] == o[i]) 42 get_random_bytes(&n[i], 1); 43 } 44 45 static const struct regmap_config test_regmap_config = { 46 .reg_stride = 1, 47 .val_bits = sizeof(unsigned int) * 8, 48 }; 49 50 static const char *regcache_type_name(enum regcache_type type) 51 { 52 switch (type) { 53 case REGCACHE_NONE: 54 return "none"; 55 case REGCACHE_FLAT: 56 return "flat"; 57 case REGCACHE_RBTREE: 58 return "rbtree"; 59 case REGCACHE_MAPLE: 60 return "maple"; 61 default: 62 return NULL; 63 } 64 } 65 66 static const char *regmap_endian_name(enum regmap_endian endian) 67 { 68 switch (endian) { 69 case REGMAP_ENDIAN_BIG: 70 return "big"; 71 case REGMAP_ENDIAN_LITTLE: 72 return "little"; 73 case REGMAP_ENDIAN_DEFAULT: 74 return "default"; 75 case REGMAP_ENDIAN_NATIVE: 76 return "native"; 77 default: 78 return NULL; 79 } 80 } 81 82 static void param_to_desc(const struct regmap_test_param *param, char *desc) 83 { 84 snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s%s @%#x", 85 regcache_type_name(param->cache), 86 regmap_endian_name(param->val_endian), 87 param->fast_io ? " fast I/O" : "", 88 param->from_reg); 89 } 90 91 static const struct regmap_test_param regcache_types_list[] = { 92 { .cache = REGCACHE_NONE }, 93 { .cache = REGCACHE_NONE, .fast_io = true }, 94 { .cache = REGCACHE_FLAT }, 95 { .cache = REGCACHE_FLAT, .fast_io = true }, 96 { .cache = REGCACHE_RBTREE }, 97 { .cache = REGCACHE_RBTREE, .fast_io = true }, 98 { .cache = REGCACHE_MAPLE }, 99 { .cache = REGCACHE_MAPLE, .fast_io = true }, 100 }; 101 102 KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc); 103 104 static const struct regmap_test_param real_cache_types_only_list[] = { 105 { .cache = REGCACHE_FLAT }, 106 { .cache = REGCACHE_FLAT, .fast_io = true }, 107 { .cache = REGCACHE_RBTREE }, 108 { .cache = REGCACHE_RBTREE, .fast_io = true }, 109 { .cache = REGCACHE_MAPLE }, 110 { .cache = REGCACHE_MAPLE, .fast_io = true }, 111 }; 112 113 KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc); 114 115 static const struct regmap_test_param real_cache_types_list[] = { 116 { .cache = REGCACHE_FLAT, .from_reg = 0 }, 117 { .cache = REGCACHE_FLAT, .from_reg = 0, .fast_io = true }, 118 { .cache = REGCACHE_FLAT, .from_reg = 0x2001 }, 119 { .cache = REGCACHE_FLAT, .from_reg = 0x2002 }, 120 { .cache = REGCACHE_FLAT, .from_reg = 0x2003 }, 121 { .cache = REGCACHE_FLAT, .from_reg = 0x2004 }, 122 { .cache = REGCACHE_RBTREE, .from_reg = 0 }, 123 { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true }, 124 { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, 125 { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, 126 { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, 127 { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, 128 { .cache = REGCACHE_MAPLE, .from_reg = 0 }, 129 { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true }, 130 { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, 131 { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, 132 { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, 133 { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 }, 134 }; 135 136 KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc); 137 138 static const struct regmap_test_param sparse_cache_types_list[] = { 139 { .cache = REGCACHE_RBTREE, .from_reg = 0 }, 140 { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true }, 141 { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, 142 { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, 143 { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, 144 { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, 145 { .cache = REGCACHE_MAPLE, .from_reg = 0 }, 146 { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true }, 147 { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, 148 { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, 149 { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, 150 { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 }, 151 }; 152 153 KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc); 154 155 static struct regmap *gen_regmap(struct kunit *test, 156 struct regmap_config *config, 157 struct regmap_ram_data **data) 158 { 159 const struct regmap_test_param *param = test->param_value; 160 struct regmap_test_priv *priv = test->priv; 161 unsigned int *buf; 162 struct regmap *ret = ERR_PTR(-ENOMEM); 163 size_t size; 164 int i, error; 165 struct reg_default *defaults; 166 167 config->cache_type = param->cache; 168 config->fast_io = param->fast_io; 169 170 if (config->max_register == 0) { 171 config->max_register = param->from_reg; 172 if (config->num_reg_defaults) 173 config->max_register += (config->num_reg_defaults - 1) * 174 config->reg_stride; 175 else 176 config->max_register += (BLOCK_TEST_SIZE * config->reg_stride); 177 } 178 179 size = array_size(config->max_register + 1, sizeof(*buf)); 180 buf = kmalloc(size, GFP_KERNEL); 181 if (!buf) 182 return ERR_PTR(-ENOMEM); 183 184 get_random_bytes(buf, size); 185 186 *data = kzalloc(sizeof(**data), GFP_KERNEL); 187 if (!(*data)) 188 goto out_free; 189 (*data)->vals = buf; 190 191 if (config->num_reg_defaults) { 192 defaults = kunit_kcalloc(test, 193 config->num_reg_defaults, 194 sizeof(struct reg_default), 195 GFP_KERNEL); 196 if (!defaults) 197 goto out_free; 198 199 config->reg_defaults = defaults; 200 201 for (i = 0; i < config->num_reg_defaults; i++) { 202 defaults[i].reg = param->from_reg + (i * config->reg_stride); 203 defaults[i].def = buf[param->from_reg + (i * config->reg_stride)]; 204 } 205 } 206 207 ret = regmap_init_ram(priv->dev, config, *data); 208 if (IS_ERR(ret)) 209 goto out_free; 210 211 /* This calls regmap_exit() on failure, which frees buf and *data */ 212 error = kunit_add_action_or_reset(test, regmap_exit_action, ret); 213 if (error) 214 ret = ERR_PTR(error); 215 216 return ret; 217 218 out_free: 219 kfree(buf); 220 kfree(*data); 221 222 return ret; 223 } 224 225 static bool reg_5_false(struct device *dev, unsigned int reg) 226 { 227 struct kunit *test = dev_get_drvdata(dev); 228 const struct regmap_test_param *param = test->param_value; 229 230 return reg != (param->from_reg + 5); 231 } 232 233 static void basic_read_write(struct kunit *test) 234 { 235 struct regmap *map; 236 struct regmap_config config; 237 struct regmap_ram_data *data; 238 unsigned int val, rval; 239 240 config = test_regmap_config; 241 242 map = gen_regmap(test, &config, &data); 243 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 244 if (IS_ERR(map)) 245 return; 246 247 get_random_bytes(&val, sizeof(val)); 248 249 /* If we write a value to a register we can read it back */ 250 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); 251 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); 252 KUNIT_EXPECT_EQ(test, val, rval); 253 254 /* If using a cache the cache satisfied the read */ 255 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]); 256 } 257 258 static void bulk_write(struct kunit *test) 259 { 260 struct regmap *map; 261 struct regmap_config config; 262 struct regmap_ram_data *data; 263 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; 264 int i; 265 266 config = test_regmap_config; 267 268 map = gen_regmap(test, &config, &data); 269 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 270 if (IS_ERR(map)) 271 return; 272 273 get_random_bytes(&val, sizeof(val)); 274 275 /* 276 * Data written via the bulk API can be read back with single 277 * reads. 278 */ 279 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val, 280 BLOCK_TEST_SIZE)); 281 for (i = 0; i < BLOCK_TEST_SIZE; i++) 282 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i])); 283 284 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); 285 286 /* If using a cache the cache satisfied the read */ 287 for (i = 0; i < BLOCK_TEST_SIZE; i++) 288 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 289 } 290 291 static void bulk_read(struct kunit *test) 292 { 293 struct regmap *map; 294 struct regmap_config config; 295 struct regmap_ram_data *data; 296 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; 297 int i; 298 299 config = test_regmap_config; 300 301 map = gen_regmap(test, &config, &data); 302 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 303 if (IS_ERR(map)) 304 return; 305 306 get_random_bytes(&val, sizeof(val)); 307 308 /* Data written as single writes can be read via the bulk API */ 309 for (i = 0; i < BLOCK_TEST_SIZE; i++) 310 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i])); 311 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 312 BLOCK_TEST_SIZE)); 313 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); 314 315 /* If using a cache the cache satisfied the read */ 316 for (i = 0; i < BLOCK_TEST_SIZE; i++) 317 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 318 } 319 320 static void multi_write(struct kunit *test) 321 { 322 struct regmap *map; 323 struct regmap_config config; 324 struct regmap_ram_data *data; 325 struct reg_sequence sequence[BLOCK_TEST_SIZE]; 326 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; 327 int i; 328 329 config = test_regmap_config; 330 331 map = gen_regmap(test, &config, &data); 332 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 333 if (IS_ERR(map)) 334 return; 335 336 get_random_bytes(&val, sizeof(val)); 337 338 /* 339 * Data written via the multi API can be read back with single 340 * reads. 341 */ 342 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 343 sequence[i].reg = i; 344 sequence[i].def = val[i]; 345 sequence[i].delay_us = 0; 346 } 347 KUNIT_EXPECT_EQ(test, 0, 348 regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE)); 349 for (i = 0; i < BLOCK_TEST_SIZE; i++) 350 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i])); 351 352 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); 353 354 /* If using a cache the cache satisfied the read */ 355 for (i = 0; i < BLOCK_TEST_SIZE; i++) 356 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 357 } 358 359 static void multi_read(struct kunit *test) 360 { 361 struct regmap *map; 362 struct regmap_config config; 363 struct regmap_ram_data *data; 364 unsigned int regs[BLOCK_TEST_SIZE]; 365 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE]; 366 int i; 367 368 config = test_regmap_config; 369 370 map = gen_regmap(test, &config, &data); 371 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 372 if (IS_ERR(map)) 373 return; 374 375 get_random_bytes(&val, sizeof(val)); 376 377 /* Data written as single writes can be read via the multi API */ 378 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 379 regs[i] = i; 380 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i])); 381 } 382 KUNIT_EXPECT_EQ(test, 0, 383 regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE)); 384 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val)); 385 386 /* If using a cache the cache satisfied the read */ 387 for (i = 0; i < BLOCK_TEST_SIZE; i++) 388 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 389 } 390 391 static void read_bypassed(struct kunit *test) 392 { 393 const struct regmap_test_param *param = test->param_value; 394 struct regmap *map; 395 struct regmap_config config; 396 struct regmap_ram_data *data; 397 unsigned int val[BLOCK_TEST_SIZE], rval; 398 int i; 399 400 config = test_regmap_config; 401 402 map = gen_regmap(test, &config, &data); 403 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 404 if (IS_ERR(map)) 405 return; 406 407 KUNIT_EXPECT_FALSE(test, map->cache_bypass); 408 409 get_random_bytes(&val, sizeof(val)); 410 411 /* Write some test values */ 412 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val))); 413 414 regcache_cache_only(map, true); 415 416 /* 417 * While in cache-only regmap_read_bypassed() should return the register 418 * value and leave the map in cache-only. 419 */ 420 for (i = 0; i < ARRAY_SIZE(val); i++) { 421 /* Put inverted bits in rval to prove we really read the value */ 422 rval = ~val[i]; 423 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval)); 424 KUNIT_EXPECT_EQ(test, val[i], rval); 425 426 rval = ~val[i]; 427 KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 428 KUNIT_EXPECT_EQ(test, val[i], rval); 429 KUNIT_EXPECT_TRUE(test, map->cache_only); 430 KUNIT_EXPECT_FALSE(test, map->cache_bypass); 431 } 432 433 /* 434 * Change the underlying register values to prove it is returning 435 * real values not cached values. 436 */ 437 for (i = 0; i < ARRAY_SIZE(val); i++) { 438 val[i] = ~val[i]; 439 data->vals[param->from_reg + i] = val[i]; 440 } 441 442 for (i = 0; i < ARRAY_SIZE(val); i++) { 443 rval = ~val[i]; 444 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval)); 445 KUNIT_EXPECT_NE(test, val[i], rval); 446 447 rval = ~val[i]; 448 KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 449 KUNIT_EXPECT_EQ(test, val[i], rval); 450 KUNIT_EXPECT_TRUE(test, map->cache_only); 451 KUNIT_EXPECT_FALSE(test, map->cache_bypass); 452 } 453 } 454 455 static void read_bypassed_volatile(struct kunit *test) 456 { 457 const struct regmap_test_param *param = test->param_value; 458 struct regmap *map; 459 struct regmap_config config; 460 struct regmap_ram_data *data; 461 unsigned int val[BLOCK_TEST_SIZE], rval; 462 int i; 463 464 config = test_regmap_config; 465 /* All registers except #5 volatile */ 466 config.volatile_reg = reg_5_false; 467 468 map = gen_regmap(test, &config, &data); 469 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 470 if (IS_ERR(map)) 471 return; 472 473 KUNIT_EXPECT_FALSE(test, map->cache_bypass); 474 475 get_random_bytes(&val, sizeof(val)); 476 477 /* Write some test values */ 478 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val))); 479 480 regcache_cache_only(map, true); 481 482 /* 483 * While in cache-only regmap_read_bypassed() should return the register 484 * value and leave the map in cache-only. 485 */ 486 for (i = 0; i < ARRAY_SIZE(val); i++) { 487 /* Register #5 is non-volatile so should read from cache */ 488 KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY, 489 regmap_read(map, param->from_reg + i, &rval)); 490 491 /* Put inverted bits in rval to prove we really read the value */ 492 rval = ~val[i]; 493 KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 494 KUNIT_EXPECT_EQ(test, val[i], rval); 495 KUNIT_EXPECT_TRUE(test, map->cache_only); 496 KUNIT_EXPECT_FALSE(test, map->cache_bypass); 497 } 498 499 /* 500 * Change the underlying register values to prove it is returning 501 * real values not cached values. 502 */ 503 for (i = 0; i < ARRAY_SIZE(val); i++) { 504 val[i] = ~val[i]; 505 data->vals[param->from_reg + i] = val[i]; 506 } 507 508 for (i = 0; i < ARRAY_SIZE(val); i++) { 509 if (i == 5) 510 continue; 511 512 rval = ~val[i]; 513 KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 514 KUNIT_EXPECT_EQ(test, val[i], rval); 515 KUNIT_EXPECT_TRUE(test, map->cache_only); 516 KUNIT_EXPECT_FALSE(test, map->cache_bypass); 517 } 518 } 519 520 static void write_readonly(struct kunit *test) 521 { 522 struct regmap *map; 523 struct regmap_config config; 524 struct regmap_ram_data *data; 525 unsigned int val; 526 int i; 527 528 config = test_regmap_config; 529 config.num_reg_defaults = BLOCK_TEST_SIZE; 530 config.writeable_reg = reg_5_false; 531 532 map = gen_regmap(test, &config, &data); 533 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 534 if (IS_ERR(map)) 535 return; 536 537 get_random_bytes(&val, sizeof(val)); 538 539 for (i = 0; i < BLOCK_TEST_SIZE; i++) 540 data->written[i] = false; 541 542 /* Change the value of all registers, readonly should fail */ 543 for (i = 0; i < BLOCK_TEST_SIZE; i++) 544 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0); 545 546 /* Did that match what we see on the device? */ 547 for (i = 0; i < BLOCK_TEST_SIZE; i++) 548 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]); 549 } 550 551 static void read_writeonly(struct kunit *test) 552 { 553 struct regmap *map; 554 struct regmap_config config; 555 struct regmap_ram_data *data; 556 unsigned int val; 557 int i; 558 559 config = test_regmap_config; 560 config.readable_reg = reg_5_false; 561 562 map = gen_regmap(test, &config, &data); 563 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 564 if (IS_ERR(map)) 565 return; 566 567 for (i = 0; i < BLOCK_TEST_SIZE; i++) 568 data->read[i] = false; 569 570 /* 571 * Try to read all the registers, the writeonly one should 572 * fail if we aren't using the flat cache. 573 */ 574 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 575 if (config.cache_type != REGCACHE_FLAT) { 576 KUNIT_EXPECT_EQ(test, i != 5, 577 regmap_read(map, i, &val) == 0); 578 } else { 579 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); 580 } 581 } 582 583 /* Did we trigger a hardware access? */ 584 KUNIT_EXPECT_FALSE(test, data->read[5]); 585 } 586 587 static void reg_defaults(struct kunit *test) 588 { 589 struct regmap *map; 590 struct regmap_config config; 591 struct regmap_ram_data *data; 592 unsigned int rval[BLOCK_TEST_SIZE]; 593 int i; 594 595 config = test_regmap_config; 596 config.num_reg_defaults = BLOCK_TEST_SIZE; 597 598 map = gen_regmap(test, &config, &data); 599 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 600 if (IS_ERR(map)) 601 return; 602 603 /* Read back the expected default data */ 604 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 605 BLOCK_TEST_SIZE)); 606 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); 607 608 /* The data should have been read from cache if there was one */ 609 for (i = 0; i < BLOCK_TEST_SIZE; i++) 610 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 611 } 612 613 static void reg_defaults_read_dev(struct kunit *test) 614 { 615 struct regmap *map; 616 struct regmap_config config; 617 struct regmap_ram_data *data; 618 unsigned int rval[BLOCK_TEST_SIZE]; 619 int i; 620 621 config = test_regmap_config; 622 config.num_reg_defaults_raw = BLOCK_TEST_SIZE; 623 624 map = gen_regmap(test, &config, &data); 625 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 626 if (IS_ERR(map)) 627 return; 628 629 /* We should have read the cache defaults back from the map */ 630 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 631 KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]); 632 data->read[i] = false; 633 } 634 635 /* Read back the expected default data */ 636 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 637 BLOCK_TEST_SIZE)); 638 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); 639 640 /* The data should have been read from cache if there was one */ 641 for (i = 0; i < BLOCK_TEST_SIZE; i++) 642 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 643 } 644 645 static void register_patch(struct kunit *test) 646 { 647 struct regmap *map; 648 struct regmap_config config; 649 struct regmap_ram_data *data; 650 struct reg_sequence patch[2]; 651 unsigned int rval[BLOCK_TEST_SIZE]; 652 int i; 653 654 /* We need defaults so readback works */ 655 config = test_regmap_config; 656 config.num_reg_defaults = BLOCK_TEST_SIZE; 657 658 map = gen_regmap(test, &config, &data); 659 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 660 if (IS_ERR(map)) 661 return; 662 663 /* Stash the original values */ 664 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 665 BLOCK_TEST_SIZE)); 666 667 /* Patch a couple of values */ 668 patch[0].reg = 2; 669 patch[0].def = rval[2] + 1; 670 patch[0].delay_us = 0; 671 patch[1].reg = 5; 672 patch[1].def = rval[5] + 1; 673 patch[1].delay_us = 0; 674 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch, 675 ARRAY_SIZE(patch))); 676 677 /* Only the patched registers are written */ 678 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 679 switch (i) { 680 case 2: 681 case 5: 682 KUNIT_EXPECT_TRUE(test, data->written[i]); 683 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1); 684 break; 685 default: 686 KUNIT_EXPECT_FALSE(test, data->written[i]); 687 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]); 688 break; 689 } 690 } 691 } 692 693 static void stride(struct kunit *test) 694 { 695 struct regmap *map; 696 struct regmap_config config; 697 struct regmap_ram_data *data; 698 unsigned int rval; 699 int i; 700 701 config = test_regmap_config; 702 config.reg_stride = 2; 703 config.num_reg_defaults = BLOCK_TEST_SIZE / 2; 704 705 /* 706 * Allow one extra register so that the read/written arrays 707 * are sized big enough to include an entry for the odd 708 * address past the final reg_default register. 709 */ 710 config.max_register = BLOCK_TEST_SIZE; 711 712 map = gen_regmap(test, &config, &data); 713 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 714 if (IS_ERR(map)) 715 return; 716 717 /* Only even addresses can be accessed, try both read and write */ 718 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 719 data->read[i] = false; 720 data->written[i] = false; 721 722 if (i % 2) { 723 KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval)); 724 KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval)); 725 KUNIT_EXPECT_FALSE(test, data->read[i]); 726 KUNIT_EXPECT_FALSE(test, data->written[i]); 727 } else { 728 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 729 KUNIT_EXPECT_EQ(test, data->vals[i], rval); 730 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, 731 data->read[i]); 732 733 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval)); 734 KUNIT_EXPECT_TRUE(test, data->written[i]); 735 } 736 } 737 } 738 739 static struct regmap_range_cfg test_range = { 740 .selector_reg = 1, 741 .selector_mask = 0xff, 742 743 .window_start = 4, 744 .window_len = 10, 745 746 .range_min = 20, 747 .range_max = 40, 748 }; 749 750 static bool test_range_window_volatile(struct device *dev, unsigned int reg) 751 { 752 if (reg >= test_range.window_start && 753 reg <= test_range.window_start + test_range.window_len) 754 return true; 755 756 return false; 757 } 758 759 static bool test_range_all_volatile(struct device *dev, unsigned int reg) 760 { 761 if (test_range_window_volatile(dev, reg)) 762 return true; 763 764 if (reg >= test_range.range_min && reg <= test_range.range_max) 765 return true; 766 767 return false; 768 } 769 770 static void basic_ranges(struct kunit *test) 771 { 772 struct regmap *map; 773 struct regmap_config config; 774 struct regmap_ram_data *data; 775 unsigned int val; 776 int i; 777 778 config = test_regmap_config; 779 config.volatile_reg = test_range_all_volatile; 780 config.ranges = &test_range; 781 config.num_ranges = 1; 782 config.max_register = test_range.range_max; 783 784 map = gen_regmap(test, &config, &data); 785 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 786 if (IS_ERR(map)) 787 return; 788 789 for (i = test_range.range_min; i < test_range.range_max; i++) { 790 data->read[i] = false; 791 data->written[i] = false; 792 } 793 794 /* Reset the page to a non-zero value to trigger a change */ 795 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg, 796 test_range.range_max)); 797 798 /* Check we set the page and use the window for writes */ 799 data->written[test_range.selector_reg] = false; 800 data->written[test_range.window_start] = false; 801 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); 802 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 803 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]); 804 805 data->written[test_range.selector_reg] = false; 806 data->written[test_range.window_start] = false; 807 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 808 test_range.range_min + 809 test_range.window_len, 810 0)); 811 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 812 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]); 813 814 /* Same for reads */ 815 data->written[test_range.selector_reg] = false; 816 data->read[test_range.window_start] = false; 817 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val)); 818 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 819 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]); 820 821 data->written[test_range.selector_reg] = false; 822 data->read[test_range.window_start] = false; 823 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 824 test_range.range_min + 825 test_range.window_len, 826 &val)); 827 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 828 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]); 829 830 /* No physical access triggered in the virtual range */ 831 for (i = test_range.range_min; i < test_range.range_max; i++) { 832 KUNIT_EXPECT_FALSE(test, data->read[i]); 833 KUNIT_EXPECT_FALSE(test, data->written[i]); 834 } 835 } 836 837 /* Try to stress dynamic creation of cache data structures */ 838 static void stress_insert(struct kunit *test) 839 { 840 struct regmap *map; 841 struct regmap_config config; 842 struct regmap_ram_data *data; 843 unsigned int rval, *vals; 844 size_t buf_sz; 845 int i; 846 847 config = test_regmap_config; 848 config.max_register = 300; 849 850 map = gen_regmap(test, &config, &data); 851 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 852 if (IS_ERR(map)) 853 return; 854 855 buf_sz = array_size(sizeof(*vals), config.max_register); 856 vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL); 857 KUNIT_ASSERT_FALSE(test, vals == NULL); 858 859 get_random_bytes(vals, buf_sz); 860 861 /* Write data into the map/cache in ever decreasing strides */ 862 for (i = 0; i < config.max_register; i += 100) 863 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 864 for (i = 0; i < config.max_register; i += 50) 865 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 866 for (i = 0; i < config.max_register; i += 25) 867 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 868 for (i = 0; i < config.max_register; i += 10) 869 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 870 for (i = 0; i < config.max_register; i += 5) 871 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 872 for (i = 0; i < config.max_register; i += 3) 873 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 874 for (i = 0; i < config.max_register; i += 2) 875 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 876 for (i = 0; i < config.max_register; i++) 877 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i])); 878 879 /* Do reads from the cache (if there is one) match? */ 880 for (i = 0; i < config.max_register; i ++) { 881 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 882 KUNIT_EXPECT_EQ(test, rval, vals[i]); 883 KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 884 } 885 } 886 887 static void cache_bypass(struct kunit *test) 888 { 889 const struct regmap_test_param *param = test->param_value; 890 struct regmap *map; 891 struct regmap_config config; 892 struct regmap_ram_data *data; 893 unsigned int val, rval; 894 895 config = test_regmap_config; 896 897 map = gen_regmap(test, &config, &data); 898 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 899 if (IS_ERR(map)) 900 return; 901 902 get_random_bytes(&val, sizeof(val)); 903 904 /* Ensure the cache has a value in it */ 905 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val)); 906 907 /* Bypass then write a different value */ 908 regcache_cache_bypass(map, true); 909 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1)); 910 911 /* Read the bypassed value */ 912 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval)); 913 KUNIT_EXPECT_EQ(test, val + 1, rval); 914 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval); 915 916 /* Disable bypass, the cache should still return the original value */ 917 regcache_cache_bypass(map, false); 918 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval)); 919 KUNIT_EXPECT_EQ(test, val, rval); 920 } 921 922 static void cache_sync_marked_dirty(struct kunit *test) 923 { 924 const struct regmap_test_param *param = test->param_value; 925 struct regmap *map; 926 struct regmap_config config; 927 struct regmap_ram_data *data; 928 unsigned int val[BLOCK_TEST_SIZE]; 929 int i; 930 931 config = test_regmap_config; 932 933 map = gen_regmap(test, &config, &data); 934 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 935 if (IS_ERR(map)) 936 return; 937 938 get_random_bytes(&val, sizeof(val)); 939 940 /* Put some data into the cache */ 941 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, 942 BLOCK_TEST_SIZE)); 943 for (i = 0; i < BLOCK_TEST_SIZE; i++) 944 data->written[param->from_reg + i] = false; 945 946 /* Trash the data on the device itself then resync */ 947 regcache_mark_dirty(map); 948 memset(data->vals, 0, sizeof(val)); 949 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 950 951 /* Did we just write the correct data out? */ 952 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); 953 for (i = 0; i < BLOCK_TEST_SIZE; i++) 954 KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]); 955 } 956 957 static void cache_sync_after_cache_only(struct kunit *test) 958 { 959 const struct regmap_test_param *param = test->param_value; 960 struct regmap *map; 961 struct regmap_config config; 962 struct regmap_ram_data *data; 963 unsigned int val[BLOCK_TEST_SIZE]; 964 unsigned int val_mask; 965 int i; 966 967 config = test_regmap_config; 968 969 map = gen_regmap(test, &config, &data); 970 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 971 if (IS_ERR(map)) 972 return; 973 974 val_mask = GENMASK(config.val_bits - 1, 0); 975 get_random_bytes(&val, sizeof(val)); 976 977 /* Put some data into the cache */ 978 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, 979 BLOCK_TEST_SIZE)); 980 for (i = 0; i < BLOCK_TEST_SIZE; i++) 981 data->written[param->from_reg + i] = false; 982 983 /* Set cache-only and change the values */ 984 regcache_cache_only(map, true); 985 for (i = 0; i < ARRAY_SIZE(val); ++i) 986 val[i] = ~val[i] & val_mask; 987 988 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, 989 BLOCK_TEST_SIZE)); 990 for (i = 0; i < BLOCK_TEST_SIZE; i++) 991 KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]); 992 993 KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val)); 994 995 /* Exit cache-only and sync the cache without marking hardware registers dirty */ 996 regcache_cache_only(map, false); 997 998 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 999 1000 /* Did we just write the correct data out? */ 1001 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); 1002 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1003 KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]); 1004 } 1005 1006 static void cache_sync_defaults_marked_dirty(struct kunit *test) 1007 { 1008 const struct regmap_test_param *param = test->param_value; 1009 struct regmap *map; 1010 struct regmap_config config; 1011 struct regmap_ram_data *data; 1012 unsigned int val; 1013 int i; 1014 1015 config = test_regmap_config; 1016 config.num_reg_defaults = BLOCK_TEST_SIZE; 1017 1018 map = gen_regmap(test, &config, &data); 1019 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1020 if (IS_ERR(map)) 1021 return; 1022 1023 get_random_bytes(&val, sizeof(val)); 1024 1025 /* Change the value of one register */ 1026 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val)); 1027 1028 /* Resync */ 1029 regcache_mark_dirty(map); 1030 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1031 data->written[param->from_reg + i] = false; 1032 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1033 1034 /* Did we just sync the one register we touched? */ 1035 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1036 KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]); 1037 1038 /* Rewrite registers back to their defaults */ 1039 for (i = 0; i < config.num_reg_defaults; ++i) 1040 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg, 1041 config.reg_defaults[i].def)); 1042 1043 /* 1044 * Resync after regcache_mark_dirty() should not write out registers 1045 * that are at default value 1046 */ 1047 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1048 data->written[param->from_reg + i] = false; 1049 regcache_mark_dirty(map); 1050 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1051 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1052 KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]); 1053 } 1054 1055 static void cache_sync_default_after_cache_only(struct kunit *test) 1056 { 1057 const struct regmap_test_param *param = test->param_value; 1058 struct regmap *map; 1059 struct regmap_config config; 1060 struct regmap_ram_data *data; 1061 unsigned int orig_val; 1062 int i; 1063 1064 config = test_regmap_config; 1065 config.num_reg_defaults = BLOCK_TEST_SIZE; 1066 1067 map = gen_regmap(test, &config, &data); 1068 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1069 if (IS_ERR(map)) 1070 return; 1071 1072 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val)); 1073 1074 /* Enter cache-only and change the value of one register */ 1075 regcache_cache_only(map, true); 1076 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1)); 1077 1078 /* Exit cache-only and resync, should write out the changed register */ 1079 regcache_cache_only(map, false); 1080 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1081 data->written[param->from_reg + i] = false; 1082 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1083 1084 /* Was the register written out? */ 1085 KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]); 1086 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1); 1087 1088 /* Enter cache-only and write register back to its default value */ 1089 regcache_cache_only(map, true); 1090 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val)); 1091 1092 /* Resync should write out the new value */ 1093 regcache_cache_only(map, false); 1094 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1095 data->written[param->from_reg + i] = false; 1096 1097 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1098 KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]); 1099 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val); 1100 } 1101 1102 static void cache_sync_readonly(struct kunit *test) 1103 { 1104 const struct regmap_test_param *param = test->param_value; 1105 struct regmap *map; 1106 struct regmap_config config; 1107 struct regmap_ram_data *data; 1108 unsigned int val; 1109 int i; 1110 1111 config = test_regmap_config; 1112 config.writeable_reg = reg_5_false; 1113 1114 map = gen_regmap(test, &config, &data); 1115 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1116 if (IS_ERR(map)) 1117 return; 1118 1119 /* Read all registers to fill the cache */ 1120 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1121 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); 1122 1123 /* Change the value of all registers, readonly should fail */ 1124 get_random_bytes(&val, sizeof(val)); 1125 regcache_cache_only(map, true); 1126 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1127 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0); 1128 regcache_cache_only(map, false); 1129 1130 /* Resync */ 1131 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1132 data->written[param->from_reg + i] = false; 1133 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1134 1135 /* Did that match what we see on the device? */ 1136 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1137 KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]); 1138 } 1139 1140 static void cache_sync_patch(struct kunit *test) 1141 { 1142 const struct regmap_test_param *param = test->param_value; 1143 struct regmap *map; 1144 struct regmap_config config; 1145 struct regmap_ram_data *data; 1146 struct reg_sequence patch[2]; 1147 unsigned int rval[BLOCK_TEST_SIZE], val; 1148 int i; 1149 1150 /* We need defaults so readback works */ 1151 config = test_regmap_config; 1152 config.num_reg_defaults = BLOCK_TEST_SIZE; 1153 1154 map = gen_regmap(test, &config, &data); 1155 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1156 if (IS_ERR(map)) 1157 return; 1158 1159 /* Stash the original values */ 1160 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1161 BLOCK_TEST_SIZE)); 1162 1163 /* Patch a couple of values */ 1164 patch[0].reg = param->from_reg + 2; 1165 patch[0].def = rval[2] + 1; 1166 patch[0].delay_us = 0; 1167 patch[1].reg = param->from_reg + 5; 1168 patch[1].def = rval[5] + 1; 1169 patch[1].delay_us = 0; 1170 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch, 1171 ARRAY_SIZE(patch))); 1172 1173 /* Sync the cache */ 1174 regcache_mark_dirty(map); 1175 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1176 data->written[param->from_reg + i] = false; 1177 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1178 1179 /* The patch should be on the device but not in the cache */ 1180 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 1181 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); 1182 KUNIT_EXPECT_EQ(test, val, rval[i]); 1183 1184 switch (i) { 1185 case 2: 1186 case 5: 1187 KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]); 1188 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1); 1189 break; 1190 default: 1191 KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]); 1192 KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]); 1193 break; 1194 } 1195 } 1196 } 1197 1198 static void cache_drop(struct kunit *test) 1199 { 1200 const struct regmap_test_param *param = test->param_value; 1201 struct regmap *map; 1202 struct regmap_config config; 1203 struct regmap_ram_data *data; 1204 unsigned int rval[BLOCK_TEST_SIZE]; 1205 int i; 1206 1207 config = test_regmap_config; 1208 config.num_reg_defaults = BLOCK_TEST_SIZE; 1209 1210 map = gen_regmap(test, &config, &data); 1211 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1212 if (IS_ERR(map)) 1213 return; 1214 1215 /* Ensure the data is read from the cache */ 1216 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1217 data->read[param->from_reg + i] = false; 1218 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1219 BLOCK_TEST_SIZE)); 1220 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 1221 KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]); 1222 data->read[param->from_reg + i] = false; 1223 } 1224 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1225 1226 /* Drop some registers */ 1227 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3, 1228 param->from_reg + 5)); 1229 1230 /* Reread and check only the dropped registers hit the device. */ 1231 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1232 BLOCK_TEST_SIZE)); 1233 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1234 KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5); 1235 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1236 } 1237 1238 static void cache_drop_with_non_contiguous_ranges(struct kunit *test) 1239 { 1240 const struct regmap_test_param *param = test->param_value; 1241 struct regmap *map; 1242 struct regmap_config config; 1243 struct regmap_ram_data *data; 1244 unsigned int val[4][BLOCK_TEST_SIZE]; 1245 unsigned int reg; 1246 const int num_ranges = ARRAY_SIZE(val) * 2; 1247 int rangeidx, i; 1248 1249 static_assert(ARRAY_SIZE(val) == 4); 1250 1251 config = test_regmap_config; 1252 config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE); 1253 1254 map = gen_regmap(test, &config, &data); 1255 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1256 if (IS_ERR(map)) 1257 return; 1258 1259 for (i = 0; i < config.max_register + 1; i++) 1260 data->written[i] = false; 1261 1262 /* Create non-contiguous cache blocks by writing every other range */ 1263 get_random_bytes(&val, sizeof(val)); 1264 for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { 1265 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 1266 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg, 1267 &val[rangeidx / 2], 1268 BLOCK_TEST_SIZE)); 1269 KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], 1270 &val[rangeidx / 2], sizeof(val[rangeidx / 2])); 1271 } 1272 1273 /* Check that odd ranges weren't written */ 1274 for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) { 1275 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 1276 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1277 KUNIT_EXPECT_FALSE(test, data->written[reg + i]); 1278 } 1279 1280 /* Drop range 2 */ 1281 reg = param->from_reg + (2 * BLOCK_TEST_SIZE); 1282 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1)); 1283 1284 /* Drop part of range 4 */ 1285 reg = param->from_reg + (4 * BLOCK_TEST_SIZE); 1286 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5)); 1287 1288 /* Mark dirty and reset mock registers to 0 */ 1289 regcache_mark_dirty(map); 1290 for (i = 0; i < config.max_register + 1; i++) { 1291 data->vals[i] = 0; 1292 data->written[i] = false; 1293 } 1294 1295 /* The registers that were dropped from range 4 should now remain at 0 */ 1296 val[4 / 2][3] = 0; 1297 val[4 / 2][4] = 0; 1298 val[4 / 2][5] = 0; 1299 1300 /* Sync and check that the expected register ranges were written */ 1301 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1302 1303 /* Check that odd ranges weren't written */ 1304 for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) { 1305 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 1306 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1307 KUNIT_EXPECT_FALSE(test, data->written[reg + i]); 1308 } 1309 1310 /* Check that even ranges (except 2 and 4) were written */ 1311 for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { 1312 if ((rangeidx == 2) || (rangeidx == 4)) 1313 continue; 1314 1315 reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 1316 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1317 KUNIT_EXPECT_TRUE(test, data->written[reg + i]); 1318 1319 KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], 1320 &val[rangeidx / 2], sizeof(val[rangeidx / 2])); 1321 } 1322 1323 /* Check that range 2 wasn't written */ 1324 reg = param->from_reg + (2 * BLOCK_TEST_SIZE); 1325 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1326 KUNIT_EXPECT_FALSE(test, data->written[reg + i]); 1327 1328 /* Check that range 4 was partially written */ 1329 reg = param->from_reg + (4 * BLOCK_TEST_SIZE); 1330 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1331 KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5); 1332 1333 KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2])); 1334 1335 /* Nothing before param->from_reg should have been written */ 1336 for (i = 0; i < param->from_reg; i++) 1337 KUNIT_EXPECT_FALSE(test, data->written[i]); 1338 } 1339 1340 static void cache_drop_all_and_sync_marked_dirty(struct kunit *test) 1341 { 1342 const struct regmap_test_param *param = test->param_value; 1343 struct regmap *map; 1344 struct regmap_config config; 1345 struct regmap_ram_data *data; 1346 unsigned int rval[BLOCK_TEST_SIZE]; 1347 int i; 1348 1349 config = test_regmap_config; 1350 config.num_reg_defaults = BLOCK_TEST_SIZE; 1351 1352 map = gen_regmap(test, &config, &data); 1353 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1354 if (IS_ERR(map)) 1355 return; 1356 1357 /* Ensure the data is read from the cache */ 1358 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1359 data->read[param->from_reg + i] = false; 1360 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1361 BLOCK_TEST_SIZE)); 1362 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1363 1364 /* Change all values in cache from defaults */ 1365 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1366 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); 1367 1368 /* Drop all registers */ 1369 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); 1370 1371 /* Mark dirty and cache sync should not write anything. */ 1372 regcache_mark_dirty(map); 1373 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1374 data->written[param->from_reg + i] = false; 1375 1376 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1377 for (i = 0; i <= config.max_register; i++) 1378 KUNIT_EXPECT_FALSE(test, data->written[i]); 1379 } 1380 1381 static void cache_drop_all_and_sync_no_defaults(struct kunit *test) 1382 { 1383 const struct regmap_test_param *param = test->param_value; 1384 struct regmap *map; 1385 struct regmap_config config; 1386 struct regmap_ram_data *data; 1387 unsigned int rval[BLOCK_TEST_SIZE]; 1388 int i; 1389 1390 config = test_regmap_config; 1391 1392 map = gen_regmap(test, &config, &data); 1393 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1394 if (IS_ERR(map)) 1395 return; 1396 1397 /* Ensure the data is read from the cache */ 1398 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1399 data->read[param->from_reg + i] = false; 1400 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1401 BLOCK_TEST_SIZE)); 1402 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1403 1404 /* Change all values in cache */ 1405 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1406 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); 1407 1408 /* Drop all registers */ 1409 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); 1410 1411 /* 1412 * Sync cache without marking it dirty. All registers were dropped 1413 * so the cache should not have any entries to write out. 1414 */ 1415 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1416 data->written[param->from_reg + i] = false; 1417 1418 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1419 for (i = 0; i <= config.max_register; i++) 1420 KUNIT_EXPECT_FALSE(test, data->written[i]); 1421 } 1422 1423 static void cache_drop_all_and_sync_has_defaults(struct kunit *test) 1424 { 1425 const struct regmap_test_param *param = test->param_value; 1426 struct regmap *map; 1427 struct regmap_config config; 1428 struct regmap_ram_data *data; 1429 unsigned int rval[BLOCK_TEST_SIZE]; 1430 int i; 1431 1432 config = test_regmap_config; 1433 config.num_reg_defaults = BLOCK_TEST_SIZE; 1434 1435 map = gen_regmap(test, &config, &data); 1436 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1437 if (IS_ERR(map)) 1438 return; 1439 1440 /* Ensure the data is read from the cache */ 1441 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1442 data->read[param->from_reg + i] = false; 1443 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1444 BLOCK_TEST_SIZE)); 1445 KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1446 1447 /* Change all values in cache from defaults */ 1448 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1449 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); 1450 1451 /* Drop all registers */ 1452 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); 1453 1454 /* 1455 * Sync cache without marking it dirty. All registers were dropped 1456 * so the cache should not have any entries to write out. 1457 */ 1458 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1459 data->written[param->from_reg + i] = false; 1460 1461 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1462 for (i = 0; i <= config.max_register; i++) 1463 KUNIT_EXPECT_FALSE(test, data->written[i]); 1464 } 1465 1466 static void cache_present(struct kunit *test) 1467 { 1468 const struct regmap_test_param *param = test->param_value; 1469 struct regmap *map; 1470 struct regmap_config config; 1471 struct regmap_ram_data *data; 1472 unsigned int val; 1473 int i; 1474 1475 config = test_regmap_config; 1476 1477 map = gen_regmap(test, &config, &data); 1478 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1479 if (IS_ERR(map)) 1480 return; 1481 1482 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1483 data->read[param->from_reg + i] = false; 1484 1485 /* No defaults so no registers cached. */ 1486 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1487 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i)); 1488 1489 /* We didn't trigger any reads */ 1490 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1491 KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]); 1492 1493 /* Fill the cache */ 1494 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1495 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); 1496 1497 /* Now everything should be cached */ 1498 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1499 KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i)); 1500 } 1501 1502 /* Check that caching the window register works with sync */ 1503 static void cache_range_window_reg(struct kunit *test) 1504 { 1505 struct regmap *map; 1506 struct regmap_config config; 1507 struct regmap_ram_data *data; 1508 unsigned int val; 1509 int i; 1510 1511 config = test_regmap_config; 1512 config.volatile_reg = test_range_window_volatile; 1513 config.ranges = &test_range; 1514 config.num_ranges = 1; 1515 config.max_register = test_range.range_max; 1516 1517 map = gen_regmap(test, &config, &data); 1518 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1519 if (IS_ERR(map)) 1520 return; 1521 1522 /* Write new values to the entire range */ 1523 for (i = test_range.range_min; i <= test_range.range_max; i++) 1524 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0)); 1525 1526 val = data->vals[test_range.selector_reg] & test_range.selector_mask; 1527 KUNIT_ASSERT_EQ(test, val, 2); 1528 1529 /* Write to the first register in the range to reset the page */ 1530 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); 1531 val = data->vals[test_range.selector_reg] & test_range.selector_mask; 1532 KUNIT_ASSERT_EQ(test, val, 0); 1533 1534 /* Trigger a cache sync */ 1535 regcache_mark_dirty(map); 1536 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map)); 1537 1538 /* Write to the first register again, the page should be reset */ 1539 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); 1540 val = data->vals[test_range.selector_reg] & test_range.selector_mask; 1541 KUNIT_ASSERT_EQ(test, val, 0); 1542 1543 /* Trigger another cache sync */ 1544 regcache_mark_dirty(map); 1545 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map)); 1546 1547 /* Write to the last register again, the page should be reset */ 1548 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0)); 1549 val = data->vals[test_range.selector_reg] & test_range.selector_mask; 1550 KUNIT_ASSERT_EQ(test, val, 2); 1551 } 1552 1553 static const struct regmap_test_param raw_types_list[] = { 1554 { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE }, 1555 { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG }, 1556 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE }, 1557 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG }, 1558 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE }, 1559 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG }, 1560 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE }, 1561 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG }, 1562 }; 1563 1564 KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc); 1565 1566 static const struct regmap_test_param raw_cache_types_list[] = { 1567 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE }, 1568 { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG }, 1569 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE }, 1570 { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG }, 1571 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE }, 1572 { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG }, 1573 }; 1574 1575 KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc); 1576 1577 static const struct regmap_config raw_regmap_config = { 1578 .max_register = BLOCK_TEST_SIZE, 1579 1580 .reg_format_endian = REGMAP_ENDIAN_LITTLE, 1581 .reg_bits = 16, 1582 .val_bits = 16, 1583 }; 1584 1585 static struct regmap *gen_raw_regmap(struct kunit *test, 1586 struct regmap_config *config, 1587 struct regmap_ram_data **data) 1588 { 1589 struct regmap_test_priv *priv = test->priv; 1590 const struct regmap_test_param *param = test->param_value; 1591 u16 *buf; 1592 struct regmap *ret = ERR_PTR(-ENOMEM); 1593 int i, error; 1594 struct reg_default *defaults; 1595 size_t size; 1596 1597 config->cache_type = param->cache; 1598 config->val_format_endian = param->val_endian; 1599 config->disable_locking = config->cache_type == REGCACHE_RBTREE || 1600 config->cache_type == REGCACHE_MAPLE; 1601 1602 size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits)); 1603 buf = kmalloc(size, GFP_KERNEL); 1604 if (!buf) 1605 return ERR_PTR(-ENOMEM); 1606 1607 get_random_bytes(buf, size); 1608 1609 *data = kzalloc(sizeof(**data), GFP_KERNEL); 1610 if (!(*data)) 1611 goto out_free; 1612 (*data)->vals = (void *)buf; 1613 1614 config->num_reg_defaults = config->max_register + 1; 1615 defaults = kunit_kcalloc(test, 1616 config->num_reg_defaults, 1617 sizeof(struct reg_default), 1618 GFP_KERNEL); 1619 if (!defaults) 1620 goto out_free; 1621 config->reg_defaults = defaults; 1622 1623 for (i = 0; i < config->num_reg_defaults; i++) { 1624 defaults[i].reg = i; 1625 switch (param->val_endian) { 1626 case REGMAP_ENDIAN_LITTLE: 1627 defaults[i].def = le16_to_cpu(buf[i]); 1628 break; 1629 case REGMAP_ENDIAN_BIG: 1630 defaults[i].def = be16_to_cpu(buf[i]); 1631 break; 1632 default: 1633 ret = ERR_PTR(-EINVAL); 1634 goto out_free; 1635 } 1636 } 1637 1638 /* 1639 * We use the defaults in the tests but they don't make sense 1640 * to the core if there's no cache. 1641 */ 1642 if (config->cache_type == REGCACHE_NONE) 1643 config->num_reg_defaults = 0; 1644 1645 ret = regmap_init_raw_ram(priv->dev, config, *data); 1646 if (IS_ERR(ret)) 1647 goto out_free; 1648 1649 /* This calls regmap_exit() on failure, which frees buf and *data */ 1650 error = kunit_add_action_or_reset(test, regmap_exit_action, ret); 1651 if (error) 1652 ret = ERR_PTR(error); 1653 1654 return ret; 1655 1656 out_free: 1657 kfree(buf); 1658 kfree(*data); 1659 1660 return ret; 1661 } 1662 1663 static void raw_read_defaults_single(struct kunit *test) 1664 { 1665 struct regmap *map; 1666 struct regmap_config config; 1667 struct regmap_ram_data *data; 1668 unsigned int rval; 1669 int i; 1670 1671 config = raw_regmap_config; 1672 1673 map = gen_raw_regmap(test, &config, &data); 1674 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1675 if (IS_ERR(map)) 1676 return; 1677 1678 /* Check that we can read the defaults via the API */ 1679 for (i = 0; i < config.max_register + 1; i++) { 1680 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 1681 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); 1682 } 1683 } 1684 1685 static void raw_read_defaults(struct kunit *test) 1686 { 1687 struct regmap *map; 1688 struct regmap_config config; 1689 struct regmap_ram_data *data; 1690 u16 *rval; 1691 u16 def; 1692 size_t val_len; 1693 int i; 1694 1695 config = raw_regmap_config; 1696 1697 map = gen_raw_regmap(test, &config, &data); 1698 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1699 if (IS_ERR(map)) 1700 return; 1701 1702 val_len = array_size(sizeof(*rval), config.max_register + 1); 1703 rval = kunit_kmalloc(test, val_len, GFP_KERNEL); 1704 KUNIT_ASSERT_TRUE(test, rval != NULL); 1705 if (!rval) 1706 return; 1707 1708 /* Check that we can read the defaults via the API */ 1709 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len)); 1710 for (i = 0; i < config.max_register + 1; i++) { 1711 def = config.reg_defaults[i].def; 1712 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1713 KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i])); 1714 } else { 1715 KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i])); 1716 } 1717 } 1718 } 1719 1720 static void raw_write_read_single(struct kunit *test) 1721 { 1722 struct regmap *map; 1723 struct regmap_config config; 1724 struct regmap_ram_data *data; 1725 u16 val; 1726 unsigned int rval; 1727 1728 config = raw_regmap_config; 1729 1730 map = gen_raw_regmap(test, &config, &data); 1731 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1732 if (IS_ERR(map)) 1733 return; 1734 1735 get_random_bytes(&val, sizeof(val)); 1736 1737 /* If we write a value to a register we can read it back */ 1738 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); 1739 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); 1740 KUNIT_EXPECT_EQ(test, val, rval); 1741 } 1742 1743 static void raw_write(struct kunit *test) 1744 { 1745 struct regmap *map; 1746 struct regmap_config config; 1747 struct regmap_ram_data *data; 1748 u16 *hw_buf; 1749 u16 val[2]; 1750 unsigned int rval; 1751 int i; 1752 1753 config = raw_regmap_config; 1754 1755 map = gen_raw_regmap(test, &config, &data); 1756 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1757 if (IS_ERR(map)) 1758 return; 1759 1760 hw_buf = (u16 *)data->vals; 1761 1762 get_random_bytes(&val, sizeof(val)); 1763 1764 /* Do a raw write */ 1765 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val))); 1766 1767 /* We should read back the new values, and defaults for the rest */ 1768 for (i = 0; i < config.max_register + 1; i++) { 1769 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 1770 1771 switch (i) { 1772 case 2: 1773 case 3: 1774 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1775 KUNIT_EXPECT_EQ(test, rval, 1776 be16_to_cpu((__force __be16)val[i % 2])); 1777 } else { 1778 KUNIT_EXPECT_EQ(test, rval, 1779 le16_to_cpu((__force __le16)val[i % 2])); 1780 } 1781 break; 1782 default: 1783 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); 1784 break; 1785 } 1786 } 1787 1788 /* The values should appear in the "hardware" */ 1789 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); 1790 } 1791 1792 static bool reg_zero(struct device *dev, unsigned int reg) 1793 { 1794 return reg == 0; 1795 } 1796 1797 static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg) 1798 { 1799 return reg == 0; 1800 } 1801 1802 static void raw_noinc_write(struct kunit *test) 1803 { 1804 struct regmap *map; 1805 struct regmap_config config; 1806 struct regmap_ram_data *data; 1807 unsigned int val; 1808 u16 val_test, val_last; 1809 u16 val_array[BLOCK_TEST_SIZE]; 1810 1811 config = raw_regmap_config; 1812 config.volatile_reg = reg_zero; 1813 config.writeable_noinc_reg = reg_zero; 1814 config.readable_noinc_reg = reg_zero; 1815 1816 map = gen_raw_regmap(test, &config, &data); 1817 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1818 if (IS_ERR(map)) 1819 return; 1820 1821 data->noinc_reg = ram_reg_zero; 1822 1823 get_random_bytes(&val_array, sizeof(val_array)); 1824 1825 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1826 val_test = be16_to_cpu(val_array[1]) + 100; 1827 val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]); 1828 } else { 1829 val_test = le16_to_cpu(val_array[1]) + 100; 1830 val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]); 1831 } 1832 1833 /* Put some data into the register following the noinc register */ 1834 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test)); 1835 1836 /* Write some data to the noinc register */ 1837 KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array, 1838 sizeof(val_array))); 1839 1840 /* We should read back the last value written */ 1841 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val)); 1842 KUNIT_ASSERT_EQ(test, val_last, val); 1843 1844 /* Make sure we didn't touch the register after the noinc register */ 1845 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val)); 1846 KUNIT_ASSERT_EQ(test, val_test, val); 1847 } 1848 1849 static void raw_sync(struct kunit *test) 1850 { 1851 struct regmap *map; 1852 struct regmap_config config; 1853 struct regmap_ram_data *data; 1854 u16 val[3]; 1855 u16 *hw_buf; 1856 unsigned int rval; 1857 int i; 1858 1859 config = raw_regmap_config; 1860 1861 map = gen_raw_regmap(test, &config, &data); 1862 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1863 if (IS_ERR(map)) 1864 return; 1865 1866 hw_buf = (u16 *)data->vals; 1867 1868 get_changed_bytes(&hw_buf[2], &val[0], sizeof(val)); 1869 1870 /* Do a regular write and a raw write in cache only mode */ 1871 regcache_cache_only(map, true); 1872 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, 1873 sizeof(u16) * 2)); 1874 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2])); 1875 1876 /* We should read back the new values, and defaults for the rest */ 1877 for (i = 0; i < config.max_register + 1; i++) { 1878 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 1879 1880 switch (i) { 1881 case 2: 1882 case 3: 1883 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1884 KUNIT_EXPECT_EQ(test, rval, 1885 be16_to_cpu((__force __be16)val[i - 2])); 1886 } else { 1887 KUNIT_EXPECT_EQ(test, rval, 1888 le16_to_cpu((__force __le16)val[i - 2])); 1889 } 1890 break; 1891 case 4: 1892 KUNIT_EXPECT_EQ(test, rval, val[i - 2]); 1893 break; 1894 default: 1895 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); 1896 break; 1897 } 1898 } 1899 1900 /* 1901 * The value written via _write() was translated by the core, 1902 * translate the original copy for comparison purposes. 1903 */ 1904 if (config.val_format_endian == REGMAP_ENDIAN_BIG) 1905 val[2] = cpu_to_be16(val[2]); 1906 else 1907 val[2] = cpu_to_le16(val[2]); 1908 1909 /* The values should not appear in the "hardware" */ 1910 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val)); 1911 1912 for (i = 0; i < config.max_register + 1; i++) 1913 data->written[i] = false; 1914 1915 /* Do the sync */ 1916 regcache_cache_only(map, false); 1917 regcache_mark_dirty(map); 1918 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1919 1920 /* The values should now appear in the "hardware" */ 1921 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val)); 1922 } 1923 1924 static void raw_ranges(struct kunit *test) 1925 { 1926 struct regmap *map; 1927 struct regmap_config config; 1928 struct regmap_ram_data *data; 1929 unsigned int val; 1930 int i; 1931 1932 config = raw_regmap_config; 1933 config.volatile_reg = test_range_all_volatile; 1934 config.ranges = &test_range; 1935 config.num_ranges = 1; 1936 config.max_register = test_range.range_max; 1937 1938 map = gen_raw_regmap(test, &config, &data); 1939 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1940 if (IS_ERR(map)) 1941 return; 1942 1943 /* Reset the page to a non-zero value to trigger a change */ 1944 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg, 1945 test_range.range_max)); 1946 1947 /* Check we set the page and use the window for writes */ 1948 data->written[test_range.selector_reg] = false; 1949 data->written[test_range.window_start] = false; 1950 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0)); 1951 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 1952 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]); 1953 1954 data->written[test_range.selector_reg] = false; 1955 data->written[test_range.window_start] = false; 1956 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1957 test_range.range_min + 1958 test_range.window_len, 1959 0)); 1960 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 1961 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]); 1962 1963 /* Same for reads */ 1964 data->written[test_range.selector_reg] = false; 1965 data->read[test_range.window_start] = false; 1966 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val)); 1967 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 1968 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]); 1969 1970 data->written[test_range.selector_reg] = false; 1971 data->read[test_range.window_start] = false; 1972 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1973 test_range.range_min + 1974 test_range.window_len, 1975 &val)); 1976 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]); 1977 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]); 1978 1979 /* No physical access triggered in the virtual range */ 1980 for (i = test_range.range_min; i < test_range.range_max; i++) { 1981 KUNIT_EXPECT_FALSE(test, data->read[i]); 1982 KUNIT_EXPECT_FALSE(test, data->written[i]); 1983 } 1984 } 1985 1986 static struct kunit_case regmap_test_cases[] = { 1987 KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params), 1988 KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params), 1989 KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params), 1990 KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params), 1991 KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params), 1992 KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params), 1993 KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params), 1994 KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params), 1995 KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params), 1996 KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params), 1997 KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params), 1998 KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params), 1999 KUNIT_CASE_PARAM(stride, regcache_types_gen_params), 2000 KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params), 2001 KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params), 2002 KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params), 2003 KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params), 2004 KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params), 2005 KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params), 2006 KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params), 2007 KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params), 2008 KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params), 2009 KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params), 2010 KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params), 2011 KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params), 2012 KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params), 2013 KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params), 2014 KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params), 2015 KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params), 2016 2017 KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params), 2018 KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params), 2019 KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params), 2020 KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params), 2021 KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params), 2022 KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params), 2023 KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params), 2024 {} 2025 }; 2026 2027 static int regmap_test_init(struct kunit *test) 2028 { 2029 struct regmap_test_priv *priv; 2030 struct device *dev; 2031 2032 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); 2033 if (!priv) 2034 return -ENOMEM; 2035 2036 test->priv = priv; 2037 2038 dev = kunit_device_register(test, "regmap_test"); 2039 if (IS_ERR(dev)) 2040 return PTR_ERR(dev); 2041 2042 priv->dev = get_device(dev); 2043 dev_set_drvdata(dev, test); 2044 2045 return 0; 2046 } 2047 2048 static void regmap_test_exit(struct kunit *test) 2049 { 2050 struct regmap_test_priv *priv = test->priv; 2051 2052 /* Destroy the dummy struct device */ 2053 if (priv && priv->dev) 2054 put_device(priv->dev); 2055 } 2056 2057 static struct kunit_suite regmap_test_suite = { 2058 .name = "regmap", 2059 .init = regmap_test_init, 2060 .exit = regmap_test_exit, 2061 .test_cases = regmap_test_cases, 2062 }; 2063 kunit_test_suite(regmap_test_suite); 2064 2065 MODULE_DESCRIPTION("Regmap KUnit tests"); 2066 MODULE_LICENSE("GPL v2"); 2067