// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);

struct regmap_test_priv {
	struct device *dev;
};

struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;
};

static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	int i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * super care.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}

static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

static const char *regcache_type_name(enum regcache_type type)
{
	switch (type) {
	case REGCACHE_NONE:
		return "none";
	case REGCACHE_FLAT:
		return "flat";
	case REGCACHE_RBTREE:
		return "rbtree";
	case REGCACHE_MAPLE:
		return "maple";
	default:
		return NULL;
	}
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
	switch (endian) {
	case REGMAP_ENDIAN_BIG:
		return "big";
	case REGMAP_ENDIAN_LITTLE:
		return "little";
	case REGMAP_ENDIAN_DEFAULT:
		return "default";
	case REGMAP_ENDIAN_NATIVE:
		return "native";
	default:
		return NULL;
	}
}

static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->from_reg);
}

static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);

static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
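
/*
 * Build a RAM-backed test regmap for the current test parameters. The
 * backing store starts out with random contents and, if the config
 * asks for register defaults, a matching reg_default table is
 * generated from it so that cache and "hardware" agree from the
 * start. The map is freed automatically via a KUnit action on test
 * exit.
 */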

static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret;
	size_t size;
	int i;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	size = (config->max_register + 1) * sizeof(unsigned int);
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kcalloc(config->num_reg_defaults,
				   sizeof(struct reg_default),
				   GFP_KERNEL);
		if (!defaults)
			return ERR_PTR(-ENOMEM);
		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	} else {
		kunit_add_action(test, regmap_exit_action, ret);
	}

	return ret;
}

static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}

static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
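
/*
 * regmap_read_bypassed() should go straight to the hardware even while
 * the map is in cache-only mode, without flipping it into cache-bypass.
 * The next two tests check both the returned values and that the
 * cache_only/cache_bypass flags are left untouched.
 */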

static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}
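
/*
 * As above, but with one non-volatile register in an otherwise
 * volatile map: in cache-only mode a plain regmap_read() of an
 * uncached volatile register fails with -EBUSY, while
 * regmap_read_bypassed() still returns the hardware value.
 */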

static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}
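
/*
 * The next two tests cover the two ways of providing defaults:
 * num_reg_defaults supplies an explicit reg_default table, while
 * num_reg_defaults_raw tells the core to read the initial values
 * back from the device when the map is created.
 */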

static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}

static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}
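
/*
 * A paged/indirect register range: registers range_min..range_max are
 * reached by writing a page number to selector_reg and then accessing
 * the window at window_start..window_start + window_len - 1. With
 * these values register 20 is page 0 at window offset 0, register 30
 * is page 1 at offset 0, and register 40 is page 2.
 */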

static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
	if (test_range_window_volatile(dev, reg))
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.max_register = 300;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
			     GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);
	buf_sz = sizeof(unsigned long) * config.max_register;

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}
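
/*
 * regcache_cache_bypass() sends reads and writes straight to the
 * hardware while leaving previously cached values in place, so the
 * old cache contents are returned again once bypass is disabled.
 */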

static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}
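
/*
 * As above but without regcache_mark_dirty(): values changed while
 * the map was in cache-only mode must still be written back to the
 * hardware by regcache_sync() once cache-only is turned off.
 */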

static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}

static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}
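
/*
 * Without regcache_mark_dirty() a sync cannot assume the hardware
 * still holds the default values, so a register set back to its
 * default while in cache-only mode must still be written out.
 */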

static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}
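
/*
 * regcache_drop_region() discards the cached values for a register
 * range; the next read of those registers has to go to the hardware
 * and repopulates the cache.
 */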

static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}
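
/*
 * Build a cache containing alternating written and never-written
 * blocks, drop one whole block and part of another, then check that
 * a mark-dirty sync writes back exactly what is still cached.
 */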

static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
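
/*
 * Three variants of dropping the whole cache: once every register
 * has been dropped a subsequent regcache_sync() should write nothing
 * at all, regardless of whether the map has defaults or was marked
 * dirty first.
 */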

static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
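
/*
 * regcache_reg_cached() reports whether a value for the register is
 * currently held in the cache. With no defaults nothing starts out
 * cached; reads of non-volatile registers then populate the cache.
 */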

static void cache_present(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}
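
/*
 * The remaining tests exercise the raw (formatted I/O) path using a
 * 16-bit RAM backend, checking both little- and big-endian value
 * formatting against each cache type.
 */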

static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
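
/*
 * Build a raw RAM-backed regmap with 16-bit registers and values.
 * The backing store is random and the register defaults are derived
 * from it according to the endianness under test, so that when a
 * cache is in use it agrees with the "hardware" from the start.
 */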

static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * config->reg_bits / 8;
	int i;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kcalloc(config->num_reg_defaults,
			   sizeof(struct reg_default),
			   GFP_KERNEL);
	if (!defaults)
		return ERR_PTR(-ENOMEM);
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	} else {
		kunit_add_action(test, regmap_exit_action, ret);
	}

	return ret;
}

static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}
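
/*
 * "noinc" registers are FIFO-style addresses: a multi-value access
 * sends every value to the same register rather than incrementing
 * the address. reg_zero()/ram_reg_zero() mark register 0 as such for
 * the regmap core and for the RAM backend respectively.
 */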

static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}

static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}

static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}

static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}
static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}
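/*
 * raw_ranges() above exercises regmap's indirect (paged) access
 * support: an access to a register in the virtual range is translated
 * into a write of the page number to test_range.selector_reg followed
 * by a physical access inside the window, so the virtual registers
 * themselves are never touched directly. Roughly, for a virtual
 * register reg the core computes:
 *
 *	page   = (reg - range_min) / window_len;
 *	offset = window_start + ((reg - range_min) % window_len);
 *
 * The test verifies both halves of that translation via the
 * data->read[]/data->written[] bookkeeping.
 */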
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};

static int regmap_test_init(struct kunit *test)
{
	struct regmap_test_priv *priv;
	struct device *dev;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	test->priv = priv;

	dev = kunit_device_register(test, "regmap_test");
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	priv->dev = get_device(dev);
	dev_set_drvdata(dev, test);

	return 0;
}

static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Destroy the dummy struct device */
	if (priv && priv->dev)
		put_device(priv->dev);
}

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");
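/*
 * This suite can be run with the KUnit wrapper; one possible
 * invocation (CONFIG_REGMAP_KUNIT and its KUnit/regmap dependencies
 * must be enabled in the test kernel):
 *
 *	tools/testing/kunit/kunit.py run "regmap*" \
 *		--kconfig_add CONFIG_REGMAP=y \
 *		--kconfig_add CONFIG_REGMAP_KUNIT=y
 */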