// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);

struct regmap_test_priv {
	struct device *dev;
};

struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;
};

static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	int i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * super care.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}

static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

static const char *regcache_type_name(enum regcache_type type)
{
	switch (type) {
	case REGCACHE_NONE:
		return "none";
	case REGCACHE_FLAT:
		return "flat";
	case REGCACHE_RBTREE:
		return "rbtree";
	case REGCACHE_MAPLE:
		return "maple";
	default:
		return NULL;
	}
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
	switch (endian) {
	case REGMAP_ENDIAN_BIG:
		return "big";
	case REGMAP_ENDIAN_LITTLE:
		return "little";
	case REGMAP_ENDIAN_DEFAULT:
		return "default";
	case REGMAP_ENDIAN_NATIVE:
		return "native";
	default:
		return NULL;
	}
}

static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->from_reg);
}

static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);

static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
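
/*
 * Each KUNIT_ARRAY_PARAM() above generates a <name>_gen_params() helper
 * which is used to parameterise the cases in the test case table at the
 * bottom of this file.  The "sparse" list only contains cache types that
 * can represent a sparse register map, so no REGCACHE_FLAT.
 *
 * gen_regmap() builds a RAM backed regmap matching the current test
 * parameters.  The backing store is seeded with random data and, if
 * requested, also exposed to the core as register defaults, and
 * regmap_exit() is queued as a KUnit action so the map is torn down
 * automatically when the test ends.
 */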
static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret;
	size_t size;
	int i;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	size = (config->max_register + 1) * sizeof(unsigned int);
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kcalloc(config->num_reg_defaults,
				   sizeof(struct reg_default),
				   GFP_KERNEL);
		if (!defaults)
			return ERR_PTR(-ENOMEM);
		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	} else {
		kunit_add_action(test, regmap_exit_action, ret);
	}

	return ret;
}
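
/*
 * Matches every register except param->from_reg + 5.  Used as the
 * writeable_reg(), readable_reg() or volatile_reg() callback to make a
 * single register in the test block read only, write only or
 * non volatile.  The KUnit context is recovered from the dummy device's
 * drvdata, which regmap_test_init() points at the running test.
 */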
static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}

static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
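
/*
 * regmap_read_bypassed() is expected to go to the hardware even while
 * the map is in cache only mode, and to do so without flipping the map
 * into cache bypass.  The two tests below check this with and without
 * volatile registers in the map.
 */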
static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}
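
/*
 * The next two tests install reg_5_false() as the writeable_reg() and
 * readable_reg() callbacks respectively, making register 5 read only
 * and then write only.
 */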
static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}
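
/*
 * num_reg_defaults hands default values straight to the cache, while
 * num_reg_defaults_raw tells the core to read the defaults back from
 * the device when the map is created; the second test below checks
 * that those initial reads actually hit the hardware.
 */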
static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}

static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	/*
	 * Allow one extra register so that the read/written arrays
	 * are sized big enough to include an entry for the odd
	 * address past the final reg_default register.
	 */
	config.max_register = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even addresses can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}
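
/*
 * A single indirectly addressed window: registers 20..40 are virtual
 * and accessed through a ten register window at 4..13, with the page
 * selected via the low bits of register 1.  Accessing virtual register
 * range_min + n should set the selector to n / window_len and touch
 * window_start + (n % window_len); for example range_max (40) lands on
 * page (40 - 20) / 10 = 2, which cache_range_window_reg() checks below.
 */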
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
	if (test_range_window_volatile(dev, reg))
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.max_register = 300;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
			     GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);
	buf_sz = sizeof(unsigned long) * config.max_register;

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}

static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}
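
/*
 * regcache_sync() only writes back registers the cache believes differ
 * from the hardware: after regcache_mark_dirty() everything that is not
 * at its default value is written out, while without it only registers
 * touched while the map was in cache only mode are.  The cache_sync_*
 * tests below exercise those combinations.
 */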
static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}

static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
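
/*
 * With defaults present, a sync after regcache_mark_dirty() should
 * write out only the registers that have been moved away from their
 * default values.
 */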
static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}
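
/*
 * Without a mark_dirty, a register changed while in cache only mode
 * must still be synced, even when it is written back to its default
 * value.
 */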
static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}
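
/*
 * regcache_drop_region() discards cache entries for a register range,
 * so the next read of a dropped register must go to the device while
 * its neighbours are still served from the cache.
 */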
static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}
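
/*
 * Layout for the non contiguous drop test: eight BLOCK_TEST_SIZE sized
 * ranges starting at from_reg, with only the even numbered ranges ever
 * written (their values kept in val[rangeidx / 2]).  Range 2 is then
 * dropped completely and registers 3..5 of range 4 are dropped, so a
 * sync after zeroing the backing store should rewrite only what is
 * left of the even ranges.
 */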
static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}
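
/*
 * After dropping every register, a sync should write nothing at all,
 * whether or not the map was marked dirty and whether or not defaults
 * were provided.
 */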
static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_present(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}
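
/*
 * The raw tests below drive a 16 bit bus with explicit value
 * endianness, so data placed in the backing store has to be converted
 * to CPU order before being compared with what the regmap API returns.
 */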
static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT,   .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE,  .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
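
/*
 * Raw counterpart of gen_regmap(): the backing store is a u16 array in
 * the configured value endianness, and every register is given a
 * default derived from that random data (dropped again for the
 * uncached case, where defaults make no sense to the core).
 */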
static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * config->reg_bits / 8;
	int i;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kcalloc(config->num_reg_defaults,
			   sizeof(struct reg_default),
			   GFP_KERNEL);
	if (!defaults)
		return ERR_PTR(-ENOMEM);
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	} else {
		kunit_add_action(test, regmap_exit_action, ret);
	}

	return ret;
}

static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}

static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}

static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}
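
/*
 * Register 0 is configured as a non incrementing register: a noinc
 * write streams the whole buffer to that one address, so afterwards
 * the register holds the last value written and the following
 * register is untouched.
 */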
static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}
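
/*
 * Writes done while in cache only mode, raw or not, should be held in
 * the cache and only reach the hardware on the regcache_sync() that
 * follows.
 */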
static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}

static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}
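/*
 * Each test runs once per parameter: the cache tests iterate over the
 * cache types (and, where relevant, starting registers) declared
 * above, while the raw tests also vary the value endianness via the
 * raw_test_types and raw_test_cache_types generators.
 */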
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};

static int regmap_test_init(struct kunit *test)
{
	struct regmap_test_priv *priv;
	struct device *dev;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	test->priv = priv;

	dev = kunit_device_register(test, "regmap_test");
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	priv->dev = get_device(dev);
	dev_set_drvdata(dev, test);

	return 0;
}

static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Destroy the dummy struct device */
	if (priv && priv->dev)
		put_device(priv->dev);
}

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");
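/* Recent kernels warn at build time when a module has no description */
MODULE_DESCRIPTION("regmap KUnit tests");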