1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // Register map access API 4 // 5 // Copyright 2011 Wolfson Microelectronics plc 6 // 7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 8 9 #include <linux/device.h> 10 #include <linux/slab.h> 11 #include <linux/export.h> 12 #include <linux/mutex.h> 13 #include <linux/err.h> 14 #include <linux/property.h> 15 #include <linux/rbtree.h> 16 #include <linux/sched.h> 17 #include <linux/delay.h> 18 #include <linux/log2.h> 19 #include <linux/hwspinlock.h> 20 #include <linux/unaligned.h> 21 22 #define CREATE_TRACE_POINTS 23 #include "trace.h" 24 25 #include "internal.h" 26 27 /* 28 * Sometimes for failures during very early init the trace 29 * infrastructure isn't available early enough to be used. For this 30 * sort of problem defining LOG_DEVICE will add printks for basic 31 * register I/O on a specific device. 32 */ 33 #undef LOG_DEVICE 34 35 #ifdef LOG_DEVICE 36 static inline bool regmap_should_log(struct regmap *map) 37 { 38 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0); 39 } 40 #else 41 static inline bool regmap_should_log(struct regmap *map) { return false; } 42 #endif 43 44 45 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 46 unsigned int mask, unsigned int val, 47 bool *change, bool force_write); 48 49 static int _regmap_bus_reg_read(void *context, unsigned int reg, 50 unsigned int *val); 51 static int _regmap_bus_read(void *context, unsigned int reg, 52 unsigned int *val); 53 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 54 unsigned int val); 55 static int _regmap_bus_reg_write(void *context, unsigned int reg, 56 unsigned int val); 57 static int _regmap_bus_raw_write(void *context, unsigned int reg, 58 unsigned int val); 59 60 bool regmap_reg_in_ranges(unsigned int reg, 61 const struct regmap_range *ranges, 62 unsigned int nranges) 63 { 64 const struct regmap_range *r; 65 int i; 66 67 for (i = 0, r = ranges; i < nranges; i++, r++) 68 if 
(regmap_reg_in_range(reg, r)) 69 return true; 70 return false; 71 } 72 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges); 73 74 bool regmap_check_range_table(struct regmap *map, unsigned int reg, 75 const struct regmap_access_table *table) 76 { 77 /* Check "no ranges" first */ 78 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) 79 return false; 80 81 /* In case zero "yes ranges" are supplied, any reg is OK */ 82 if (!table->n_yes_ranges) 83 return true; 84 85 return regmap_reg_in_ranges(reg, table->yes_ranges, 86 table->n_yes_ranges); 87 } 88 EXPORT_SYMBOL_GPL(regmap_check_range_table); 89 90 bool regmap_writeable(struct regmap *map, unsigned int reg) 91 { 92 if (map->max_register_is_set && reg > map->max_register) 93 return false; 94 95 if (map->writeable_reg) 96 return map->writeable_reg(map->dev, reg); 97 98 if (map->wr_table) 99 return regmap_check_range_table(map, reg, map->wr_table); 100 101 return true; 102 } 103 104 bool regmap_cached(struct regmap *map, unsigned int reg) 105 { 106 int ret; 107 unsigned int val; 108 109 if (map->cache_type == REGCACHE_NONE) 110 return false; 111 112 if (!map->cache_ops) 113 return false; 114 115 if (map->max_register_is_set && reg > map->max_register) 116 return false; 117 118 map->lock(map->lock_arg); 119 ret = regcache_read(map, reg, &val); 120 map->unlock(map->lock_arg); 121 if (ret) 122 return false; 123 124 return true; 125 } 126 127 bool regmap_readable(struct regmap *map, unsigned int reg) 128 { 129 if (!map->reg_read) 130 return false; 131 132 if (map->max_register_is_set && reg > map->max_register) 133 return false; 134 135 if (map->format.format_write) 136 return false; 137 138 if (map->readable_reg) 139 return map->readable_reg(map->dev, reg); 140 141 if (map->rd_table) 142 return regmap_check_range_table(map, reg, map->rd_table); 143 144 return true; 145 } 146 147 bool regmap_volatile(struct regmap *map, unsigned int reg) 148 { 149 if (!map->format.format_write && !regmap_readable(map, reg)) 150 return 
false; 151 152 if (map->volatile_reg) 153 return map->volatile_reg(map->dev, reg); 154 155 if (map->volatile_table) 156 return regmap_check_range_table(map, reg, map->volatile_table); 157 158 if (map->cache_ops) 159 return false; 160 else 161 return true; 162 } 163 164 bool regmap_precious(struct regmap *map, unsigned int reg) 165 { 166 if (!regmap_readable(map, reg)) 167 return false; 168 169 if (map->precious_reg) 170 return map->precious_reg(map->dev, reg); 171 172 if (map->precious_table) 173 return regmap_check_range_table(map, reg, map->precious_table); 174 175 return false; 176 } 177 178 bool regmap_writeable_noinc(struct regmap *map, unsigned int reg) 179 { 180 if (map->writeable_noinc_reg) 181 return map->writeable_noinc_reg(map->dev, reg); 182 183 if (map->wr_noinc_table) 184 return regmap_check_range_table(map, reg, map->wr_noinc_table); 185 186 return true; 187 } 188 189 bool regmap_readable_noinc(struct regmap *map, unsigned int reg) 190 { 191 if (map->readable_noinc_reg) 192 return map->readable_noinc_reg(map->dev, reg); 193 194 if (map->rd_noinc_table) 195 return regmap_check_range_table(map, reg, map->rd_noinc_table); 196 197 return true; 198 } 199 200 static bool regmap_volatile_range(struct regmap *map, unsigned int reg, 201 size_t num) 202 { 203 unsigned int i; 204 205 for (i = 0; i < num; i++) 206 if (!regmap_volatile(map, reg + regmap_get_offset(map, i))) 207 return false; 208 209 return true; 210 } 211 212 static void regmap_format_12_20_write(struct regmap *map, 213 unsigned int reg, unsigned int val) 214 { 215 u8 *out = map->work_buf; 216 217 out[0] = reg >> 4; 218 out[1] = (reg << 4) | (val >> 16); 219 out[2] = val >> 8; 220 out[3] = val; 221 } 222 223 224 static void regmap_format_2_6_write(struct regmap *map, 225 unsigned int reg, unsigned int val) 226 { 227 u8 *out = map->work_buf; 228 229 *out = (reg << 6) | val; 230 } 231 232 static void regmap_format_4_12_write(struct regmap *map, 233 unsigned int reg, unsigned int val) 234 { 235 
__be16 *out = map->work_buf; 236 *out = cpu_to_be16((reg << 12) | val); 237 } 238 239 static void regmap_format_7_9_write(struct regmap *map, 240 unsigned int reg, unsigned int val) 241 { 242 __be16 *out = map->work_buf; 243 *out = cpu_to_be16((reg << 9) | val); 244 } 245 246 static void regmap_format_7_17_write(struct regmap *map, 247 unsigned int reg, unsigned int val) 248 { 249 u8 *out = map->work_buf; 250 251 out[2] = val; 252 out[1] = val >> 8; 253 out[0] = (val >> 16) | (reg << 1); 254 } 255 256 static void regmap_format_10_14_write(struct regmap *map, 257 unsigned int reg, unsigned int val) 258 { 259 u8 *out = map->work_buf; 260 261 out[2] = val; 262 out[1] = (val >> 8) | (reg << 6); 263 out[0] = reg >> 2; 264 } 265 266 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) 267 { 268 u8 *b = buf; 269 270 b[0] = val << shift; 271 } 272 273 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) 274 { 275 put_unaligned_be16(val << shift, buf); 276 } 277 278 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) 279 { 280 put_unaligned_le16(val << shift, buf); 281 } 282 283 static void regmap_format_16_native(void *buf, unsigned int val, 284 unsigned int shift) 285 { 286 u16 v = val << shift; 287 288 memcpy(buf, &v, sizeof(v)); 289 } 290 291 static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift) 292 { 293 put_unaligned_be24(val << shift, buf); 294 } 295 296 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) 297 { 298 put_unaligned_be32(val << shift, buf); 299 } 300 301 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) 302 { 303 put_unaligned_le32(val << shift, buf); 304 } 305 306 static void regmap_format_32_native(void *buf, unsigned int val, 307 unsigned int shift) 308 { 309 u32 v = val << shift; 310 311 memcpy(buf, &v, sizeof(v)); 312 } 313 314 static void regmap_parse_inplace_noop(void *buf) 315 { 
316 } 317 318 static unsigned int regmap_parse_8(const void *buf) 319 { 320 const u8 *b = buf; 321 322 return b[0]; 323 } 324 325 static unsigned int regmap_parse_16_be(const void *buf) 326 { 327 return get_unaligned_be16(buf); 328 } 329 330 static unsigned int regmap_parse_16_le(const void *buf) 331 { 332 return get_unaligned_le16(buf); 333 } 334 335 static void regmap_parse_16_be_inplace(void *buf) 336 { 337 u16 v = get_unaligned_be16(buf); 338 339 memcpy(buf, &v, sizeof(v)); 340 } 341 342 static void regmap_parse_16_le_inplace(void *buf) 343 { 344 u16 v = get_unaligned_le16(buf); 345 346 memcpy(buf, &v, sizeof(v)); 347 } 348 349 static unsigned int regmap_parse_16_native(const void *buf) 350 { 351 u16 v; 352 353 memcpy(&v, buf, sizeof(v)); 354 return v; 355 } 356 357 static unsigned int regmap_parse_24_be(const void *buf) 358 { 359 return get_unaligned_be24(buf); 360 } 361 362 static unsigned int regmap_parse_32_be(const void *buf) 363 { 364 return get_unaligned_be32(buf); 365 } 366 367 static unsigned int regmap_parse_32_le(const void *buf) 368 { 369 return get_unaligned_le32(buf); 370 } 371 372 static void regmap_parse_32_be_inplace(void *buf) 373 { 374 u32 v = get_unaligned_be32(buf); 375 376 memcpy(buf, &v, sizeof(v)); 377 } 378 379 static void regmap_parse_32_le_inplace(void *buf) 380 { 381 u32 v = get_unaligned_le32(buf); 382 383 memcpy(buf, &v, sizeof(v)); 384 } 385 386 static unsigned int regmap_parse_32_native(const void *buf) 387 { 388 u32 v; 389 390 memcpy(&v, buf, sizeof(v)); 391 return v; 392 } 393 394 static void regmap_lock_hwlock(void *__map) 395 { 396 struct regmap *map = __map; 397 398 hwspin_lock_timeout(map->hwlock, UINT_MAX); 399 } 400 401 static void regmap_lock_hwlock_irq(void *__map) 402 { 403 struct regmap *map = __map; 404 405 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX); 406 } 407 408 static void regmap_lock_hwlock_irqsave(void *__map) 409 { 410 struct regmap *map = __map; 411 unsigned long flags = 0; 412 413 
hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX, 414 &flags); 415 map->spinlock_flags = flags; 416 } 417 418 static void regmap_unlock_hwlock(void *__map) 419 { 420 struct regmap *map = __map; 421 422 hwspin_unlock(map->hwlock); 423 } 424 425 static void regmap_unlock_hwlock_irq(void *__map) 426 { 427 struct regmap *map = __map; 428 429 hwspin_unlock_irq(map->hwlock); 430 } 431 432 static void regmap_unlock_hwlock_irqrestore(void *__map) 433 { 434 struct regmap *map = __map; 435 436 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags); 437 } 438 439 static void regmap_lock_unlock_none(void *__map) 440 { 441 442 } 443 444 static void regmap_lock_mutex(void *__map) 445 { 446 struct regmap *map = __map; 447 mutex_lock(&map->mutex); 448 } 449 450 static void regmap_unlock_mutex(void *__map) 451 { 452 struct regmap *map = __map; 453 mutex_unlock(&map->mutex); 454 } 455 456 static void regmap_lock_spinlock(void *__map) 457 __acquires(&map->spinlock) 458 { 459 struct regmap *map = __map; 460 unsigned long flags; 461 462 spin_lock_irqsave(&map->spinlock, flags); 463 map->spinlock_flags = flags; 464 } 465 466 static void regmap_unlock_spinlock(void *__map) 467 __releases(&map->spinlock) 468 { 469 struct regmap *map = __map; 470 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); 471 } 472 473 static void regmap_lock_raw_spinlock(void *__map) 474 __acquires(&map->raw_spinlock) 475 { 476 struct regmap *map = __map; 477 unsigned long flags; 478 479 raw_spin_lock_irqsave(&map->raw_spinlock, flags); 480 map->raw_spinlock_flags = flags; 481 } 482 483 static void regmap_unlock_raw_spinlock(void *__map) 484 __releases(&map->raw_spinlock) 485 { 486 struct regmap *map = __map; 487 raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags); 488 } 489 490 static void dev_get_regmap_release(struct device *dev, void *res) 491 { 492 /* 493 * We don't actually have anything to do here; the goal here 494 * is not to manage the regmap but to provide a simple 
way to 495 * get the regmap back given a struct device. 496 */ 497 } 498 499 static bool _regmap_range_add(struct regmap *map, 500 struct regmap_range_node *data) 501 { 502 struct rb_root *root = &map->range_tree; 503 struct rb_node **new = &(root->rb_node), *parent = NULL; 504 505 while (*new) { 506 struct regmap_range_node *this = 507 rb_entry(*new, struct regmap_range_node, node); 508 509 parent = *new; 510 if (data->range_max < this->range_min) 511 new = &((*new)->rb_left); 512 else if (data->range_min > this->range_max) 513 new = &((*new)->rb_right); 514 else 515 return false; 516 } 517 518 rb_link_node(&data->node, parent, new); 519 rb_insert_color(&data->node, root); 520 521 return true; 522 } 523 524 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, 525 unsigned int reg) 526 { 527 struct rb_node *node = map->range_tree.rb_node; 528 529 while (node) { 530 struct regmap_range_node *this = 531 rb_entry(node, struct regmap_range_node, node); 532 533 if (reg < this->range_min) 534 node = node->rb_left; 535 else if (reg > this->range_max) 536 node = node->rb_right; 537 else 538 return this; 539 } 540 541 return NULL; 542 } 543 544 static void regmap_range_exit(struct regmap *map) 545 { 546 struct rb_node *next; 547 struct regmap_range_node *range_node; 548 549 next = rb_first(&map->range_tree); 550 while (next) { 551 range_node = rb_entry(next, struct regmap_range_node, node); 552 next = rb_next(&range_node->node); 553 rb_erase(&range_node->node, &map->range_tree); 554 kfree(range_node); 555 } 556 557 kfree(map->selector_work_buf); 558 } 559 560 static int regmap_set_name(struct regmap *map, const struct regmap_config *config) 561 { 562 if (config->name) { 563 const char *name = kstrdup_const(config->name, GFP_KERNEL); 564 565 if (!name) 566 return -ENOMEM; 567 568 kfree_const(map->name); 569 map->name = name; 570 } 571 572 return 0; 573 } 574 575 int regmap_attach_dev(struct device *dev, struct regmap *map, 576 const struct regmap_config 
*config) 577 { 578 struct regmap **m; 579 int ret; 580 581 map->dev = dev; 582 583 ret = regmap_set_name(map, config); 584 if (ret) 585 return ret; 586 587 regmap_debugfs_exit(map); 588 regmap_debugfs_init(map); 589 590 /* Add a devres resource for dev_get_regmap() */ 591 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 592 if (!m) { 593 regmap_debugfs_exit(map); 594 return -ENOMEM; 595 } 596 *m = map; 597 devres_add(dev, m); 598 599 return 0; 600 } 601 EXPORT_SYMBOL_GPL(regmap_attach_dev); 602 603 static int dev_get_regmap_match(struct device *dev, void *res, void *data); 604 605 static int regmap_detach_dev(struct device *dev, struct regmap *map) 606 { 607 if (!dev) 608 return 0; 609 610 return devres_release(dev, dev_get_regmap_release, 611 dev_get_regmap_match, (void *)map->name); 612 } 613 614 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, 615 const struct regmap_config *config) 616 { 617 enum regmap_endian endian; 618 619 /* Retrieve the endianness specification from the regmap config */ 620 endian = config->reg_format_endian; 621 622 /* If the regmap config specified a non-default value, use that */ 623 if (endian != REGMAP_ENDIAN_DEFAULT) 624 return endian; 625 626 /* Retrieve the endianness specification from the bus config */ 627 if (bus && bus->reg_format_endian_default) 628 endian = bus->reg_format_endian_default; 629 630 /* If the bus specified a non-default value, use that */ 631 if (endian != REGMAP_ENDIAN_DEFAULT) 632 return endian; 633 634 /* Use this if no other value was found */ 635 return REGMAP_ENDIAN_BIG; 636 } 637 638 enum regmap_endian regmap_get_val_endian(struct device *dev, 639 const struct regmap_bus *bus, 640 const struct regmap_config *config) 641 { 642 struct fwnode_handle *fwnode = dev ? 
dev_fwnode(dev) : NULL; 643 enum regmap_endian endian; 644 645 /* Retrieve the endianness specification from the regmap config */ 646 endian = config->val_format_endian; 647 648 /* If the regmap config specified a non-default value, use that */ 649 if (endian != REGMAP_ENDIAN_DEFAULT) 650 return endian; 651 652 /* If the firmware node exist try to get endianness from it */ 653 if (fwnode_property_read_bool(fwnode, "big-endian")) 654 endian = REGMAP_ENDIAN_BIG; 655 else if (fwnode_property_read_bool(fwnode, "little-endian")) 656 endian = REGMAP_ENDIAN_LITTLE; 657 else if (fwnode_property_read_bool(fwnode, "native-endian")) 658 endian = REGMAP_ENDIAN_NATIVE; 659 660 /* If the endianness was specified in fwnode, use that */ 661 if (endian != REGMAP_ENDIAN_DEFAULT) 662 return endian; 663 664 /* Retrieve the endianness specification from the bus config */ 665 if (bus && bus->val_format_endian_default) 666 endian = bus->val_format_endian_default; 667 668 /* If the bus specified a non-default value, use that */ 669 if (endian != REGMAP_ENDIAN_DEFAULT) 670 return endian; 671 672 /* Use this if no other value was found */ 673 return REGMAP_ENDIAN_BIG; 674 } 675 EXPORT_SYMBOL_GPL(regmap_get_val_endian); 676 677 struct regmap *__regmap_init(struct device *dev, 678 const struct regmap_bus *bus, 679 void *bus_context, 680 const struct regmap_config *config, 681 struct lock_class_key *lock_key, 682 const char *lock_name) 683 { 684 struct regmap *map; 685 int ret = -EINVAL; 686 enum regmap_endian reg_endian, val_endian; 687 int i, j; 688 689 if (!config) 690 goto err; 691 692 map = kzalloc_obj(*map); 693 if (map == NULL) { 694 ret = -ENOMEM; 695 goto err; 696 } 697 698 ret = regmap_set_name(map, config); 699 if (ret) 700 goto err_map; 701 702 ret = -EINVAL; /* Later error paths rely on this */ 703 704 if (config->disable_locking) { 705 map->lock = map->unlock = regmap_lock_unlock_none; 706 map->can_sleep = config->can_sleep; 707 regmap_debugfs_disable(map); 708 } else if 
(config->lock && config->unlock) { 709 map->lock = config->lock; 710 map->unlock = config->unlock; 711 map->lock_arg = config->lock_arg; 712 map->can_sleep = config->can_sleep; 713 } else if (config->use_hwlock) { 714 map->hwlock = hwspin_lock_request_specific(config->hwlock_id); 715 if (!map->hwlock) { 716 ret = -ENXIO; 717 goto err_name; 718 } 719 720 switch (config->hwlock_mode) { 721 case HWLOCK_IRQSTATE: 722 map->lock = regmap_lock_hwlock_irqsave; 723 map->unlock = regmap_unlock_hwlock_irqrestore; 724 break; 725 case HWLOCK_IRQ: 726 map->lock = regmap_lock_hwlock_irq; 727 map->unlock = regmap_unlock_hwlock_irq; 728 break; 729 default: 730 map->lock = regmap_lock_hwlock; 731 map->unlock = regmap_unlock_hwlock; 732 break; 733 } 734 735 map->lock_arg = map; 736 } else { 737 if ((bus && bus->fast_io) || 738 config->fast_io) { 739 if (config->use_raw_spinlock) { 740 raw_spin_lock_init(&map->raw_spinlock); 741 map->lock = regmap_lock_raw_spinlock; 742 map->unlock = regmap_unlock_raw_spinlock; 743 lockdep_set_class_and_name(&map->raw_spinlock, 744 lock_key, lock_name); 745 } else { 746 spin_lock_init(&map->spinlock); 747 map->lock = regmap_lock_spinlock; 748 map->unlock = regmap_unlock_spinlock; 749 lockdep_set_class_and_name(&map->spinlock, 750 lock_key, lock_name); 751 } 752 } else { 753 mutex_init(&map->mutex); 754 map->lock = regmap_lock_mutex; 755 map->unlock = regmap_unlock_mutex; 756 map->can_sleep = true; 757 lockdep_set_class_and_name(&map->mutex, 758 lock_key, lock_name); 759 } 760 map->lock_arg = map; 761 map->lock_key = lock_key; 762 } 763 764 /* 765 * When we write in fast-paths with regmap_bulk_write() don't allocate 766 * scratch buffers with sleeping allocations. 
767 */ 768 if ((bus && bus->fast_io) || config->fast_io) 769 map->alloc_flags = GFP_ATOMIC; 770 else 771 map->alloc_flags = GFP_KERNEL; 772 773 map->reg_base = config->reg_base; 774 map->reg_shift = config->pad_bits % 8; 775 776 map->format.pad_bytes = config->pad_bits / 8; 777 map->format.reg_shift = config->reg_shift; 778 map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits); 779 map->format.val_bytes = BITS_TO_BYTES(config->val_bits); 780 map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits); 781 if (config->reg_stride) 782 map->reg_stride = config->reg_stride; 783 else 784 map->reg_stride = 1; 785 if (is_power_of_2(map->reg_stride)) 786 map->reg_stride_order = ilog2(map->reg_stride); 787 else 788 map->reg_stride_order = -1; 789 map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read)); 790 map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write)); 791 map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write)); 792 if (bus) { 793 map->max_raw_read = bus->max_raw_read; 794 map->max_raw_write = bus->max_raw_write; 795 } else if (config->max_raw_read && config->max_raw_write) { 796 map->max_raw_read = config->max_raw_read; 797 map->max_raw_write = config->max_raw_write; 798 } 799 map->dev = dev; 800 map->bus = bus; 801 map->bus_context = bus_context; 802 map->max_register = config->max_register; 803 map->max_register_is_set = map->max_register ?: config->max_register_is_0; 804 map->wr_table = config->wr_table; 805 map->rd_table = config->rd_table; 806 map->volatile_table = config->volatile_table; 807 map->precious_table = config->precious_table; 808 map->wr_noinc_table = config->wr_noinc_table; 809 map->rd_noinc_table = config->rd_noinc_table; 810 map->writeable_reg = config->writeable_reg; 811 map->readable_reg = config->readable_reg; 812 map->volatile_reg = config->volatile_reg; 813 map->precious_reg = config->precious_reg; 814 
map->writeable_noinc_reg = config->writeable_noinc_reg; 815 map->readable_noinc_reg = config->readable_noinc_reg; 816 map->reg_default_cb = config->reg_default_cb; 817 map->cache_type = config->cache_type; 818 819 spin_lock_init(&map->async_lock); 820 INIT_LIST_HEAD(&map->async_list); 821 INIT_LIST_HEAD(&map->async_free); 822 init_waitqueue_head(&map->async_waitq); 823 824 if (config->read_flag_mask || 825 config->write_flag_mask || 826 config->zero_flag_mask) { 827 map->read_flag_mask = config->read_flag_mask; 828 map->write_flag_mask = config->write_flag_mask; 829 } else if (bus) { 830 map->read_flag_mask = bus->read_flag_mask; 831 } 832 833 if (config->read && config->write) { 834 map->reg_read = _regmap_bus_read; 835 if (config->reg_update_bits) 836 map->reg_update_bits = config->reg_update_bits; 837 838 /* Bulk read/write */ 839 map->read = config->read; 840 map->write = config->write; 841 842 reg_endian = REGMAP_ENDIAN_NATIVE; 843 val_endian = REGMAP_ENDIAN_NATIVE; 844 } else if (!bus) { 845 map->reg_read = config->reg_read; 846 map->reg_write = config->reg_write; 847 map->reg_update_bits = config->reg_update_bits; 848 849 map->defer_caching = false; 850 goto skip_format_initialization; 851 } else if (!bus->read || !bus->write) { 852 map->reg_read = _regmap_bus_reg_read; 853 map->reg_write = _regmap_bus_reg_write; 854 map->reg_update_bits = bus->reg_update_bits; 855 856 map->defer_caching = false; 857 goto skip_format_initialization; 858 } else { 859 map->reg_read = _regmap_bus_read; 860 map->reg_update_bits = bus->reg_update_bits; 861 /* Bulk read/write */ 862 map->read = bus->read; 863 map->write = bus->write; 864 865 reg_endian = regmap_get_reg_endian(bus, config); 866 val_endian = regmap_get_val_endian(dev, bus, config); 867 } 868 869 switch (config->reg_bits + map->reg_shift) { 870 case 2: 871 switch (config->val_bits) { 872 case 6: 873 map->format.format_write = regmap_format_2_6_write; 874 break; 875 default: 876 goto err_hwlock; 877 } 878 break; 879 
880 case 4: 881 switch (config->val_bits) { 882 case 12: 883 map->format.format_write = regmap_format_4_12_write; 884 break; 885 default: 886 goto err_hwlock; 887 } 888 break; 889 890 case 7: 891 switch (config->val_bits) { 892 case 9: 893 map->format.format_write = regmap_format_7_9_write; 894 break; 895 case 17: 896 map->format.format_write = regmap_format_7_17_write; 897 break; 898 default: 899 goto err_hwlock; 900 } 901 break; 902 903 case 10: 904 switch (config->val_bits) { 905 case 14: 906 map->format.format_write = regmap_format_10_14_write; 907 break; 908 default: 909 goto err_hwlock; 910 } 911 break; 912 913 case 12: 914 switch (config->val_bits) { 915 case 20: 916 map->format.format_write = regmap_format_12_20_write; 917 break; 918 default: 919 goto err_hwlock; 920 } 921 break; 922 923 case 8: 924 map->format.format_reg = regmap_format_8; 925 break; 926 927 case 16: 928 switch (reg_endian) { 929 case REGMAP_ENDIAN_BIG: 930 map->format.format_reg = regmap_format_16_be; 931 break; 932 case REGMAP_ENDIAN_LITTLE: 933 map->format.format_reg = regmap_format_16_le; 934 break; 935 case REGMAP_ENDIAN_NATIVE: 936 map->format.format_reg = regmap_format_16_native; 937 break; 938 default: 939 goto err_hwlock; 940 } 941 break; 942 943 case 24: 944 switch (reg_endian) { 945 case REGMAP_ENDIAN_BIG: 946 map->format.format_reg = regmap_format_24_be; 947 break; 948 default: 949 goto err_hwlock; 950 } 951 break; 952 953 case 32: 954 switch (reg_endian) { 955 case REGMAP_ENDIAN_BIG: 956 map->format.format_reg = regmap_format_32_be; 957 break; 958 case REGMAP_ENDIAN_LITTLE: 959 map->format.format_reg = regmap_format_32_le; 960 break; 961 case REGMAP_ENDIAN_NATIVE: 962 map->format.format_reg = regmap_format_32_native; 963 break; 964 default: 965 goto err_hwlock; 966 } 967 break; 968 969 default: 970 goto err_hwlock; 971 } 972 973 if (val_endian == REGMAP_ENDIAN_NATIVE) 974 map->format.parse_inplace = regmap_parse_inplace_noop; 975 976 switch (config->val_bits) { 977 case 8: 978 
map->format.format_val = regmap_format_8; 979 map->format.parse_val = regmap_parse_8; 980 map->format.parse_inplace = regmap_parse_inplace_noop; 981 break; 982 case 16: 983 switch (val_endian) { 984 case REGMAP_ENDIAN_BIG: 985 map->format.format_val = regmap_format_16_be; 986 map->format.parse_val = regmap_parse_16_be; 987 map->format.parse_inplace = regmap_parse_16_be_inplace; 988 break; 989 case REGMAP_ENDIAN_LITTLE: 990 map->format.format_val = regmap_format_16_le; 991 map->format.parse_val = regmap_parse_16_le; 992 map->format.parse_inplace = regmap_parse_16_le_inplace; 993 break; 994 case REGMAP_ENDIAN_NATIVE: 995 map->format.format_val = regmap_format_16_native; 996 map->format.parse_val = regmap_parse_16_native; 997 break; 998 default: 999 goto err_hwlock; 1000 } 1001 break; 1002 case 24: 1003 switch (val_endian) { 1004 case REGMAP_ENDIAN_BIG: 1005 map->format.format_val = regmap_format_24_be; 1006 map->format.parse_val = regmap_parse_24_be; 1007 break; 1008 default: 1009 goto err_hwlock; 1010 } 1011 break; 1012 case 32: 1013 switch (val_endian) { 1014 case REGMAP_ENDIAN_BIG: 1015 map->format.format_val = regmap_format_32_be; 1016 map->format.parse_val = regmap_parse_32_be; 1017 map->format.parse_inplace = regmap_parse_32_be_inplace; 1018 break; 1019 case REGMAP_ENDIAN_LITTLE: 1020 map->format.format_val = regmap_format_32_le; 1021 map->format.parse_val = regmap_parse_32_le; 1022 map->format.parse_inplace = regmap_parse_32_le_inplace; 1023 break; 1024 case REGMAP_ENDIAN_NATIVE: 1025 map->format.format_val = regmap_format_32_native; 1026 map->format.parse_val = regmap_parse_32_native; 1027 break; 1028 default: 1029 goto err_hwlock; 1030 } 1031 break; 1032 } 1033 1034 if (map->format.format_write) { 1035 if ((reg_endian != REGMAP_ENDIAN_BIG) || 1036 (val_endian != REGMAP_ENDIAN_BIG)) 1037 goto err_hwlock; 1038 map->use_single_write = true; 1039 } 1040 1041 if (!map->format.format_write && 1042 !(map->format.format_reg && map->format.format_val)) 1043 goto 
err_hwlock; 1044 1045 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); 1046 if (map->work_buf == NULL) { 1047 ret = -ENOMEM; 1048 goto err_hwlock; 1049 } 1050 1051 if (map->format.format_write) { 1052 map->defer_caching = false; 1053 map->reg_write = _regmap_bus_formatted_write; 1054 } else if (map->format.format_val) { 1055 map->defer_caching = true; 1056 map->reg_write = _regmap_bus_raw_write; 1057 } 1058 1059 skip_format_initialization: 1060 1061 map->range_tree = RB_ROOT; 1062 for (i = 0; i < config->num_ranges; i++) { 1063 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 1064 struct regmap_range_node *new; 1065 1066 /* Sanity check */ 1067 if (range_cfg->range_max < range_cfg->range_min) { 1068 dev_err(map->dev, "Invalid range %d: %u < %u\n", i, 1069 range_cfg->range_max, range_cfg->range_min); 1070 goto err_range; 1071 } 1072 1073 if (range_cfg->range_max > map->max_register) { 1074 dev_err(map->dev, "Invalid range %d: %u > %u\n", i, 1075 range_cfg->range_max, map->max_register); 1076 goto err_range; 1077 } 1078 1079 if (range_cfg->selector_reg > map->max_register) { 1080 dev_err(map->dev, 1081 "Invalid range %d: selector out of map\n", i); 1082 goto err_range; 1083 } 1084 1085 if (range_cfg->window_len == 0) { 1086 dev_err(map->dev, "Invalid range %d: window_len 0\n", 1087 i); 1088 goto err_range; 1089 } 1090 1091 /* Make sure, that this register range has no selector 1092 or data window within its boundary */ 1093 for (j = 0; j < config->num_ranges; j++) { 1094 unsigned int sel_reg = config->ranges[j].selector_reg; 1095 unsigned int win_min = config->ranges[j].window_start; 1096 unsigned int win_max = win_min + 1097 config->ranges[j].window_len - 1; 1098 1099 /* Allow data window inside its own virtual range */ 1100 if (j == i) 1101 continue; 1102 1103 if (range_cfg->range_min <= sel_reg && 1104 sel_reg <= range_cfg->range_max) { 1105 dev_err(map->dev, 1106 "Range %d: selector for %d in window\n", 1107 i, j); 1108 goto err_range; 
1109 } 1110 1111 if (!(win_max < range_cfg->range_min || 1112 win_min > range_cfg->range_max)) { 1113 dev_err(map->dev, 1114 "Range %d: window for %d in window\n", 1115 i, j); 1116 goto err_range; 1117 } 1118 } 1119 1120 new = kzalloc_obj(*new); 1121 if (new == NULL) { 1122 ret = -ENOMEM; 1123 goto err_range; 1124 } 1125 1126 new->map = map; 1127 new->name = range_cfg->name; 1128 new->range_min = range_cfg->range_min; 1129 new->range_max = range_cfg->range_max; 1130 new->selector_reg = range_cfg->selector_reg; 1131 new->selector_mask = range_cfg->selector_mask; 1132 new->selector_shift = range_cfg->selector_shift; 1133 new->window_start = range_cfg->window_start; 1134 new->window_len = range_cfg->window_len; 1135 1136 if (!_regmap_range_add(map, new)) { 1137 dev_err(map->dev, "Failed to add range %d\n", i); 1138 kfree(new); 1139 goto err_range; 1140 } 1141 1142 if (map->selector_work_buf == NULL) { 1143 map->selector_work_buf = 1144 kzalloc(map->format.buf_size, GFP_KERNEL); 1145 if (map->selector_work_buf == NULL) { 1146 ret = -ENOMEM; 1147 goto err_range; 1148 } 1149 } 1150 } 1151 1152 ret = regcache_init(map, config); 1153 if (ret != 0) 1154 goto err_range; 1155 1156 if (dev) { 1157 ret = regmap_attach_dev(dev, map, config); 1158 if (ret != 0) 1159 goto err_regcache; 1160 } else { 1161 regmap_debugfs_init(map); 1162 } 1163 1164 return map; 1165 1166 err_regcache: 1167 regcache_exit(map); 1168 err_range: 1169 regmap_range_exit(map); 1170 kfree(map->work_buf); 1171 err_hwlock: 1172 if (map->hwlock) 1173 hwspin_lock_free(map->hwlock); 1174 err_name: 1175 kfree_const(map->name); 1176 err_map: 1177 kfree(map); 1178 err: 1179 if (bus && bus->free_on_exit) 1180 kfree(bus); 1181 return ERR_PTR(ret); 1182 } 1183 EXPORT_SYMBOL_GPL(__regmap_init); 1184 1185 static void devm_regmap_release(struct device *dev, void *res) 1186 { 1187 regmap_exit(*(struct regmap **)res); 1188 } 1189 1190 struct regmap *__devm_regmap_init(struct device *dev, 1191 const struct regmap_bus *bus, 
1192 void *bus_context, 1193 const struct regmap_config *config, 1194 struct lock_class_key *lock_key, 1195 const char *lock_name) 1196 { 1197 struct regmap **ptr, *regmap; 1198 1199 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); 1200 if (!ptr) 1201 return ERR_PTR(-ENOMEM); 1202 1203 regmap = __regmap_init(dev, bus, bus_context, config, 1204 lock_key, lock_name); 1205 if (!IS_ERR(regmap)) { 1206 *ptr = regmap; 1207 devres_add(dev, ptr); 1208 } else { 1209 devres_free(ptr); 1210 } 1211 1212 return regmap; 1213 } 1214 EXPORT_SYMBOL_GPL(__devm_regmap_init); 1215 1216 static void regmap_field_init(struct regmap_field *rm_field, 1217 struct regmap *regmap, struct reg_field reg_field) 1218 { 1219 rm_field->regmap = regmap; 1220 rm_field->reg = reg_field.reg; 1221 rm_field->shift = reg_field.lsb; 1222 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); 1223 1224 WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n"); 1225 1226 rm_field->id_size = reg_field.id_size; 1227 rm_field->id_offset = reg_field.id_offset; 1228 } 1229 1230 /** 1231 * devm_regmap_field_alloc() - Allocate and initialise a register field. 1232 * 1233 * @dev: Device that will be interacted with 1234 * @regmap: regmap bank in which this register field is located. 1235 * @reg_field: Register field with in the bank. 1236 * 1237 * The return value will be an ERR_PTR() on error or a valid pointer 1238 * to a struct regmap_field. The regmap_field will be automatically freed 1239 * by the device management code. 
1240 */ 1241 struct regmap_field *devm_regmap_field_alloc(struct device *dev, 1242 struct regmap *regmap, struct reg_field reg_field) 1243 { 1244 struct regmap_field *rm_field = devm_kzalloc(dev, 1245 sizeof(*rm_field), GFP_KERNEL); 1246 if (!rm_field) 1247 return ERR_PTR(-ENOMEM); 1248 1249 regmap_field_init(rm_field, regmap, reg_field); 1250 1251 return rm_field; 1252 1253 } 1254 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc); 1255 1256 1257 /** 1258 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field. 1259 * 1260 * @regmap: regmap bank in which this register field is located. 1261 * @rm_field: regmap register fields within the bank. 1262 * @reg_field: Register fields within the bank. 1263 * @num_fields: Number of register fields. 1264 * 1265 * The return value will be an -ENOMEM on error or zero for success. 1266 * Newly allocated regmap_fields should be freed by calling 1267 * regmap_field_bulk_free() 1268 */ 1269 int regmap_field_bulk_alloc(struct regmap *regmap, 1270 struct regmap_field **rm_field, 1271 const struct reg_field *reg_field, 1272 int num_fields) 1273 { 1274 struct regmap_field *rf; 1275 int i; 1276 1277 rf = kzalloc_objs(*rf, num_fields); 1278 if (!rf) 1279 return -ENOMEM; 1280 1281 for (i = 0; i < num_fields; i++) { 1282 regmap_field_init(&rf[i], regmap, reg_field[i]); 1283 rm_field[i] = &rf[i]; 1284 } 1285 1286 return 0; 1287 } 1288 EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc); 1289 1290 /** 1291 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register 1292 * fields. 1293 * 1294 * @dev: Device that will be interacted with 1295 * @regmap: regmap bank in which this register field is located. 1296 * @rm_field: regmap register fields within the bank. 1297 * @reg_field: Register fields within the bank. 1298 * @num_fields: Number of register fields. 1299 * 1300 * The return value will be an -ENOMEM on error or zero for success. 
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 const struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	/* Device-managed equivalent of regmap_field_bulk_alloc() */
	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register field allocated using
 * regmap_field_bulk_alloc.
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
 * devm_regmap_field_bulk_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once its finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc_obj(*rm_field);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 * regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
1420 * 1421 * No explicit locking is done here, the user needs to ensure that 1422 * this function will not race with other calls to regmap. 1423 */ 1424 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) 1425 { 1426 int ret; 1427 1428 regcache_exit(map); 1429 regmap_debugfs_exit(map); 1430 1431 map->max_register = config->max_register; 1432 map->max_register_is_set = map->max_register ?: config->max_register_is_0; 1433 map->writeable_reg = config->writeable_reg; 1434 map->readable_reg = config->readable_reg; 1435 map->volatile_reg = config->volatile_reg; 1436 map->precious_reg = config->precious_reg; 1437 map->writeable_noinc_reg = config->writeable_noinc_reg; 1438 map->readable_noinc_reg = config->readable_noinc_reg; 1439 map->reg_default_cb = config->reg_default_cb; 1440 map->cache_type = config->cache_type; 1441 1442 ret = regmap_set_name(map, config); 1443 if (ret) 1444 return ret; 1445 1446 regmap_debugfs_init(map); 1447 1448 map->cache_bypass = false; 1449 map->cache_only = false; 1450 1451 return regcache_init(map, config); 1452 } 1453 EXPORT_SYMBOL_GPL(regmap_reinit_cache); 1454 1455 /** 1456 * regmap_exit() - Free a previously allocated register map 1457 * 1458 * @map: Register map to operate on. 
1459 */ 1460 void regmap_exit(struct regmap *map) 1461 { 1462 struct regmap_async *async; 1463 1464 regmap_detach_dev(map->dev, map); 1465 regcache_exit(map); 1466 1467 regmap_debugfs_exit(map); 1468 regmap_range_exit(map); 1469 if (map->bus && map->bus->free_context) 1470 map->bus->free_context(map->bus_context); 1471 kfree(map->work_buf); 1472 while (!list_empty(&map->async_free)) { 1473 async = list_first_entry_or_null(&map->async_free, 1474 struct regmap_async, 1475 list); 1476 list_del(&async->list); 1477 kfree(async->work_buf); 1478 kfree(async); 1479 } 1480 if (map->hwlock) 1481 hwspin_lock_free(map->hwlock); 1482 if (map->lock == regmap_lock_mutex) 1483 mutex_destroy(&map->mutex); 1484 kfree_const(map->name); 1485 kfree(map->patch); 1486 if (map->bus && map->bus->free_on_exit) 1487 kfree(map->bus); 1488 kfree(map); 1489 } 1490 EXPORT_SYMBOL_GPL(regmap_exit); 1491 1492 static int dev_get_regmap_match(struct device *dev, void *res, void *data) 1493 { 1494 struct regmap **r = res; 1495 if (!r || !*r) { 1496 WARN_ON(!r || !*r); 1497 return 0; 1498 } 1499 1500 /* If the user didn't specify a name match any */ 1501 if (data) 1502 return (*r)->name && !strcmp((*r)->name, data); 1503 else 1504 return 1; 1505 } 1506 1507 /** 1508 * dev_get_regmap() - Obtain the regmap (if any) for a device 1509 * 1510 * @dev: Device to retrieve the map for 1511 * @name: Optional name for the register map, usually NULL. 1512 * 1513 * Returns the regmap for the device if one is present, or NULL. If 1514 * name is specified then it must match the name specified when 1515 * registering the device, if it is NULL then the first regmap found 1516 * will be used. Devices with multiple register maps are very rare, 1517 * generic code should normally not need to specify a name. 
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

/*
 * Translate *reg into a window-relative address for a paged register range,
 * writing the page selector register first if a page switch is needed.
 * val_num > 1 means a bulk access that must not cross a page boundary.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int selector_reg;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * Calculate the address of the selector register in the corresponding
	 * data window if it is located on every page.
	 */
	page_chg = in_range(range->selector_reg, range->window_start, range->window_len);
	if (page_chg)
		selector_reg = range->range_min + win_page * range->window_len +
			range->selector_reg - range->window_start;

	/*
	 * It is possible to have selector register inside data window.
	 * In that case, selector register is located on every page and it
	 * needs no page switching, when accessed alone.
	 *
	 * Nevertheless we should synchronize the cache values for it.
	 * This can't be properly achieved if the selector register is
	 * the first and the only one to be read inside the data window.
	 * That's why we update it in that case as well.
	 *
	 * However, we specifically avoid updating it for the default page,
	 * when it's overlapped with the real data window, to prevent from
	 * infinite looping.
	 */
	if (val_num > 1 ||
	    (page_chg && selector_reg != range->selector_reg) ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  NULL, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

/* OR the per-byte write flag mask into the formatted register bytes. */
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

/* Apply the configured register base offset and downshift/upshift. */
static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
{
	reg += map->reg_base;

	if (map->format.reg_shift > 0)
		reg >>= map->format.reg_shift;
	else if (map->format.reg_shift < 0)
		reg <<= -(map->format.reg_shift);

	return reg;
}

/*
 * Core raw-write path: format the register into work_buf, handle paged
 * ranges and window splitting, then hand the data to the bus (optionally
 * asynchronously). val points at pre-formatted value bytes.
 */
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival, offset;
		int val_bytes = map->format.val_bytes;

		/* Cache the last written value for noinc writes */
		i = noinc ? val_len - val_bytes : 0;
		for (; i < val_len; i += val_bytes) {
			ival = map->format.parse_val(val + i);
			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
			ret = regcache_write(map, reg + offset, ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + offset, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		/* Reuse a free async context if one is available */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the context to the free list on failure */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->write(map->bus_context, map->work_buf,
				 map->format.reg_bytes +
				 map->format.pad_bytes +
				 val_len);
	else if (map->bus && map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

/* Write path for buses whose writes use a formatted reg+val work buffer. */
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

/* Write path for buses providing a native reg_write() operation. */
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_write(map->bus_context, reg, val);
}

/* Write path for raw buses: format val into work_buf, then raw write. */
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

/* Context passed to reg_read/reg_write: the map itself, or the bus context. */
static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus || (!map->bus && map->read)) ?
		map : map->bus_context;
}

/* Locked-caller write entry point: cache first, then hit the hardware. */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	ret = map->reg_write(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x <= %x\n", reg, val);

		trace_regmap_reg_write(map, reg, val);
	}

	return ret;
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
2007 */ 2008 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) 2009 { 2010 int ret; 2011 2012 if (!IS_ALIGNED(reg, map->reg_stride)) 2013 return -EINVAL; 2014 2015 map->lock(map->lock_arg); 2016 2017 map->async = true; 2018 2019 ret = _regmap_write(map, reg, val); 2020 2021 map->async = false; 2022 2023 map->unlock(map->lock_arg); 2024 2025 return ret; 2026 } 2027 EXPORT_SYMBOL_GPL(regmap_write_async); 2028 2029 int _regmap_raw_write(struct regmap *map, unsigned int reg, 2030 const void *val, size_t val_len, bool noinc) 2031 { 2032 size_t val_bytes = map->format.val_bytes; 2033 size_t val_count = val_len / val_bytes; 2034 size_t chunk_count, chunk_bytes; 2035 size_t chunk_regs = val_count; 2036 int ret, i; 2037 2038 if (!val_count) 2039 return -EINVAL; 2040 2041 if (map->use_single_write) 2042 chunk_regs = 1; 2043 else if (map->max_raw_write && val_len > map->max_raw_write) 2044 chunk_regs = map->max_raw_write / val_bytes; 2045 2046 chunk_count = val_count / chunk_regs; 2047 chunk_bytes = chunk_regs * val_bytes; 2048 2049 /* Write as many bytes as possible with chunk_size */ 2050 for (i = 0; i < chunk_count; i++) { 2051 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc); 2052 if (ret) 2053 return ret; 2054 2055 reg += regmap_get_offset(map, chunk_regs); 2056 val += chunk_bytes; 2057 val_len -= chunk_bytes; 2058 } 2059 2060 /* Write remaining bytes */ 2061 if (val_len) 2062 ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc); 2063 2064 return ret; 2065 } 2066 2067 /** 2068 * regmap_raw_write() - Write raw values to one or more registers 2069 * 2070 * @map: Register map to write to 2071 * @reg: Initial register to write to 2072 * @val: Block of data to be written, laid out for direct transmission to the 2073 * device 2074 * @val_len: Length of data pointed to by val. 
2075 * 2076 * This function is intended to be used for things like firmware 2077 * download where a large block of data needs to be transferred to the 2078 * device. No formatting will be done on the data provided. 2079 * 2080 * A value of zero will be returned on success, a negative errno will 2081 * be returned in error cases. 2082 */ 2083 int regmap_raw_write(struct regmap *map, unsigned int reg, 2084 const void *val, size_t val_len) 2085 { 2086 int ret; 2087 2088 if (!regmap_can_raw_write(map)) 2089 return -EINVAL; 2090 if (val_len % map->format.val_bytes) 2091 return -EINVAL; 2092 2093 map->lock(map->lock_arg); 2094 2095 ret = _regmap_raw_write(map, reg, val, val_len, false); 2096 2097 map->unlock(map->lock_arg); 2098 2099 return ret; 2100 } 2101 EXPORT_SYMBOL_GPL(regmap_raw_write); 2102 2103 static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg, 2104 void *val, unsigned int val_len, bool write) 2105 { 2106 size_t val_bytes = map->format.val_bytes; 2107 size_t val_count = val_len / val_bytes; 2108 unsigned int lastval; 2109 u8 *u8p; 2110 u16 *u16p; 2111 u32 *u32p; 2112 int ret; 2113 int i; 2114 2115 switch (val_bytes) { 2116 case 1: 2117 u8p = val; 2118 if (write) 2119 lastval = (unsigned int)u8p[val_count - 1]; 2120 break; 2121 case 2: 2122 u16p = val; 2123 if (write) 2124 lastval = (unsigned int)u16p[val_count - 1]; 2125 break; 2126 case 4: 2127 u32p = val; 2128 if (write) 2129 lastval = (unsigned int)u32p[val_count - 1]; 2130 break; 2131 default: 2132 return -EINVAL; 2133 } 2134 2135 /* 2136 * Update the cache with the last value we write, the rest is just 2137 * gone down in the hardware FIFO. We can't cache FIFOs. This makes 2138 * sure a single read from the cache will work. 
2139 */ 2140 if (write) { 2141 if (!map->cache_bypass && !map->defer_caching) { 2142 ret = regcache_write(map, reg, lastval); 2143 if (ret != 0) 2144 return ret; 2145 if (map->cache_only) { 2146 map->cache_dirty = true; 2147 return 0; 2148 } 2149 } 2150 ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count); 2151 } else { 2152 ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count); 2153 } 2154 2155 if (!ret && regmap_should_log(map)) { 2156 dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>"); 2157 for (i = 0; i < val_count; i++) { 2158 switch (val_bytes) { 2159 case 1: 2160 pr_cont("%x", u8p[i]); 2161 break; 2162 case 2: 2163 pr_cont("%x", u16p[i]); 2164 break; 2165 case 4: 2166 pr_cont("%x", u32p[i]); 2167 break; 2168 default: 2169 break; 2170 } 2171 if (i == (val_count - 1)) 2172 pr_cont("]\n"); 2173 else 2174 pr_cont(","); 2175 } 2176 } 2177 2178 return 0; 2179 } 2180 2181 /** 2182 * regmap_noinc_write(): Write data to a register without incrementing the 2183 * register number 2184 * 2185 * @map: Register map to write to 2186 * @reg: Register to write to 2187 * @val: Pointer to data buffer 2188 * @val_len: Length of output buffer in bytes. 2189 * 2190 * The regmap API usually assumes that bulk bus write operations will write a 2191 * range of registers. Some devices have certain registers for which a write 2192 * operation can write to an internal FIFO. 2193 * 2194 * The target register must be volatile but registers after it can be 2195 * completely unrelated cacheable registers. 2196 * 2197 * This will attempt multiple writes as required to write val_len bytes. 2198 * 2199 * A value of zero will be returned on success, a negative errno will be 2200 * returned in error cases. 
2201 */ 2202 int regmap_noinc_write(struct regmap *map, unsigned int reg, 2203 const void *val, size_t val_len) 2204 { 2205 size_t write_len; 2206 int ret; 2207 2208 if (!map->write && !(map->bus && map->bus->reg_noinc_write)) 2209 return -EINVAL; 2210 if (val_len % map->format.val_bytes) 2211 return -EINVAL; 2212 if (!IS_ALIGNED(reg, map->reg_stride)) 2213 return -EINVAL; 2214 if (val_len == 0) 2215 return -EINVAL; 2216 2217 map->lock(map->lock_arg); 2218 2219 if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) { 2220 ret = -EINVAL; 2221 goto out_unlock; 2222 } 2223 2224 /* 2225 * Use the accelerated operation if we can. The val drops the const 2226 * typing in order to facilitate code reuse in regmap_noinc_readwrite(). 2227 */ 2228 if (map->bus->reg_noinc_write) { 2229 ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true); 2230 goto out_unlock; 2231 } 2232 2233 while (val_len) { 2234 if (map->max_raw_write && map->max_raw_write < val_len) 2235 write_len = map->max_raw_write; 2236 else 2237 write_len = val_len; 2238 ret = _regmap_raw_write(map, reg, val, write_len, true); 2239 if (ret) 2240 goto out_unlock; 2241 val = ((u8 *)val) + write_len; 2242 val_len -= write_len; 2243 } 2244 2245 out_unlock: 2246 map->unlock(map->lock_arg); 2247 return ret; 2248 } 2249 EXPORT_SYMBOL_GPL(regmap_noinc_write); 2250 2251 /** 2252 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a 2253 * register field. 2254 * 2255 * @field: Register field to write to 2256 * @mask: Bitmask to change 2257 * @val: Value to be written 2258 * @change: Boolean indicating if a write was done 2259 * @async: Boolean indicating asynchronously 2260 * @force: Boolean indicating use force update 2261 * 2262 * Perform a read/modify/write cycle on the register field with change, 2263 * async, force option. 2264 * 2265 * A value of zero will be returned on success, a negative errno will 2266 * be returned in error cases. 
2267 */ 2268 int regmap_field_update_bits_base(struct regmap_field *field, 2269 unsigned int mask, unsigned int val, 2270 bool *change, bool async, bool force) 2271 { 2272 mask = (mask << field->shift) & field->mask; 2273 2274 return regmap_update_bits_base(field->regmap, field->reg, 2275 mask, val << field->shift, 2276 change, async, force); 2277 } 2278 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base); 2279 2280 /** 2281 * regmap_field_test_bits() - Check if all specified bits are set in a 2282 * register field. 2283 * 2284 * @field: Register field to operate on 2285 * @bits: Bits to test 2286 * 2287 * Returns negative errno if the underlying regmap_field_read() fails, 2288 * 0 if at least one of the tested bits is not set and 1 if all tested 2289 * bits are set. 2290 */ 2291 int regmap_field_test_bits(struct regmap_field *field, unsigned int bits) 2292 { 2293 unsigned int val; 2294 int ret; 2295 2296 ret = regmap_field_read(field, &val); 2297 if (ret) 2298 return ret; 2299 2300 return (val & bits) == bits; 2301 } 2302 EXPORT_SYMBOL_GPL(regmap_field_test_bits); 2303 2304 /** 2305 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a 2306 * register field with port ID 2307 * 2308 * @field: Register field to write to 2309 * @id: port ID 2310 * @mask: Bitmask to change 2311 * @val: Value to be written 2312 * @change: Boolean indicating if a write was done 2313 * @async: Boolean indicating asynchronously 2314 * @force: Boolean indicating use force update 2315 * 2316 * A value of zero will be returned on success, a negative errno will 2317 * be returned in error cases. 
2318 */ 2319 int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, 2320 unsigned int mask, unsigned int val, 2321 bool *change, bool async, bool force) 2322 { 2323 if (id >= field->id_size) 2324 return -EINVAL; 2325 2326 mask = (mask << field->shift) & field->mask; 2327 2328 return regmap_update_bits_base(field->regmap, 2329 field->reg + (field->id_offset * id), 2330 mask, val << field->shift, 2331 change, async, force); 2332 } 2333 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base); 2334 2335 /** 2336 * regmap_bulk_write() - Write multiple registers to the device 2337 * 2338 * @map: Register map to write to 2339 * @reg: First register to be write from 2340 * @val: Block of data to be written, in native register size for device 2341 * @val_count: Number of registers to write 2342 * 2343 * This function is intended to be used for writing a large block of 2344 * data to the device either in single transfer or multiple transfer. 2345 * 2346 * A value of zero will be returned on success, a negative errno will 2347 * be returned in error cases. 2348 */ 2349 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 2350 size_t val_count) 2351 { 2352 int ret = 0, i; 2353 size_t val_bytes = map->format.val_bytes; 2354 2355 if (!IS_ALIGNED(reg, map->reg_stride)) 2356 return -EINVAL; 2357 2358 /* 2359 * Some devices don't support bulk write, for them we have a series of 2360 * single write operations. 
2361 */ 2362 if (!map->write || !map->format.parse_inplace) { 2363 map->lock(map->lock_arg); 2364 for (i = 0; i < val_count; i++) { 2365 unsigned int ival; 2366 2367 switch (val_bytes) { 2368 case 1: 2369 ival = *(u8 *)(val + (i * val_bytes)); 2370 break; 2371 case 2: 2372 ival = *(u16 *)(val + (i * val_bytes)); 2373 break; 2374 case 4: 2375 ival = *(u32 *)(val + (i * val_bytes)); 2376 break; 2377 default: 2378 ret = -EINVAL; 2379 goto out; 2380 } 2381 2382 ret = _regmap_write(map, 2383 reg + regmap_get_offset(map, i), 2384 ival); 2385 if (ret != 0) 2386 goto out; 2387 } 2388 out: 2389 map->unlock(map->lock_arg); 2390 } else { 2391 void *wval; 2392 2393 wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags); 2394 if (!wval) 2395 return -ENOMEM; 2396 2397 for (i = 0; i < val_count * val_bytes; i += val_bytes) 2398 map->format.parse_inplace(wval + i); 2399 2400 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count); 2401 2402 kfree(wval); 2403 } 2404 2405 if (!ret) 2406 trace_regmap_bulk_write(map, reg, val, val_bytes * val_count); 2407 2408 return ret; 2409 } 2410 EXPORT_SYMBOL_GPL(regmap_bulk_write); 2411 2412 /* 2413 * _regmap_raw_multi_reg_write() 2414 * 2415 * the (register,newvalue) pairs in regs have not been formatted, but 2416 * they are all in the same page and have been changed to being page 2417 * relative. The page register has been written if that was necessary. 
2418 */ 2419 static int _regmap_raw_multi_reg_write(struct regmap *map, 2420 const struct reg_sequence *regs, 2421 size_t num_regs) 2422 { 2423 int ret; 2424 void *buf; 2425 int i; 2426 u8 *u8; 2427 size_t val_bytes = map->format.val_bytes; 2428 size_t reg_bytes = map->format.reg_bytes; 2429 size_t pad_bytes = map->format.pad_bytes; 2430 size_t pair_size = reg_bytes + pad_bytes + val_bytes; 2431 size_t len = pair_size * num_regs; 2432 2433 if (!len) 2434 return -EINVAL; 2435 2436 buf = kzalloc(len, GFP_KERNEL); 2437 if (!buf) 2438 return -ENOMEM; 2439 2440 /* We have to linearise by hand. */ 2441 2442 u8 = buf; 2443 2444 for (i = 0; i < num_regs; i++) { 2445 unsigned int reg = regs[i].reg; 2446 unsigned int val = regs[i].def; 2447 trace_regmap_hw_write_start(map, reg, 1); 2448 reg = regmap_reg_addr(map, reg); 2449 map->format.format_reg(u8, reg, map->reg_shift); 2450 u8 += reg_bytes + pad_bytes; 2451 map->format.format_val(u8, val, 0); 2452 u8 += val_bytes; 2453 } 2454 u8 = buf; 2455 *u8 |= map->write_flag_mask; 2456 2457 ret = map->write(map->bus_context, buf, len); 2458 2459 kfree(buf); 2460 2461 for (i = 0; i < num_regs; i++) { 2462 int reg = regs[i].reg; 2463 trace_regmap_hw_write_done(map, reg, 1); 2464 } 2465 return ret; 2466 } 2467 2468 static unsigned int _regmap_register_page(struct regmap *map, 2469 unsigned int reg, 2470 struct regmap_range_node *range) 2471 { 2472 unsigned int win_page = (reg - range->range_min) / range->window_len; 2473 2474 return win_page; 2475 } 2476 2477 static int _regmap_range_multi_paged_reg_write(struct regmap *map, 2478 struct reg_sequence *regs, 2479 size_t num_regs) 2480 { 2481 int ret; 2482 int i, n; 2483 struct reg_sequence *base; 2484 unsigned int this_page = 0; 2485 unsigned int page_change = 0; 2486 /* 2487 * the set of registers are not neccessarily in order, but 2488 * since the order of write must be preserved this algorithm 2489 * chops the set each time the page changes. 
This also applies 2490 * if there is a delay required at any point in the sequence. 2491 */ 2492 base = regs; 2493 for (i = 0, n = 0; i < num_regs; i++, n++) { 2494 unsigned int reg = regs[i].reg; 2495 struct regmap_range_node *range; 2496 2497 range = _regmap_range_lookup(map, reg); 2498 if (range) { 2499 unsigned int win_page = _regmap_register_page(map, reg, 2500 range); 2501 2502 if (i == 0) 2503 this_page = win_page; 2504 if (win_page != this_page) { 2505 this_page = win_page; 2506 page_change = 1; 2507 } 2508 } 2509 2510 /* If we have both a page change and a delay make sure to 2511 * write the regs and apply the delay before we change the 2512 * page. 2513 */ 2514 2515 if (page_change || regs[i].delay_us) { 2516 2517 /* For situations where the first write requires 2518 * a delay we need to make sure we don't call 2519 * raw_multi_reg_write with n=0 2520 * This can't occur with page breaks as we 2521 * never write on the first iteration 2522 */ 2523 if (regs[i].delay_us && i == 0) 2524 n = 1; 2525 2526 ret = _regmap_raw_multi_reg_write(map, base, n); 2527 if (ret != 0) 2528 return ret; 2529 2530 if (regs[i].delay_us) { 2531 if (map->can_sleep) 2532 fsleep(regs[i].delay_us); 2533 else 2534 udelay(regs[i].delay_us); 2535 } 2536 2537 base += n; 2538 n = 0; 2539 2540 if (page_change) { 2541 ret = _regmap_select_page(map, 2542 &base[n].reg, 2543 range, 1); 2544 if (ret != 0) 2545 return ret; 2546 2547 page_change = 0; 2548 } 2549 2550 } 2551 2552 } 2553 if (n > 0) 2554 return _regmap_raw_multi_reg_write(map, base, n); 2555 return 0; 2556 } 2557 2558 static int _regmap_multi_reg_write(struct regmap *map, 2559 const struct reg_sequence *regs, 2560 size_t num_regs) 2561 { 2562 int i; 2563 int ret; 2564 2565 if (!map->can_multi_write) { 2566 for (i = 0; i < num_regs; i++) { 2567 ret = _regmap_write(map, regs[i].reg, regs[i].def); 2568 if (ret != 0) 2569 return ret; 2570 2571 if (regs[i].delay_us) { 2572 if (map->can_sleep) 2573 fsleep(regs[i].delay_us); 2574 else 
2575 udelay(regs[i].delay_us); 2576 } 2577 } 2578 return 0; 2579 } 2580 2581 if (!map->format.parse_inplace) 2582 return -EINVAL; 2583 2584 if (map->writeable_reg) 2585 for (i = 0; i < num_regs; i++) { 2586 int reg = regs[i].reg; 2587 if (!map->writeable_reg(map->dev, reg)) 2588 return -EINVAL; 2589 if (!IS_ALIGNED(reg, map->reg_stride)) 2590 return -EINVAL; 2591 } 2592 2593 if (!map->cache_bypass) { 2594 for (i = 0; i < num_regs; i++) { 2595 unsigned int val = regs[i].def; 2596 unsigned int reg = regs[i].reg; 2597 ret = regcache_write(map, reg, val); 2598 if (ret) { 2599 dev_err(map->dev, 2600 "Error in caching of register: %x ret: %d\n", 2601 reg, ret); 2602 return ret; 2603 } 2604 } 2605 if (map->cache_only) { 2606 map->cache_dirty = true; 2607 return 0; 2608 } 2609 } 2610 2611 WARN_ON(!map->bus); 2612 2613 for (i = 0; i < num_regs; i++) { 2614 unsigned int reg = regs[i].reg; 2615 struct regmap_range_node *range; 2616 2617 /* Coalesce all the writes between a page break or a delay 2618 * in a sequence 2619 */ 2620 range = _regmap_range_lookup(map, reg); 2621 if (range || regs[i].delay_us) { 2622 size_t len = sizeof(struct reg_sequence)*num_regs; 2623 struct reg_sequence *base = kmemdup(regs, len, 2624 GFP_KERNEL); 2625 if (!base) 2626 return -ENOMEM; 2627 ret = _regmap_range_multi_paged_reg_write(map, base, 2628 num_regs); 2629 kfree(base); 2630 2631 return ret; 2632 } 2633 } 2634 return _regmap_raw_multi_reg_write(map, regs, num_regs); 2635 } 2636 2637 /** 2638 * regmap_multi_reg_write() - Write multiple registers to the device 2639 * 2640 * @map: Register map to write to 2641 * @regs: Array of structures containing register,value to be written 2642 * @num_regs: Number of registers to write 2643 * 2644 * Write multiple registers to the device where the set of register, value 2645 * pairs are supplied in any order, possibly not all in a single range. 
2646 * 2647 * The 'normal' block write mode will send ultimately send data on the 2648 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are 2649 * addressed. However, this alternative block multi write mode will send 2650 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device 2651 * must of course support the mode. 2652 * 2653 * A value of zero will be returned on success, a negative errno will be 2654 * returned in error cases. 2655 */ 2656 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, 2657 int num_regs) 2658 { 2659 int ret; 2660 2661 map->lock(map->lock_arg); 2662 2663 ret = _regmap_multi_reg_write(map, regs, num_regs); 2664 2665 map->unlock(map->lock_arg); 2666 2667 return ret; 2668 } 2669 EXPORT_SYMBOL_GPL(regmap_multi_reg_write); 2670 2671 /** 2672 * regmap_multi_reg_write_bypassed() - Write multiple registers to the 2673 * device but not the cache 2674 * 2675 * @map: Register map to write to 2676 * @regs: Array of structures containing register,value to be written 2677 * @num_regs: Number of registers to write 2678 * 2679 * Write multiple registers to the device but not the cache where the set 2680 * of register are supplied in any order. 2681 * 2682 * This function is intended to be used for writing a large block of data 2683 * atomically to the device in single transfer for those I2C client devices 2684 * that implement this alternative block write mode. 2685 * 2686 * A value of zero will be returned on success, a negative errno will 2687 * be returned in error cases. 
2688 */ 2689 int regmap_multi_reg_write_bypassed(struct regmap *map, 2690 const struct reg_sequence *regs, 2691 int num_regs) 2692 { 2693 int ret; 2694 bool bypass; 2695 2696 map->lock(map->lock_arg); 2697 2698 bypass = map->cache_bypass; 2699 map->cache_bypass = true; 2700 2701 ret = _regmap_multi_reg_write(map, regs, num_regs); 2702 2703 map->cache_bypass = bypass; 2704 2705 map->unlock(map->lock_arg); 2706 2707 return ret; 2708 } 2709 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); 2710 2711 /** 2712 * regmap_raw_write_async() - Write raw values to one or more registers 2713 * asynchronously 2714 * 2715 * @map: Register map to write to 2716 * @reg: Initial register to write to 2717 * @val: Block of data to be written, laid out for direct transmission to the 2718 * device. Must be valid until regmap_async_complete() is called. 2719 * @val_len: Length of data pointed to by val. 2720 * 2721 * This function is intended to be used for things like firmware 2722 * download where a large block of data needs to be transferred to the 2723 * device. No formatting will be done on the data provided. 2724 * 2725 * If supported by the underlying bus the write will be scheduled 2726 * asynchronously, helping maximise I/O speed on higher speed buses 2727 * like SPI. regmap_async_complete() can be called to ensure that all 2728 * asynchrnous writes have been completed. 2729 * 2730 * A value of zero will be returned on success, a negative errno will 2731 * be returned in error cases. 
2732 */ 2733 int regmap_raw_write_async(struct regmap *map, unsigned int reg, 2734 const void *val, size_t val_len) 2735 { 2736 int ret; 2737 2738 if (val_len % map->format.val_bytes) 2739 return -EINVAL; 2740 if (!IS_ALIGNED(reg, map->reg_stride)) 2741 return -EINVAL; 2742 2743 map->lock(map->lock_arg); 2744 2745 map->async = true; 2746 2747 ret = _regmap_raw_write(map, reg, val, val_len, false); 2748 2749 map->async = false; 2750 2751 map->unlock(map->lock_arg); 2752 2753 return ret; 2754 } 2755 EXPORT_SYMBOL_GPL(regmap_raw_write_async); 2756 2757 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2758 unsigned int val_len, bool noinc) 2759 { 2760 struct regmap_range_node *range; 2761 int ret; 2762 2763 if (!map->read) 2764 return -EINVAL; 2765 2766 range = _regmap_range_lookup(map, reg); 2767 if (range) { 2768 ret = _regmap_select_page(map, ®, range, 2769 noinc ? 1 : val_len / map->format.val_bytes); 2770 if (ret != 0) 2771 return ret; 2772 } 2773 2774 reg = regmap_reg_addr(map, reg); 2775 map->format.format_reg(map->work_buf, reg, map->reg_shift); 2776 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, 2777 map->read_flag_mask); 2778 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); 2779 2780 ret = map->read(map->bus_context, map->work_buf, 2781 map->format.reg_bytes + map->format.pad_bytes, 2782 val, val_len); 2783 2784 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); 2785 2786 return ret; 2787 } 2788 2789 static int _regmap_bus_reg_read(void *context, unsigned int reg, 2790 unsigned int *val) 2791 { 2792 struct regmap *map = context; 2793 struct regmap_range_node *range; 2794 int ret; 2795 2796 range = _regmap_range_lookup(map, reg); 2797 if (range) { 2798 ret = _regmap_select_page(map, ®, range, 1); 2799 if (ret != 0) 2800 return ret; 2801 } 2802 2803 reg = regmap_reg_addr(map, reg); 2804 return map->bus->reg_read(map->bus_context, reg, val); 2805 } 2806 2807 static int 
_regmap_bus_read(void *context, unsigned int reg, 2808 unsigned int *val) 2809 { 2810 int ret; 2811 struct regmap *map = context; 2812 void *work_val = map->work_buf + map->format.reg_bytes + 2813 map->format.pad_bytes; 2814 2815 if (!map->format.parse_val) 2816 return -EINVAL; 2817 2818 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); 2819 if (ret == 0) 2820 *val = map->format.parse_val(work_val); 2821 2822 return ret; 2823 } 2824 2825 static int _regmap_read(struct regmap *map, unsigned int reg, 2826 unsigned int *val) 2827 { 2828 int ret; 2829 void *context = _regmap_map_get_context(map); 2830 2831 if (!map->cache_bypass) { 2832 ret = regcache_read(map, reg, val); 2833 if (ret == 0) 2834 return 0; 2835 } 2836 2837 if (map->cache_only) 2838 return -EBUSY; 2839 2840 if (!regmap_readable(map, reg)) 2841 return -EIO; 2842 2843 ret = map->reg_read(context, reg, val); 2844 if (ret == 0) { 2845 if (regmap_should_log(map)) 2846 dev_info(map->dev, "%x => %x\n", reg, *val); 2847 2848 trace_regmap_reg_read(map, reg, *val); 2849 2850 if (!map->cache_bypass) 2851 regcache_write(map, reg, *val); 2852 } 2853 2854 return ret; 2855 } 2856 2857 /** 2858 * regmap_read() - Read a value from a single register 2859 * 2860 * @map: Register map to read from 2861 * @reg: Register to be read from 2862 * @val: Pointer to store read value 2863 * 2864 * A value of zero will be returned on success, a negative errno will 2865 * be returned in error cases. 
2866 */ 2867 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) 2868 { 2869 int ret; 2870 2871 if (!IS_ALIGNED(reg, map->reg_stride)) 2872 return -EINVAL; 2873 2874 map->lock(map->lock_arg); 2875 2876 ret = _regmap_read(map, reg, val); 2877 2878 map->unlock(map->lock_arg); 2879 2880 return ret; 2881 } 2882 EXPORT_SYMBOL_GPL(regmap_read); 2883 2884 /** 2885 * regmap_read_bypassed() - Read a value from a single register direct 2886 * from the device, bypassing the cache 2887 * 2888 * @map: Register map to read from 2889 * @reg: Register to be read from 2890 * @val: Pointer to store read value 2891 * 2892 * A value of zero will be returned on success, a negative errno will 2893 * be returned in error cases. 2894 */ 2895 int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val) 2896 { 2897 int ret; 2898 bool bypass, cache_only; 2899 2900 if (!IS_ALIGNED(reg, map->reg_stride)) 2901 return -EINVAL; 2902 2903 map->lock(map->lock_arg); 2904 2905 bypass = map->cache_bypass; 2906 cache_only = map->cache_only; 2907 map->cache_bypass = true; 2908 map->cache_only = false; 2909 2910 ret = _regmap_read(map, reg, val); 2911 2912 map->cache_bypass = bypass; 2913 map->cache_only = cache_only; 2914 2915 map->unlock(map->lock_arg); 2916 2917 return ret; 2918 } 2919 EXPORT_SYMBOL_GPL(regmap_read_bypassed); 2920 2921 /** 2922 * regmap_raw_read() - Read raw data from the device 2923 * 2924 * @map: Register map to read from 2925 * @reg: First register to be read from 2926 * @val: Pointer to store read value 2927 * @val_len: Size of data to read 2928 * 2929 * A value of zero will be returned on success, a negative errno will 2930 * be returned in error cases. 
2931 */ 2932 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2933 size_t val_len) 2934 { 2935 size_t val_bytes = map->format.val_bytes; 2936 size_t val_count = val_len / val_bytes; 2937 unsigned int v; 2938 int ret, i; 2939 2940 if (val_len % map->format.val_bytes) 2941 return -EINVAL; 2942 if (!IS_ALIGNED(reg, map->reg_stride)) 2943 return -EINVAL; 2944 if (val_count == 0) 2945 return -EINVAL; 2946 2947 map->lock(map->lock_arg); 2948 2949 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 2950 map->cache_type == REGCACHE_NONE) { 2951 size_t chunk_count, chunk_bytes; 2952 size_t chunk_regs = val_count; 2953 2954 if (!map->cache_bypass && map->cache_only) { 2955 ret = -EBUSY; 2956 goto out; 2957 } 2958 2959 if (!map->read) { 2960 ret = -ENOTSUPP; 2961 goto out; 2962 } 2963 2964 if (map->use_single_read) 2965 chunk_regs = 1; 2966 else if (map->max_raw_read && val_len > map->max_raw_read) 2967 chunk_regs = map->max_raw_read / val_bytes; 2968 2969 chunk_count = val_count / chunk_regs; 2970 chunk_bytes = chunk_regs * val_bytes; 2971 2972 /* Read bytes that fit into whole chunks */ 2973 for (i = 0; i < chunk_count; i++) { 2974 ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); 2975 if (ret != 0) 2976 goto out; 2977 2978 reg += regmap_get_offset(map, chunk_regs); 2979 val += chunk_bytes; 2980 val_len -= chunk_bytes; 2981 } 2982 2983 /* Read remaining bytes */ 2984 if (val_len) { 2985 ret = _regmap_raw_read(map, reg, val, val_len, false); 2986 if (ret != 0) 2987 goto out; 2988 } 2989 } else { 2990 /* Otherwise go word by word for the cache; should be low 2991 * cost as we expect to hit the cache. 
2992 */ 2993 for (i = 0; i < val_count; i++) { 2994 ret = _regmap_read(map, reg + regmap_get_offset(map, i), 2995 &v); 2996 if (ret != 0) 2997 goto out; 2998 2999 map->format.format_val(val + (i * val_bytes), v, 0); 3000 } 3001 } 3002 3003 out: 3004 map->unlock(map->lock_arg); 3005 3006 return ret; 3007 } 3008 EXPORT_SYMBOL_GPL(regmap_raw_read); 3009 3010 /** 3011 * regmap_noinc_read(): Read data from a register without incrementing the 3012 * register number 3013 * 3014 * @map: Register map to read from 3015 * @reg: Register to read from 3016 * @val: Pointer to data buffer 3017 * @val_len: Length of output buffer in bytes. 3018 * 3019 * The regmap API usually assumes that bulk read operations will read a 3020 * range of registers. Some devices have certain registers for which a read 3021 * operation read will read from an internal FIFO. 3022 * 3023 * The target register must be volatile but registers after it can be 3024 * completely unrelated cacheable registers. 3025 * 3026 * This will attempt multiple reads as required to read val_len bytes. 3027 * 3028 * A value of zero will be returned on success, a negative errno will be 3029 * returned in error cases. 3030 */ 3031 int regmap_noinc_read(struct regmap *map, unsigned int reg, 3032 void *val, size_t val_len) 3033 { 3034 size_t read_len; 3035 int ret; 3036 3037 if (!map->read) 3038 return -ENOTSUPP; 3039 3040 if (val_len % map->format.val_bytes) 3041 return -EINVAL; 3042 if (!IS_ALIGNED(reg, map->reg_stride)) 3043 return -EINVAL; 3044 if (val_len == 0) 3045 return -EINVAL; 3046 3047 map->lock(map->lock_arg); 3048 3049 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) { 3050 ret = -EINVAL; 3051 goto out_unlock; 3052 } 3053 3054 /* 3055 * We have not defined the FIFO semantics for cache, as the 3056 * cache is just one value deep. Should we return the last 3057 * written value? Just avoid this by always reading the FIFO 3058 * even when using cache. Cache only will not work. 
3059 */ 3060 if (!map->cache_bypass && map->cache_only) { 3061 ret = -EBUSY; 3062 goto out_unlock; 3063 } 3064 3065 /* Use the accelerated operation if we can */ 3066 if (map->bus->reg_noinc_read) { 3067 ret = regmap_noinc_readwrite(map, reg, val, val_len, false); 3068 goto out_unlock; 3069 } 3070 3071 while (val_len) { 3072 if (map->max_raw_read && map->max_raw_read < val_len) 3073 read_len = map->max_raw_read; 3074 else 3075 read_len = val_len; 3076 ret = _regmap_raw_read(map, reg, val, read_len, true); 3077 if (ret) 3078 goto out_unlock; 3079 val = ((u8 *)val) + read_len; 3080 val_len -= read_len; 3081 } 3082 3083 out_unlock: 3084 map->unlock(map->lock_arg); 3085 return ret; 3086 } 3087 EXPORT_SYMBOL_GPL(regmap_noinc_read); 3088 3089 /** 3090 * regmap_field_read(): Read a value to a single register field 3091 * 3092 * @field: Register field to read from 3093 * @val: Pointer to store read value 3094 * 3095 * A value of zero will be returned on success, a negative errno will 3096 * be returned in error cases. 3097 */ 3098 int regmap_field_read(struct regmap_field *field, unsigned int *val) 3099 { 3100 int ret; 3101 unsigned int reg_val; 3102 ret = regmap_read(field->regmap, field->reg, ®_val); 3103 if (ret != 0) 3104 return ret; 3105 3106 reg_val &= field->mask; 3107 reg_val >>= field->shift; 3108 *val = reg_val; 3109 3110 return ret; 3111 } 3112 EXPORT_SYMBOL_GPL(regmap_field_read); 3113 3114 /** 3115 * regmap_fields_read() - Read a value to a single register field with port ID 3116 * 3117 * @field: Register field to read from 3118 * @id: port ID 3119 * @val: Pointer to store read value 3120 * 3121 * A value of zero will be returned on success, a negative errno will 3122 * be returned in error cases. 
3123 */ 3124 int regmap_fields_read(struct regmap_field *field, unsigned int id, 3125 unsigned int *val) 3126 { 3127 int ret; 3128 unsigned int reg_val; 3129 3130 if (id >= field->id_size) 3131 return -EINVAL; 3132 3133 ret = regmap_read(field->regmap, 3134 field->reg + (field->id_offset * id), 3135 ®_val); 3136 if (ret != 0) 3137 return ret; 3138 3139 reg_val &= field->mask; 3140 reg_val >>= field->shift; 3141 *val = reg_val; 3142 3143 return ret; 3144 } 3145 EXPORT_SYMBOL_GPL(regmap_fields_read); 3146 3147 static int _regmap_bulk_read(struct regmap *map, unsigned int reg, 3148 const unsigned int *regs, void *val, size_t val_count) 3149 { 3150 u32 *u32 = val; 3151 u16 *u16 = val; 3152 u8 *u8 = val; 3153 int ret, i; 3154 3155 map->lock(map->lock_arg); 3156 3157 for (i = 0; i < val_count; i++) { 3158 unsigned int ival; 3159 3160 if (regs) { 3161 if (!IS_ALIGNED(regs[i], map->reg_stride)) { 3162 ret = -EINVAL; 3163 goto out; 3164 } 3165 ret = _regmap_read(map, regs[i], &ival); 3166 } else { 3167 ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival); 3168 } 3169 if (ret != 0) 3170 goto out; 3171 3172 switch (map->format.val_bytes) { 3173 case 4: 3174 u32[i] = ival; 3175 break; 3176 case 2: 3177 u16[i] = ival; 3178 break; 3179 case 1: 3180 u8[i] = ival; 3181 break; 3182 default: 3183 ret = -EINVAL; 3184 goto out; 3185 } 3186 } 3187 out: 3188 map->unlock(map->lock_arg); 3189 return ret; 3190 } 3191 3192 /** 3193 * regmap_bulk_read() - Read multiple sequential registers from the device 3194 * 3195 * @map: Register map to read from 3196 * @reg: First register to be read from 3197 * @val: Pointer to store read value, in native register size for device 3198 * @val_count: Number of registers to read 3199 * 3200 * A value of zero will be returned on success, a negative errno will 3201 * be returned in error cases. 
3202 */ 3203 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 3204 size_t val_count) 3205 { 3206 int ret, i; 3207 size_t val_bytes = map->format.val_bytes; 3208 bool vol = regmap_volatile_range(map, reg, val_count); 3209 3210 if (!IS_ALIGNED(reg, map->reg_stride)) 3211 return -EINVAL; 3212 if (val_count == 0) 3213 return -EINVAL; 3214 3215 if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { 3216 ret = regmap_raw_read(map, reg, val, val_bytes * val_count); 3217 if (ret != 0) 3218 return ret; 3219 3220 for (i = 0; i < val_count * val_bytes; i += val_bytes) 3221 map->format.parse_inplace(val + i); 3222 } else { 3223 ret = _regmap_bulk_read(map, reg, NULL, val, val_count); 3224 } 3225 if (!ret) 3226 trace_regmap_bulk_read(map, reg, val, val_bytes * val_count); 3227 return ret; 3228 } 3229 EXPORT_SYMBOL_GPL(regmap_bulk_read); 3230 3231 /** 3232 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device 3233 * 3234 * @map: Register map to read from 3235 * @regs: Array of registers to read from 3236 * @val: Pointer to store read value, in native register size for device 3237 * @val_count: Number of registers to read 3238 * 3239 * A value of zero will be returned on success, a negative errno will 3240 * be returned in error cases. 
3241 */ 3242 int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val, 3243 size_t val_count) 3244 { 3245 if (val_count == 0) 3246 return -EINVAL; 3247 3248 return _regmap_bulk_read(map, 0, regs, val, val_count); 3249 } 3250 EXPORT_SYMBOL_GPL(regmap_multi_reg_read); 3251 3252 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 3253 unsigned int mask, unsigned int val, 3254 bool *change, bool force_write) 3255 { 3256 int ret; 3257 unsigned int tmp, orig; 3258 3259 if (change) 3260 *change = false; 3261 3262 if (regmap_volatile(map, reg) && map->reg_update_bits) { 3263 reg = regmap_reg_addr(map, reg); 3264 ret = map->reg_update_bits(map->bus_context, reg, mask, val); 3265 if (ret == 0 && change) 3266 *change = true; 3267 } else { 3268 ret = _regmap_read(map, reg, &orig); 3269 if (ret != 0) 3270 return ret; 3271 3272 tmp = orig & ~mask; 3273 tmp |= val & mask; 3274 3275 if (force_write || (tmp != orig) || map->force_write_field) { 3276 ret = _regmap_write(map, reg, tmp); 3277 if (ret == 0 && change) 3278 *change = true; 3279 } 3280 } 3281 3282 return ret; 3283 } 3284 3285 /** 3286 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register 3287 * 3288 * @map: Register map to update 3289 * @reg: Register to update 3290 * @mask: Bitmask to change 3291 * @val: New value for bitmask 3292 * @change: Boolean indicating if a write was done 3293 * @async: Boolean indicating asynchronously 3294 * @force: Boolean indicating use force update 3295 * 3296 * Perform a read/modify/write cycle on a register map with change, async, force 3297 * options. 3298 * 3299 * If async is true: 3300 * 3301 * With most buses the read must be done synchronously so this is most useful 3302 * for devices with a cache which do not need to interact with the hardware to 3303 * determine the current register value. 3304 * 3305 * Returns zero for success, a negative number on error. 
3306 */ 3307 int regmap_update_bits_base(struct regmap *map, unsigned int reg, 3308 unsigned int mask, unsigned int val, 3309 bool *change, bool async, bool force) 3310 { 3311 int ret; 3312 3313 map->lock(map->lock_arg); 3314 3315 map->async = async; 3316 3317 ret = _regmap_update_bits(map, reg, mask, val, change, force); 3318 3319 map->async = false; 3320 3321 map->unlock(map->lock_arg); 3322 3323 return ret; 3324 } 3325 EXPORT_SYMBOL_GPL(regmap_update_bits_base); 3326 3327 /** 3328 * regmap_test_bits() - Check if all specified bits are set in a register. 3329 * 3330 * @map: Register map to operate on 3331 * @reg: Register to read from 3332 * @bits: Bits to test 3333 * 3334 * Returns 0 if at least one of the tested bits is not set, 1 if all tested 3335 * bits are set and a negative error number if the underlying regmap_read() 3336 * fails. 3337 */ 3338 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) 3339 { 3340 unsigned int val; 3341 int ret; 3342 3343 ret = regmap_read(map, reg, &val); 3344 if (ret) 3345 return ret; 3346 3347 return (val & bits) == bits; 3348 } 3349 EXPORT_SYMBOL_GPL(regmap_test_bits); 3350 3351 void regmap_async_complete_cb(struct regmap_async *async, int ret) 3352 { 3353 struct regmap *map = async->map; 3354 bool wake; 3355 3356 trace_regmap_async_io_complete(map); 3357 3358 spin_lock(&map->async_lock); 3359 list_move(&async->list, &map->async_free); 3360 wake = list_empty(&map->async_list); 3361 3362 if (ret != 0) 3363 map->async_ret = ret; 3364 3365 spin_unlock(&map->async_lock); 3366 3367 if (wake) 3368 wake_up(&map->async_waitq); 3369 } 3370 EXPORT_SYMBOL_GPL(regmap_async_complete_cb); 3371 3372 static int regmap_async_is_done(struct regmap *map) 3373 { 3374 unsigned long flags; 3375 int ret; 3376 3377 spin_lock_irqsave(&map->async_lock, flags); 3378 ret = list_empty(&map->async_list); 3379 spin_unlock_irqrestore(&map->async_lock, flags); 3380 3381 return ret; 3382 } 3383 3384 /** 3385 * regmap_async_complete - 
Ensure all asynchronous I/O has completed. 3386 * 3387 * @map: Map to operate on. 3388 * 3389 * Blocks until any pending asynchronous I/O has completed. Returns 3390 * an error code for any failed I/O operations. 3391 */ 3392 int regmap_async_complete(struct regmap *map) 3393 { 3394 unsigned long flags; 3395 int ret; 3396 3397 /* Nothing to do with no async support */ 3398 if (!map->bus || !map->bus->async_write) 3399 return 0; 3400 3401 trace_regmap_async_complete_start(map); 3402 3403 wait_event(map->async_waitq, regmap_async_is_done(map)); 3404 3405 spin_lock_irqsave(&map->async_lock, flags); 3406 ret = map->async_ret; 3407 map->async_ret = 0; 3408 spin_unlock_irqrestore(&map->async_lock, flags); 3409 3410 trace_regmap_async_complete_done(map); 3411 3412 return ret; 3413 } 3414 EXPORT_SYMBOL_GPL(regmap_async_complete); 3415 3416 /** 3417 * regmap_register_patch - Register and apply register updates to be applied 3418 * on device initialistion 3419 * 3420 * @map: Register map to apply updates to. 3421 * @regs: Values to update. 3422 * @num_regs: Number of entries in regs. 3423 * 3424 * Register a set of register updates to be applied to the device 3425 * whenever the device registers are synchronised with the cache and 3426 * apply them immediately. Typically this is used to apply 3427 * corrections to be applied to the device defaults on startup, such 3428 * as the updates some vendors provide to undocumented registers. 3429 * 3430 * The caller must ensure that this function cannot be called 3431 * concurrently with either itself or regcache_sync(). 
3432 */ 3433 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, 3434 int num_regs) 3435 { 3436 struct reg_sequence *p; 3437 int ret; 3438 bool bypass; 3439 3440 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", 3441 num_regs)) 3442 return 0; 3443 3444 p = krealloc(map->patch, 3445 sizeof(struct reg_sequence) * (map->patch_regs + num_regs), 3446 GFP_KERNEL); 3447 if (p) { 3448 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); 3449 map->patch = p; 3450 map->patch_regs += num_regs; 3451 } else { 3452 return -ENOMEM; 3453 } 3454 3455 map->lock(map->lock_arg); 3456 3457 bypass = map->cache_bypass; 3458 3459 map->cache_bypass = true; 3460 map->async = true; 3461 3462 ret = _regmap_multi_reg_write(map, regs, num_regs); 3463 3464 map->async = false; 3465 map->cache_bypass = bypass; 3466 3467 map->unlock(map->lock_arg); 3468 3469 regmap_async_complete(map); 3470 3471 return ret; 3472 } 3473 EXPORT_SYMBOL_GPL(regmap_register_patch); 3474 3475 /** 3476 * regmap_get_val_bytes() - Report the size of a register value 3477 * 3478 * @map: Register map to operate on. 3479 * 3480 * Report the size of a register value, mainly intended to for use by 3481 * generic infrastructure built on top of regmap. 3482 */ 3483 int regmap_get_val_bytes(struct regmap *map) 3484 { 3485 if (map->format.format_write) 3486 return -EINVAL; 3487 3488 return map->format.val_bytes; 3489 } 3490 EXPORT_SYMBOL_GPL(regmap_get_val_bytes); 3491 3492 /** 3493 * regmap_get_max_register() - Report the max register value 3494 * 3495 * @map: Register map to operate on. 3496 * 3497 * Report the max register value, mainly intended to for use by 3498 * generic infrastructure built on top of regmap. 3499 */ 3500 int regmap_get_max_register(struct regmap *map) 3501 { 3502 return map->max_register_is_set ? 
map->max_register : -EINVAL; 3503 } 3504 EXPORT_SYMBOL_GPL(regmap_get_max_register); 3505 3506 /** 3507 * regmap_get_reg_stride() - Report the register address stride 3508 * 3509 * @map: Register map to operate on. 3510 * 3511 * Report the register address stride, mainly intended to for use by 3512 * generic infrastructure built on top of regmap. 3513 */ 3514 int regmap_get_reg_stride(struct regmap *map) 3515 { 3516 return map->reg_stride; 3517 } 3518 EXPORT_SYMBOL_GPL(regmap_get_reg_stride); 3519 3520 /** 3521 * regmap_might_sleep() - Returns whether a regmap access might sleep. 3522 * 3523 * @map: Register map to operate on. 3524 * 3525 * Returns true if an access to the register might sleep, else false. 3526 */ 3527 bool regmap_might_sleep(struct regmap *map) 3528 { 3529 return map->can_sleep; 3530 } 3531 EXPORT_SYMBOL_GPL(regmap_might_sleep); 3532 3533 int regmap_parse_val(struct regmap *map, const void *buf, 3534 unsigned int *val) 3535 { 3536 if (!map->format.parse_val) 3537 return -EINVAL; 3538 3539 *val = map->format.parse_val(buf); 3540 3541 return 0; 3542 } 3543 EXPORT_SYMBOL_GPL(regmap_parse_val); 3544 3545 static int __init regmap_initcall(void) 3546 { 3547 regmap_debugfs_initcall(); 3548 3549 return 0; 3550 } 3551 postcore_initcall(regmap_initcall); 3552