// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
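
/*
 * Illustrative sketch only: to enable the logging described above for
 * one device, the #undef above would be replaced with a definition of
 * LOG_DEVICE as that device's dev_name() string, e.g. (hypothetical
 * I2C device name):
 *
 *	#define LOG_DEVICE "0-001a"
 *
 * so that regmap_should_log() matches just that device.
 */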

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
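
/*
 * Illustrative sketch only (names hypothetical): a driver typically
 * builds an access table from regmap_range entries, with the "no"
 * ranges taking precedence over the "yes" ranges as implemented above:
 *
 *	static const struct regmap_range foo_rd_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *
 *	static const struct regmap_range foo_rd_no_ranges[] = {
 *		regmap_reg_range(0x10, 0x1f),	// write-only window
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges   = foo_rd_yes_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_rd_yes_ranges),
 *		.no_ranges    = foo_rd_no_ranges,
 *		.n_no_ranges  = ARRAY_SIZE(foo_rd_no_ranges),
 *	};
 *
 * which would then be supplied as the rd_table in the regmap_config.
 */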

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
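
/*
 * Worked example for the 7/9 format above, with hypothetical values:
 * for reg = 0x1a and val = 0x005, (reg << 9) | val = 0x3405, which
 * cpu_to_be16() lays out on the wire as the bytes 0x34 0x05.  This is
 * the packing used by devices (for example some audio codecs) taking a
 * 7-bit register address and a 9-bit value in one 16-bit word.
 */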

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{
}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
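
/*
 * Illustrative sketch only (names hypothetical): the precedence
 * implemented above is the regmap config, then the DT properties
 * "big-endian", "little-endian" and "native-endian", then the bus
 * default, then finally big endian.  A driver that must have
 * little-endian values regardless of bus or firmware defaults would
 * therefore set it in its config:
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
 *	};
 */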

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
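
/*
 * Illustrative sketch only: drivers do not normally call __regmap_init()
 * directly but go through a bus-specific wrapper such as
 * devm_regmap_init_i2c(), which supplies the bus and bus context.
 * Device and config names below are hypothetical:
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */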

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().  Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
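
/*
 * Illustrative sketch only (field layout hypothetical): a register
 * field is usually described with the REG_FIELD() helper, here bits
 * [3:0] of register 0x24, and then accessed through the regmap_field
 * API:
 *
 *	static const struct reg_field foo_rate_field = REG_FIELD(0x24, 0, 3);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_rate_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 */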

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name, match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
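
/*
 * Illustrative sketch only: a function driver for a child of an MFD
 * device will typically look up its parent's regmap rather than
 * creating its own:
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */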

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have a selector register inside the data window.
	   In that case, the selector register is located on every page and
	   needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use a separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
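
/*
 * Illustrative sketch only (numbers hypothetical): indirect/paged
 * access is described with a regmap_range_cfg.  Here virtual registers
 * 0x100-0x4ff are reached through a 0x100-register data window at
 * 0x10, with the page selected via the low bits of register 0x0f:
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.range_min      = 0x100,
 *			.range_max      = 0x4ff,
 *			.selector_reg   = 0x0f,
 *			.selector_mask  = 0x03,
 *			.selector_shift = 0,
 *			.window_start   = 0x10,
 *			.window_len     = 0x100,
 *		},
 *	};
 *
 * pointed at by .ranges/.num_ranges in the regmap_config.
 */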

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	for (i = 0; i < val_len / map->format.val_bytes; i++)
		if (!regmap_writeable(map,
				      reg + regmap_get_offset(map, i)))
			return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers, optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
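
/*
 * Illustrative sketch only (register and value hypothetical):
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, 0x01);
 *	if (ret)
 *		dev_err(dev, "failed to enable device: %d\n", ret);
 */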

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
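
/*
 * Illustrative sketch only (names hypothetical): the caller lays the
 * data out itself, e.g. big-endian 16-bit coefficients for a device
 * with 16-bit registers:
 *
 *	static const __be16 foo_coeffs[] = {
 *		cpu_to_be16(0x1234), cpu_to_be16(0x5678),
 *	};
 *
 *	ret = regmap_raw_write(map, FOO_REG_COEFF_BASE,
 *			       foo_coeffs, sizeof(foo_coeffs));
 */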

/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 *                        register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers.  Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
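
/*
 * Illustrative sketch only (FIFO register hypothetical): streaming a
 * buffer into a device FIFO that sits behind a single register
 * address:
 *
 *	ret = regmap_noinc_write(map, FOO_REG_FIFO, buf, len);
 */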

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with the change,
 * async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
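
/*
 * Illustrative sketch only: drivers usually reach these through the
 * regmap_field_write() and regmap_field_update_bits() helpers, which
 * pass NULL/false for @change, @async and @force, e.g.:
 *
 *	ret = regmap_field_write(rate_field, 0x3);
 */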

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
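
/*
 * Illustrative sketch only (base register hypothetical): values are
 * passed in native register size, here four 16-bit values written to
 * consecutive registers:
 *
 *	u16 vals[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(map, FOO_REG_BASE, vals, ARRAY_SIZE(vals));
 */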
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, value) pairs in @regs have not been formatted, but
 * they are all in the same page and have been converted to being page
 * relative.  The page register has already been written if that was
 * necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of the writes must be preserved this algorithm
	 * chops the set each time the page changes.  The same applies
	 * if a delay is required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.  This can't
			 * occur with page breaks as we never write on
			 * the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value pairs to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed.  However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus.  The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value pairs to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
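/*
 * Illustrative sketch, hypothetical register values: a power-up sequence
 * with a settle delay.  Ordering is preserved, and the entries following
 * the first one are only written once its delay_us has elapsed.
 *
 *	static const struct reg_sequence foo_power_seq[] = {
 *		{ .reg = 0x00, .def = 0x01, .delay_us = 100 },
 *		{ .reg = 0x01, .def = 0x80 },
 *		{ .reg = 0x02, .def = 0x55 },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, foo_power_seq,
 *				     ARRAY_SIZE(foo_power_seq));
 */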
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI.  regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
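/*
 * Illustrative sketch, with a made-up FOO_DSP_RAM_BASE register: streaming
 * a firmware blob using asynchronous raw writes.  The buffer must remain
 * valid until regmap_async_complete() returns; that call also reports any
 * failure from the queued writes.
 *
 *	static int foo_dsp_download(struct regmap *map, const u8 *fw,
 *				    size_t len)
 *	{
 *		int ret;
 *
 *		ret = regmap_raw_write_async(map, FOO_DSP_RAM_BASE, fw, len);
 *		if (ret)
 *			return ret;
 *
 *		return regmap_async_complete(map);
 *	}
 */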
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
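/*
 * Illustrative sketch, FOO_STATUS being hypothetical: a plain single
 * register read.  For non-volatile registers this is normally satisfied
 * from the cache without touching the bus.
 *
 *	unsigned int status;
 *	int ret;
 *
 *	ret = regmap_read(map, FOO_STATUS, &status);
 *	if (ret)
 *		return ret;
 */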
/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *                       register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers.  Some devices have certain registers for which a read
 * operation will instead read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
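/*
 * Illustrative sketch, FOO_FIFO being hypothetical: draining a FIFO that
 * is exposed at a single non-incrementing address.  The register must be
 * volatile and pass regmap_readable_noinc() or -EINVAL is returned.
 *
 *	u8 fifo[16];
 *	int ret;
 *
 *	ret = regmap_noinc_read(map, FOO_FIFO, fifo, sizeof(fifo));
 */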
/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with
 *                        port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
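/*
 * Illustrative sketch, FOO_SAMPLE_BASE being hypothetical: reading a
 * block of eight 16-bit sample registers into a native-endian buffer.
 *
 *	u16 samples[8];
 *	int ret;
 *
 *	ret = regmap_bulk_read(map, FOO_SAMPLE_BASE, samples,
 *			       ARRAY_SIZE(samples));
 */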
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be done asynchronously
 * @force: Boolean indicating that the write should be done even if the
 *	   current value matches
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
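/*
 * Illustrative sketch, FOO_CTRL and the masks being hypothetical: the
 * common wrappers around regmap_update_bits_base().  regmap_update_bits()
 * does a plain read/modify/write, regmap_update_bits_check() additionally
 * reports whether the register actually changed.
 *
 *	bool changed;
 *
 *	ret = regmap_update_bits(map, FOO_CTRL, FOO_EN_MASK, FOO_EN_MASK);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_update_bits_check(map, FOO_CTRL, FOO_MODE_MASK, 0,
 *				       &changed);
 */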
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.  Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
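/*
 * Illustrative sketch, with made-up errata values: registering a vendor
 * fixup with regmap_register_patch().  The patch is written immediately
 * and is re-applied after every regcache_sync().
 *
 *	static const struct reg_sequence foo_errata[] = {
 *		{ 0x7a, 0x0013 },
 *		{ 0x7b, 0x0271 },
 *	};
 *
 *	ret = regmap_register_patch(map, foo_errata, ARRAY_SIZE(foo_errata));
 */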