// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
/* True if @map is attached to the device whose name matches LOG_DEVICE. */
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif

/* Forward declarations for the core I/O callbacks defined later in this file. */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

/**
 * regmap_reg_in_ranges() - Check if a register falls in any of the ranges
 *
 * @reg: register address to look up
 * @ranges: array of register ranges to search
 * @nranges: number of entries in @ranges
 *
 * Returns true if @reg lies inside at least one entry of @ranges.
 */
bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

/**
 * regmap_check_range_table() - Check a register against an access table
 *
 * @map: regmap the table applies to (not dereferenced here)
 * @reg: register address to check
 * @table: access table with "yes" and "no" range lists
 *
 * "No" ranges take precedence over "yes" ranges; an empty "yes" list
 * means every register not explicitly denied is allowed.
 */
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

/*
 * Writability check: the per-register callback takes precedence over the
 * access table; with neither configured every in-range register is writable.
 * A max_register of 0 disables the bounds check.
 */
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

/*
 * Returns true if @reg is currently present in the register cache,
 * i.e. a cache is configured and a cache read of @reg succeeds.
 * Takes the map lock around the cache probe.
 */
bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

/*
 * Readability check. Maps using a combined format_write op are
 * write-only, so nothing on them is readable. Otherwise the same
 * callback > table > default precedence as regmap_writeable() applies.
 */
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

/*
 * Volatility check: a volatile register bypasses the cache. Write-only
 * maps (format_write) skip the readability requirement. When nothing is
 * configured, registers default to volatile only if there is no cache.
 */
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

/*
 * Precious registers must not be read speculatively (e.g. by debugfs)
 * because reading has side effects. Default is "not precious".
 */
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

/*
 * True if @reg may be written with the non-incrementing (FIFO-style)
 * regmap_noinc_write() path. Defaults to allowed.
 */
bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

/*
 * True if @reg may be read with the non-incrementing (FIFO-style)
 * regmap_noinc_read() path. Defaults to allowed.
 */
bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

/*
 * True only if every register in [reg, reg + num strides) is volatile;
 * used to decide whether a bulk operation can bypass the cache entirely.
 */
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

/* Pack a 2-bit register and 6-bit value into one byte of map->work_buf. */
static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

/* Pack a 4-bit register and 12-bit value into one big-endian u16. */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

/* Pack a 7-bit register and 9-bit value into one big-endian u16. */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}
static void regmap_format_10_14_write(struct regmap *map, 235 unsigned int reg, unsigned int val) 236 { 237 u8 *out = map->work_buf; 238 239 out[2] = val; 240 out[1] = (val >> 8) | (reg << 6); 241 out[0] = reg >> 2; 242 } 243 244 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) 245 { 246 u8 *b = buf; 247 248 b[0] = val << shift; 249 } 250 251 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) 252 { 253 put_unaligned_be16(val << shift, buf); 254 } 255 256 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) 257 { 258 put_unaligned_le16(val << shift, buf); 259 } 260 261 static void regmap_format_16_native(void *buf, unsigned int val, 262 unsigned int shift) 263 { 264 u16 v = val << shift; 265 266 memcpy(buf, &v, sizeof(v)); 267 } 268 269 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) 270 { 271 u8 *b = buf; 272 273 val <<= shift; 274 275 b[0] = val >> 16; 276 b[1] = val >> 8; 277 b[2] = val; 278 } 279 280 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) 281 { 282 put_unaligned_be32(val << shift, buf); 283 } 284 285 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) 286 { 287 put_unaligned_le32(val << shift, buf); 288 } 289 290 static void regmap_format_32_native(void *buf, unsigned int val, 291 unsigned int shift) 292 { 293 u32 v = val << shift; 294 295 memcpy(buf, &v, sizeof(v)); 296 } 297 298 #ifdef CONFIG_64BIT 299 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) 300 { 301 put_unaligned_be64((u64) val << shift, buf); 302 } 303 304 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) 305 { 306 put_unaligned_le64((u64) val << shift, buf); 307 } 308 309 static void regmap_format_64_native(void *buf, unsigned int val, 310 unsigned int shift) 311 { 312 u64 v = (u64) val << shift; 313 314 memcpy(buf, &v, sizeof(v)); 315 } 316 #endif 317 
318 static void regmap_parse_inplace_noop(void *buf) 319 { 320 } 321 322 static unsigned int regmap_parse_8(const void *buf) 323 { 324 const u8 *b = buf; 325 326 return b[0]; 327 } 328 329 static unsigned int regmap_parse_16_be(const void *buf) 330 { 331 return get_unaligned_be16(buf); 332 } 333 334 static unsigned int regmap_parse_16_le(const void *buf) 335 { 336 return get_unaligned_le16(buf); 337 } 338 339 static void regmap_parse_16_be_inplace(void *buf) 340 { 341 u16 v = get_unaligned_be16(buf); 342 343 memcpy(buf, &v, sizeof(v)); 344 } 345 346 static void regmap_parse_16_le_inplace(void *buf) 347 { 348 u16 v = get_unaligned_le16(buf); 349 350 memcpy(buf, &v, sizeof(v)); 351 } 352 353 static unsigned int regmap_parse_16_native(const void *buf) 354 { 355 u16 v; 356 357 memcpy(&v, buf, sizeof(v)); 358 return v; 359 } 360 361 static unsigned int regmap_parse_24(const void *buf) 362 { 363 const u8 *b = buf; 364 unsigned int ret = b[2]; 365 ret |= ((unsigned int)b[1]) << 8; 366 ret |= ((unsigned int)b[0]) << 16; 367 368 return ret; 369 } 370 371 static unsigned int regmap_parse_32_be(const void *buf) 372 { 373 return get_unaligned_be32(buf); 374 } 375 376 static unsigned int regmap_parse_32_le(const void *buf) 377 { 378 return get_unaligned_le32(buf); 379 } 380 381 static void regmap_parse_32_be_inplace(void *buf) 382 { 383 u32 v = get_unaligned_be32(buf); 384 385 memcpy(buf, &v, sizeof(v)); 386 } 387 388 static void regmap_parse_32_le_inplace(void *buf) 389 { 390 u32 v = get_unaligned_le32(buf); 391 392 memcpy(buf, &v, sizeof(v)); 393 } 394 395 static unsigned int regmap_parse_32_native(const void *buf) 396 { 397 u32 v; 398 399 memcpy(&v, buf, sizeof(v)); 400 return v; 401 } 402 403 #ifdef CONFIG_64BIT 404 static unsigned int regmap_parse_64_be(const void *buf) 405 { 406 return get_unaligned_be64(buf); 407 } 408 409 static unsigned int regmap_parse_64_le(const void *buf) 410 { 411 return get_unaligned_le64(buf); 412 } 413 414 static void 
regmap_parse_64_be_inplace(void *buf) 415 { 416 u64 v = get_unaligned_be64(buf); 417 418 memcpy(buf, &v, sizeof(v)); 419 } 420 421 static void regmap_parse_64_le_inplace(void *buf) 422 { 423 u64 v = get_unaligned_le64(buf); 424 425 memcpy(buf, &v, sizeof(v)); 426 } 427 428 static unsigned int regmap_parse_64_native(const void *buf) 429 { 430 u64 v; 431 432 memcpy(&v, buf, sizeof(v)); 433 return v; 434 } 435 #endif 436 437 static void regmap_lock_hwlock(void *__map) 438 { 439 struct regmap *map = __map; 440 441 hwspin_lock_timeout(map->hwlock, UINT_MAX); 442 } 443 444 static void regmap_lock_hwlock_irq(void *__map) 445 { 446 struct regmap *map = __map; 447 448 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX); 449 } 450 451 static void regmap_lock_hwlock_irqsave(void *__map) 452 { 453 struct regmap *map = __map; 454 455 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX, 456 &map->spinlock_flags); 457 } 458 459 static void regmap_unlock_hwlock(void *__map) 460 { 461 struct regmap *map = __map; 462 463 hwspin_unlock(map->hwlock); 464 } 465 466 static void regmap_unlock_hwlock_irq(void *__map) 467 { 468 struct regmap *map = __map; 469 470 hwspin_unlock_irq(map->hwlock); 471 } 472 473 static void regmap_unlock_hwlock_irqrestore(void *__map) 474 { 475 struct regmap *map = __map; 476 477 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags); 478 } 479 480 static void regmap_lock_unlock_none(void *__map) 481 { 482 483 } 484 485 static void regmap_lock_mutex(void *__map) 486 { 487 struct regmap *map = __map; 488 mutex_lock(&map->mutex); 489 } 490 491 static void regmap_unlock_mutex(void *__map) 492 { 493 struct regmap *map = __map; 494 mutex_unlock(&map->mutex); 495 } 496 497 static void regmap_lock_spinlock(void *__map) 498 __acquires(&map->spinlock) 499 { 500 struct regmap *map = __map; 501 unsigned long flags; 502 503 spin_lock_irqsave(&map->spinlock, flags); 504 map->spinlock_flags = flags; 505 } 506 507 static void regmap_unlock_spinlock(void *__map) 508 
__releases(&map->spinlock) 509 { 510 struct regmap *map = __map; 511 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); 512 } 513 514 static void dev_get_regmap_release(struct device *dev, void *res) 515 { 516 /* 517 * We don't actually have anything to do here; the goal here 518 * is not to manage the regmap but to provide a simple way to 519 * get the regmap back given a struct device. 520 */ 521 } 522 523 static bool _regmap_range_add(struct regmap *map, 524 struct regmap_range_node *data) 525 { 526 struct rb_root *root = &map->range_tree; 527 struct rb_node **new = &(root->rb_node), *parent = NULL; 528 529 while (*new) { 530 struct regmap_range_node *this = 531 rb_entry(*new, struct regmap_range_node, node); 532 533 parent = *new; 534 if (data->range_max < this->range_min) 535 new = &((*new)->rb_left); 536 else if (data->range_min > this->range_max) 537 new = &((*new)->rb_right); 538 else 539 return false; 540 } 541 542 rb_link_node(&data->node, parent, new); 543 rb_insert_color(&data->node, root); 544 545 return true; 546 } 547 548 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, 549 unsigned int reg) 550 { 551 struct rb_node *node = map->range_tree.rb_node; 552 553 while (node) { 554 struct regmap_range_node *this = 555 rb_entry(node, struct regmap_range_node, node); 556 557 if (reg < this->range_min) 558 node = node->rb_left; 559 else if (reg > this->range_max) 560 node = node->rb_right; 561 else 562 return this; 563 } 564 565 return NULL; 566 } 567 568 static void regmap_range_exit(struct regmap *map) 569 { 570 struct rb_node *next; 571 struct regmap_range_node *range_node; 572 573 next = rb_first(&map->range_tree); 574 while (next) { 575 range_node = rb_entry(next, struct regmap_range_node, node); 576 next = rb_next(&range_node->node); 577 rb_erase(&range_node->node, &map->range_tree); 578 kfree(range_node); 579 } 580 581 kfree(map->selector_work_buf); 582 } 583 584 static int regmap_set_name(struct regmap *map, const 
struct regmap_config *config) 585 { 586 if (config->name) { 587 const char *name = kstrdup_const(config->name, GFP_KERNEL); 588 589 if (!name) 590 return -ENOMEM; 591 592 kfree_const(map->name); 593 map->name = name; 594 } 595 596 return 0; 597 } 598 599 int regmap_attach_dev(struct device *dev, struct regmap *map, 600 const struct regmap_config *config) 601 { 602 struct regmap **m; 603 int ret; 604 605 map->dev = dev; 606 607 ret = regmap_set_name(map, config); 608 if (ret) 609 return ret; 610 611 regmap_debugfs_init(map); 612 613 /* Add a devres resource for dev_get_regmap() */ 614 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 615 if (!m) { 616 regmap_debugfs_exit(map); 617 return -ENOMEM; 618 } 619 *m = map; 620 devres_add(dev, m); 621 622 return 0; 623 } 624 EXPORT_SYMBOL_GPL(regmap_attach_dev); 625 626 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, 627 const struct regmap_config *config) 628 { 629 enum regmap_endian endian; 630 631 /* Retrieve the endianness specification from the regmap config */ 632 endian = config->reg_format_endian; 633 634 /* If the regmap config specified a non-default value, use that */ 635 if (endian != REGMAP_ENDIAN_DEFAULT) 636 return endian; 637 638 /* Retrieve the endianness specification from the bus config */ 639 if (bus && bus->reg_format_endian_default) 640 endian = bus->reg_format_endian_default; 641 642 /* If the bus specified a non-default value, use that */ 643 if (endian != REGMAP_ENDIAN_DEFAULT) 644 return endian; 645 646 /* Use this if no other value was found */ 647 return REGMAP_ENDIAN_BIG; 648 } 649 650 enum regmap_endian regmap_get_val_endian(struct device *dev, 651 const struct regmap_bus *bus, 652 const struct regmap_config *config) 653 { 654 struct fwnode_handle *fwnode = dev ? 
dev_fwnode(dev) : NULL; 655 enum regmap_endian endian; 656 657 /* Retrieve the endianness specification from the regmap config */ 658 endian = config->val_format_endian; 659 660 /* If the regmap config specified a non-default value, use that */ 661 if (endian != REGMAP_ENDIAN_DEFAULT) 662 return endian; 663 664 /* If the firmware node exist try to get endianness from it */ 665 if (fwnode_property_read_bool(fwnode, "big-endian")) 666 endian = REGMAP_ENDIAN_BIG; 667 else if (fwnode_property_read_bool(fwnode, "little-endian")) 668 endian = REGMAP_ENDIAN_LITTLE; 669 else if (fwnode_property_read_bool(fwnode, "native-endian")) 670 endian = REGMAP_ENDIAN_NATIVE; 671 672 /* If the endianness was specified in fwnode, use that */ 673 if (endian != REGMAP_ENDIAN_DEFAULT) 674 return endian; 675 676 /* Retrieve the endianness specification from the bus config */ 677 if (bus && bus->val_format_endian_default) 678 endian = bus->val_format_endian_default; 679 680 /* If the bus specified a non-default value, use that */ 681 if (endian != REGMAP_ENDIAN_DEFAULT) 682 return endian; 683 684 /* Use this if no other value was found */ 685 return REGMAP_ENDIAN_BIG; 686 } 687 EXPORT_SYMBOL_GPL(regmap_get_val_endian); 688 689 struct regmap *__regmap_init(struct device *dev, 690 const struct regmap_bus *bus, 691 void *bus_context, 692 const struct regmap_config *config, 693 struct lock_class_key *lock_key, 694 const char *lock_name) 695 { 696 struct regmap *map; 697 int ret = -EINVAL; 698 enum regmap_endian reg_endian, val_endian; 699 int i, j; 700 701 if (!config) 702 goto err; 703 704 map = kzalloc(sizeof(*map), GFP_KERNEL); 705 if (map == NULL) { 706 ret = -ENOMEM; 707 goto err; 708 } 709 710 ret = regmap_set_name(map, config); 711 if (ret) 712 goto err_map; 713 714 if (config->disable_locking) { 715 map->lock = map->unlock = regmap_lock_unlock_none; 716 regmap_debugfs_disable(map); 717 } else if (config->lock && config->unlock) { 718 map->lock = config->lock; 719 map->unlock = 
config->unlock; 720 map->lock_arg = config->lock_arg; 721 } else if (config->use_hwlock) { 722 map->hwlock = hwspin_lock_request_specific(config->hwlock_id); 723 if (!map->hwlock) { 724 ret = -ENXIO; 725 goto err_name; 726 } 727 728 switch (config->hwlock_mode) { 729 case HWLOCK_IRQSTATE: 730 map->lock = regmap_lock_hwlock_irqsave; 731 map->unlock = regmap_unlock_hwlock_irqrestore; 732 break; 733 case HWLOCK_IRQ: 734 map->lock = regmap_lock_hwlock_irq; 735 map->unlock = regmap_unlock_hwlock_irq; 736 break; 737 default: 738 map->lock = regmap_lock_hwlock; 739 map->unlock = regmap_unlock_hwlock; 740 break; 741 } 742 743 map->lock_arg = map; 744 } else { 745 if ((bus && bus->fast_io) || 746 config->fast_io) { 747 spin_lock_init(&map->spinlock); 748 map->lock = regmap_lock_spinlock; 749 map->unlock = regmap_unlock_spinlock; 750 lockdep_set_class_and_name(&map->spinlock, 751 lock_key, lock_name); 752 } else { 753 mutex_init(&map->mutex); 754 map->lock = regmap_lock_mutex; 755 map->unlock = regmap_unlock_mutex; 756 lockdep_set_class_and_name(&map->mutex, 757 lock_key, lock_name); 758 } 759 map->lock_arg = map; 760 } 761 762 /* 763 * When we write in fast-paths with regmap_bulk_write() don't allocate 764 * scratch buffers with sleeping allocations. 
765 */ 766 if ((bus && bus->fast_io) || config->fast_io) 767 map->alloc_flags = GFP_ATOMIC; 768 else 769 map->alloc_flags = GFP_KERNEL; 770 771 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 772 map->format.pad_bytes = config->pad_bits / 8; 773 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 774 map->format.buf_size = DIV_ROUND_UP(config->reg_bits + 775 config->val_bits + config->pad_bits, 8); 776 map->reg_shift = config->pad_bits % 8; 777 if (config->reg_stride) 778 map->reg_stride = config->reg_stride; 779 else 780 map->reg_stride = 1; 781 if (is_power_of_2(map->reg_stride)) 782 map->reg_stride_order = ilog2(map->reg_stride); 783 else 784 map->reg_stride_order = -1; 785 map->use_single_read = config->use_single_read || !bus || !bus->read; 786 map->use_single_write = config->use_single_write || !bus || !bus->write; 787 map->can_multi_write = config->can_multi_write && bus && bus->write; 788 if (bus) { 789 map->max_raw_read = bus->max_raw_read; 790 map->max_raw_write = bus->max_raw_write; 791 } 792 map->dev = dev; 793 map->bus = bus; 794 map->bus_context = bus_context; 795 map->max_register = config->max_register; 796 map->wr_table = config->wr_table; 797 map->rd_table = config->rd_table; 798 map->volatile_table = config->volatile_table; 799 map->precious_table = config->precious_table; 800 map->wr_noinc_table = config->wr_noinc_table; 801 map->rd_noinc_table = config->rd_noinc_table; 802 map->writeable_reg = config->writeable_reg; 803 map->readable_reg = config->readable_reg; 804 map->volatile_reg = config->volatile_reg; 805 map->precious_reg = config->precious_reg; 806 map->writeable_noinc_reg = config->writeable_noinc_reg; 807 map->readable_noinc_reg = config->readable_noinc_reg; 808 map->cache_type = config->cache_type; 809 810 spin_lock_init(&map->async_lock); 811 INIT_LIST_HEAD(&map->async_list); 812 INIT_LIST_HEAD(&map->async_free); 813 init_waitqueue_head(&map->async_waitq); 814 815 if (config->read_flag_mask || 816 
config->write_flag_mask || 817 config->zero_flag_mask) { 818 map->read_flag_mask = config->read_flag_mask; 819 map->write_flag_mask = config->write_flag_mask; 820 } else if (bus) { 821 map->read_flag_mask = bus->read_flag_mask; 822 } 823 824 if (!bus) { 825 map->reg_read = config->reg_read; 826 map->reg_write = config->reg_write; 827 828 map->defer_caching = false; 829 goto skip_format_initialization; 830 } else if (!bus->read || !bus->write) { 831 map->reg_read = _regmap_bus_reg_read; 832 map->reg_write = _regmap_bus_reg_write; 833 map->reg_update_bits = bus->reg_update_bits; 834 835 map->defer_caching = false; 836 goto skip_format_initialization; 837 } else { 838 map->reg_read = _regmap_bus_read; 839 map->reg_update_bits = bus->reg_update_bits; 840 } 841 842 reg_endian = regmap_get_reg_endian(bus, config); 843 val_endian = regmap_get_val_endian(dev, bus, config); 844 845 switch (config->reg_bits + map->reg_shift) { 846 case 2: 847 switch (config->val_bits) { 848 case 6: 849 map->format.format_write = regmap_format_2_6_write; 850 break; 851 default: 852 goto err_hwlock; 853 } 854 break; 855 856 case 4: 857 switch (config->val_bits) { 858 case 12: 859 map->format.format_write = regmap_format_4_12_write; 860 break; 861 default: 862 goto err_hwlock; 863 } 864 break; 865 866 case 7: 867 switch (config->val_bits) { 868 case 9: 869 map->format.format_write = regmap_format_7_9_write; 870 break; 871 default: 872 goto err_hwlock; 873 } 874 break; 875 876 case 10: 877 switch (config->val_bits) { 878 case 14: 879 map->format.format_write = regmap_format_10_14_write; 880 break; 881 default: 882 goto err_hwlock; 883 } 884 break; 885 886 case 8: 887 map->format.format_reg = regmap_format_8; 888 break; 889 890 case 16: 891 switch (reg_endian) { 892 case REGMAP_ENDIAN_BIG: 893 map->format.format_reg = regmap_format_16_be; 894 break; 895 case REGMAP_ENDIAN_LITTLE: 896 map->format.format_reg = regmap_format_16_le; 897 break; 898 case REGMAP_ENDIAN_NATIVE: 899 map->format.format_reg 
= regmap_format_16_native; 900 break; 901 default: 902 goto err_hwlock; 903 } 904 break; 905 906 case 24: 907 if (reg_endian != REGMAP_ENDIAN_BIG) 908 goto err_hwlock; 909 map->format.format_reg = regmap_format_24; 910 break; 911 912 case 32: 913 switch (reg_endian) { 914 case REGMAP_ENDIAN_BIG: 915 map->format.format_reg = regmap_format_32_be; 916 break; 917 case REGMAP_ENDIAN_LITTLE: 918 map->format.format_reg = regmap_format_32_le; 919 break; 920 case REGMAP_ENDIAN_NATIVE: 921 map->format.format_reg = regmap_format_32_native; 922 break; 923 default: 924 goto err_hwlock; 925 } 926 break; 927 928 #ifdef CONFIG_64BIT 929 case 64: 930 switch (reg_endian) { 931 case REGMAP_ENDIAN_BIG: 932 map->format.format_reg = regmap_format_64_be; 933 break; 934 case REGMAP_ENDIAN_LITTLE: 935 map->format.format_reg = regmap_format_64_le; 936 break; 937 case REGMAP_ENDIAN_NATIVE: 938 map->format.format_reg = regmap_format_64_native; 939 break; 940 default: 941 goto err_hwlock; 942 } 943 break; 944 #endif 945 946 default: 947 goto err_hwlock; 948 } 949 950 if (val_endian == REGMAP_ENDIAN_NATIVE) 951 map->format.parse_inplace = regmap_parse_inplace_noop; 952 953 switch (config->val_bits) { 954 case 8: 955 map->format.format_val = regmap_format_8; 956 map->format.parse_val = regmap_parse_8; 957 map->format.parse_inplace = regmap_parse_inplace_noop; 958 break; 959 case 16: 960 switch (val_endian) { 961 case REGMAP_ENDIAN_BIG: 962 map->format.format_val = regmap_format_16_be; 963 map->format.parse_val = regmap_parse_16_be; 964 map->format.parse_inplace = regmap_parse_16_be_inplace; 965 break; 966 case REGMAP_ENDIAN_LITTLE: 967 map->format.format_val = regmap_format_16_le; 968 map->format.parse_val = regmap_parse_16_le; 969 map->format.parse_inplace = regmap_parse_16_le_inplace; 970 break; 971 case REGMAP_ENDIAN_NATIVE: 972 map->format.format_val = regmap_format_16_native; 973 map->format.parse_val = regmap_parse_16_native; 974 break; 975 default: 976 goto err_hwlock; 977 } 978 break; 
979 case 24: 980 if (val_endian != REGMAP_ENDIAN_BIG) 981 goto err_hwlock; 982 map->format.format_val = regmap_format_24; 983 map->format.parse_val = regmap_parse_24; 984 break; 985 case 32: 986 switch (val_endian) { 987 case REGMAP_ENDIAN_BIG: 988 map->format.format_val = regmap_format_32_be; 989 map->format.parse_val = regmap_parse_32_be; 990 map->format.parse_inplace = regmap_parse_32_be_inplace; 991 break; 992 case REGMAP_ENDIAN_LITTLE: 993 map->format.format_val = regmap_format_32_le; 994 map->format.parse_val = regmap_parse_32_le; 995 map->format.parse_inplace = regmap_parse_32_le_inplace; 996 break; 997 case REGMAP_ENDIAN_NATIVE: 998 map->format.format_val = regmap_format_32_native; 999 map->format.parse_val = regmap_parse_32_native; 1000 break; 1001 default: 1002 goto err_hwlock; 1003 } 1004 break; 1005 #ifdef CONFIG_64BIT 1006 case 64: 1007 switch (val_endian) { 1008 case REGMAP_ENDIAN_BIG: 1009 map->format.format_val = regmap_format_64_be; 1010 map->format.parse_val = regmap_parse_64_be; 1011 map->format.parse_inplace = regmap_parse_64_be_inplace; 1012 break; 1013 case REGMAP_ENDIAN_LITTLE: 1014 map->format.format_val = regmap_format_64_le; 1015 map->format.parse_val = regmap_parse_64_le; 1016 map->format.parse_inplace = regmap_parse_64_le_inplace; 1017 break; 1018 case REGMAP_ENDIAN_NATIVE: 1019 map->format.format_val = regmap_format_64_native; 1020 map->format.parse_val = regmap_parse_64_native; 1021 break; 1022 default: 1023 goto err_hwlock; 1024 } 1025 break; 1026 #endif 1027 } 1028 1029 if (map->format.format_write) { 1030 if ((reg_endian != REGMAP_ENDIAN_BIG) || 1031 (val_endian != REGMAP_ENDIAN_BIG)) 1032 goto err_hwlock; 1033 map->use_single_write = true; 1034 } 1035 1036 if (!map->format.format_write && 1037 !(map->format.format_reg && map->format.format_val)) 1038 goto err_hwlock; 1039 1040 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); 1041 if (map->work_buf == NULL) { 1042 ret = -ENOMEM; 1043 goto err_hwlock; 1044 } 1045 1046 if 
(map->format.format_write) { 1047 map->defer_caching = false; 1048 map->reg_write = _regmap_bus_formatted_write; 1049 } else if (map->format.format_val) { 1050 map->defer_caching = true; 1051 map->reg_write = _regmap_bus_raw_write; 1052 } 1053 1054 skip_format_initialization: 1055 1056 map->range_tree = RB_ROOT; 1057 for (i = 0; i < config->num_ranges; i++) { 1058 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 1059 struct regmap_range_node *new; 1060 1061 /* Sanity check */ 1062 if (range_cfg->range_max < range_cfg->range_min) { 1063 dev_err(map->dev, "Invalid range %d: %d < %d\n", i, 1064 range_cfg->range_max, range_cfg->range_min); 1065 goto err_range; 1066 } 1067 1068 if (range_cfg->range_max > map->max_register) { 1069 dev_err(map->dev, "Invalid range %d: %d > %d\n", i, 1070 range_cfg->range_max, map->max_register); 1071 goto err_range; 1072 } 1073 1074 if (range_cfg->selector_reg > map->max_register) { 1075 dev_err(map->dev, 1076 "Invalid range %d: selector out of map\n", i); 1077 goto err_range; 1078 } 1079 1080 if (range_cfg->window_len == 0) { 1081 dev_err(map->dev, "Invalid range %d: window_len 0\n", 1082 i); 1083 goto err_range; 1084 } 1085 1086 /* Make sure, that this register range has no selector 1087 or data window within its boundary */ 1088 for (j = 0; j < config->num_ranges; j++) { 1089 unsigned sel_reg = config->ranges[j].selector_reg; 1090 unsigned win_min = config->ranges[j].window_start; 1091 unsigned win_max = win_min + 1092 config->ranges[j].window_len - 1; 1093 1094 /* Allow data window inside its own virtual range */ 1095 if (j == i) 1096 continue; 1097 1098 if (range_cfg->range_min <= sel_reg && 1099 sel_reg <= range_cfg->range_max) { 1100 dev_err(map->dev, 1101 "Range %d: selector for %d in window\n", 1102 i, j); 1103 goto err_range; 1104 } 1105 1106 if (!(win_max < range_cfg->range_min || 1107 win_min > range_cfg->range_max)) { 1108 dev_err(map->dev, 1109 "Range %d: window for %d in window\n", 1110 i, j); 1111 goto 
err_range; 1112 } 1113 } 1114 1115 new = kzalloc(sizeof(*new), GFP_KERNEL); 1116 if (new == NULL) { 1117 ret = -ENOMEM; 1118 goto err_range; 1119 } 1120 1121 new->map = map; 1122 new->name = range_cfg->name; 1123 new->range_min = range_cfg->range_min; 1124 new->range_max = range_cfg->range_max; 1125 new->selector_reg = range_cfg->selector_reg; 1126 new->selector_mask = range_cfg->selector_mask; 1127 new->selector_shift = range_cfg->selector_shift; 1128 new->window_start = range_cfg->window_start; 1129 new->window_len = range_cfg->window_len; 1130 1131 if (!_regmap_range_add(map, new)) { 1132 dev_err(map->dev, "Failed to add range %d\n", i); 1133 kfree(new); 1134 goto err_range; 1135 } 1136 1137 if (map->selector_work_buf == NULL) { 1138 map->selector_work_buf = 1139 kzalloc(map->format.buf_size, GFP_KERNEL); 1140 if (map->selector_work_buf == NULL) { 1141 ret = -ENOMEM; 1142 goto err_range; 1143 } 1144 } 1145 } 1146 1147 ret = regcache_init(map, config); 1148 if (ret != 0) 1149 goto err_range; 1150 1151 if (dev) { 1152 ret = regmap_attach_dev(dev, map, config); 1153 if (ret != 0) 1154 goto err_regcache; 1155 } else { 1156 regmap_debugfs_init(map); 1157 } 1158 1159 return map; 1160 1161 err_regcache: 1162 regcache_exit(map); 1163 err_range: 1164 regmap_range_exit(map); 1165 kfree(map->work_buf); 1166 err_hwlock: 1167 if (map->hwlock) 1168 hwspin_lock_free(map->hwlock); 1169 err_name: 1170 kfree_const(map->name); 1171 err_map: 1172 kfree(map); 1173 err: 1174 return ERR_PTR(ret); 1175 } 1176 EXPORT_SYMBOL_GPL(__regmap_init); 1177 1178 static void devm_regmap_release(struct device *dev, void *res) 1179 { 1180 regmap_exit(*(struct regmap **)res); 1181 } 1182 1183 struct regmap *__devm_regmap_init(struct device *dev, 1184 const struct regmap_bus *bus, 1185 void *bus_context, 1186 const struct regmap_config *config, 1187 struct lock_class_key *lock_key, 1188 const char *lock_name) 1189 { 1190 struct regmap **ptr, *regmap; 1191 1192 ptr = devres_alloc(devm_regmap_release, 
			   sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

/* Fill in a regmap_field from a reg_field description; the access mask
 * is derived from the msb/lsb pair and the shift from the lsb.
 */
static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);


/**
 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be an -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free()
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	/* One contiguous allocation backs all the fields; freeing the
	 * first pointer (regmap_field_bulk_free) releases them all.
	 */
	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
 *                                  fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be an -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register field allocated using
 *                            regmap_field_bulk_alloc.
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	/* The bulk fields share one allocation, so a single kfree()
	 * of the first field releases the whole array.
	 */
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
 *                                 devm_regmap_field_bulk_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	/* Re-read the access callbacks and cache type from the new
	 * config; the bus/format setup from the original init is kept.
	 */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	/* Drain the pool of completed async transactions. */
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

/* devres match callback for dev_get_regmap(): matches by map name when
 * one is supplied, otherwise matches the first regmap found.
 */
static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

/* Translate *reg (an address inside an indirect range) into the
 * corresponding window-relative address, writing the page selector
 * register first if a page switch is required.  Returns 0 or -EINVAL
 * if the val_num-register access would cross a range or page boundary.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
1549 In that case, selector register is located on every page and 1550 it needs no page switching, when accessed alone. */ 1551 if (val_num > 1 || 1552 range->window_start + win_offset != range->selector_reg) { 1553 /* Use separate work_buf during page switching */ 1554 orig_work_buf = map->work_buf; 1555 map->work_buf = map->selector_work_buf; 1556 1557 ret = _regmap_update_bits(map, range->selector_reg, 1558 range->selector_mask, 1559 win_page << range->selector_shift, 1560 &page_chg, false); 1561 1562 map->work_buf = orig_work_buf; 1563 1564 if (ret != 0) 1565 return ret; 1566 } 1567 1568 *reg = range->window_start + win_offset; 1569 1570 return 0; 1571 } 1572 1573 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, 1574 unsigned long mask) 1575 { 1576 u8 *buf; 1577 int i; 1578 1579 if (!mask || !map->work_buf) 1580 return; 1581 1582 buf = map->work_buf; 1583 1584 for (i = 0; i < max_bytes; i++) 1585 buf[i] |= (mask >> (8 * i)) & 0xff; 1586 } 1587 1588 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, 1589 const void *val, size_t val_len, bool noinc) 1590 { 1591 struct regmap_range_node *range; 1592 unsigned long flags; 1593 void *work_val = map->work_buf + map->format.reg_bytes + 1594 map->format.pad_bytes; 1595 void *buf; 1596 int ret = -ENOTSUPP; 1597 size_t len; 1598 int i; 1599 1600 WARN_ON(!map->bus); 1601 1602 /* Check for unwritable or noinc registers in range 1603 * before we start 1604 */ 1605 if (!regmap_writeable_noinc(map, reg)) { 1606 for (i = 0; i < val_len / map->format.val_bytes; i++) { 1607 unsigned int element = 1608 reg + regmap_get_offset(map, i); 1609 if (!regmap_writeable(map, element) || 1610 regmap_writeable_noinc(map, element)) 1611 return -EINVAL; 1612 } 1613 } 1614 1615 if (!map->cache_bypass && map->format.parse_val) { 1616 unsigned int ival; 1617 int val_bytes = map->format.val_bytes; 1618 for (i = 0; i < val_len / val_bytes; i++) { 1619 ival = map->format.parse_val(val + (i * 
val_bytes)); 1620 ret = regcache_write(map, 1621 reg + regmap_get_offset(map, i), 1622 ival); 1623 if (ret) { 1624 dev_err(map->dev, 1625 "Error in caching of register: %x ret: %d\n", 1626 reg + i, ret); 1627 return ret; 1628 } 1629 } 1630 if (map->cache_only) { 1631 map->cache_dirty = true; 1632 return 0; 1633 } 1634 } 1635 1636 range = _regmap_range_lookup(map, reg); 1637 if (range) { 1638 int val_num = val_len / map->format.val_bytes; 1639 int win_offset = (reg - range->range_min) % range->window_len; 1640 int win_residue = range->window_len - win_offset; 1641 1642 /* If the write goes beyond the end of the window split it */ 1643 while (val_num > win_residue) { 1644 dev_dbg(map->dev, "Writing window %d/%zu\n", 1645 win_residue, val_len / map->format.val_bytes); 1646 ret = _regmap_raw_write_impl(map, reg, val, 1647 win_residue * 1648 map->format.val_bytes, noinc); 1649 if (ret != 0) 1650 return ret; 1651 1652 reg += win_residue; 1653 val_num -= win_residue; 1654 val += win_residue * map->format.val_bytes; 1655 val_len -= win_residue * map->format.val_bytes; 1656 1657 win_offset = (reg - range->range_min) % 1658 range->window_len; 1659 win_residue = range->window_len - win_offset; 1660 } 1661 1662 ret = _regmap_select_page(map, ®, range, noinc ? 1 : val_num); 1663 if (ret != 0) 1664 return ret; 1665 } 1666 1667 map->format.format_reg(map->work_buf, reg, map->reg_shift); 1668 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, 1669 map->write_flag_mask); 1670 1671 /* 1672 * Essentially all I/O mechanisms will be faster with a single 1673 * buffer to write. Since register syncs often generate raw 1674 * writes of single registers optimise that case. 
1675 */ 1676 if (val != work_val && val_len == map->format.val_bytes) { 1677 memcpy(work_val, val, map->format.val_bytes); 1678 val = work_val; 1679 } 1680 1681 if (map->async && map->bus->async_write) { 1682 struct regmap_async *async; 1683 1684 trace_regmap_async_write_start(map, reg, val_len); 1685 1686 spin_lock_irqsave(&map->async_lock, flags); 1687 async = list_first_entry_or_null(&map->async_free, 1688 struct regmap_async, 1689 list); 1690 if (async) 1691 list_del(&async->list); 1692 spin_unlock_irqrestore(&map->async_lock, flags); 1693 1694 if (!async) { 1695 async = map->bus->async_alloc(); 1696 if (!async) 1697 return -ENOMEM; 1698 1699 async->work_buf = kzalloc(map->format.buf_size, 1700 GFP_KERNEL | GFP_DMA); 1701 if (!async->work_buf) { 1702 kfree(async); 1703 return -ENOMEM; 1704 } 1705 } 1706 1707 async->map = map; 1708 1709 /* If the caller supplied the value we can use it safely. */ 1710 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + 1711 map->format.reg_bytes + map->format.val_bytes); 1712 1713 spin_lock_irqsave(&map->async_lock, flags); 1714 list_add_tail(&async->list, &map->async_list); 1715 spin_unlock_irqrestore(&map->async_lock, flags); 1716 1717 if (val != work_val) 1718 ret = map->bus->async_write(map->bus_context, 1719 async->work_buf, 1720 map->format.reg_bytes + 1721 map->format.pad_bytes, 1722 val, val_len, async); 1723 else 1724 ret = map->bus->async_write(map->bus_context, 1725 async->work_buf, 1726 map->format.reg_bytes + 1727 map->format.pad_bytes + 1728 val_len, NULL, 0, async); 1729 1730 if (ret != 0) { 1731 dev_err(map->dev, "Failed to schedule write: %d\n", 1732 ret); 1733 1734 spin_lock_irqsave(&map->async_lock, flags); 1735 list_move(&async->list, &map->async_free); 1736 spin_unlock_irqrestore(&map->async_lock, flags); 1737 } 1738 1739 return ret; 1740 } 1741 1742 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); 1743 1744 /* If we're doing a single register write we can probably just 1745 
* send the work_buf directly, otherwise try to do a gather 1746 * write. 1747 */ 1748 if (val == work_val) 1749 ret = map->bus->write(map->bus_context, map->work_buf, 1750 map->format.reg_bytes + 1751 map->format.pad_bytes + 1752 val_len); 1753 else if (map->bus->gather_write) 1754 ret = map->bus->gather_write(map->bus_context, map->work_buf, 1755 map->format.reg_bytes + 1756 map->format.pad_bytes, 1757 val, val_len); 1758 else 1759 ret = -ENOTSUPP; 1760 1761 /* If that didn't work fall back on linearising by hand. */ 1762 if (ret == -ENOTSUPP) { 1763 len = map->format.reg_bytes + map->format.pad_bytes + val_len; 1764 buf = kzalloc(len, GFP_KERNEL); 1765 if (!buf) 1766 return -ENOMEM; 1767 1768 memcpy(buf, map->work_buf, map->format.reg_bytes); 1769 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, 1770 val, val_len); 1771 ret = map->bus->write(map->bus_context, buf, len); 1772 1773 kfree(buf); 1774 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { 1775 /* regcache_drop_region() takes lock that we already have, 1776 * thus call map->cache_ops->drop() directly 1777 */ 1778 if (map->cache_ops && map->cache_ops->drop) 1779 map->cache_ops->drop(map, reg, reg + 1); 1780 } 1781 1782 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); 1783 1784 return ret; 1785 } 1786 1787 /** 1788 * regmap_can_raw_write - Test if regmap_raw_write() is supported 1789 * 1790 * @map: Map to check. 1791 */ 1792 bool regmap_can_raw_write(struct regmap *map) 1793 { 1794 return map->bus && map->bus->write && map->format.format_val && 1795 map->format.format_reg; 1796 } 1797 EXPORT_SYMBOL_GPL(regmap_can_raw_write); 1798 1799 /** 1800 * regmap_get_raw_read_max - Get the maximum size we can read 1801 * 1802 * @map: Map to check. 
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

/* reg_write backend for maps whose bus formats register and value into
 * a single buffer (format_write); handles indirect-range page selection.
 */
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

/* reg_write backend for buses that provide a native reg_write op. */
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

/* reg_write backend for raw buses: format the value into work_buf and
 * hand it to the raw write path.
 */
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

/* Context passed to reg_read/reg_write: the map itself for bus-backed
 * maps, otherwise the user-supplied bus_context.
 */
static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

/* Write a single register, going through the cache first unless it is
 * bypassed or deferred.  Caller must hold the map's lock.
 */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	if (regmap_should_log(map))
		dev_info(map->dev, "%x <= %x\n", reg, val);

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* map->async is read by the raw write path to pick the
	 * bus->async_write branch; cleared again before unlocking.
	 */
	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/* Split a raw write into chunks honouring use_single_write and
 * max_raw_write, then forward each chunk to _regmap_raw_write_impl().
 * Caller must hold the map's lock.
 */
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	/* chunk_regs <= val_count here, so chunk_count >= 1 and ret is
	 * always assigned by the loop below.
	 */
	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_noinc_write(): Write data from a register without incrementing the
 *			register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Feed the data to the same target register in max_raw_write
	 * sized slices (val_len > 0 here, so the loop runs at least
	 * once and ret is always assigned).
	 */
	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force option.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	/* Shift the field-relative mask/value into register position. */
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating asynchronously
 * @force: Boolean indicating use force update
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be write from
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Pull the i-th native-sized element out of val. */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		/* Format a scratch copy in place for raw transmission. */
		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	/* The write flag mask only applies to the first byte sent. */
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

/* Index of the window page that contains reg within an indirect range. */
static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes.
	 * This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	/* Flush the final run of same-page writes. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* Fall back to one write per register when the bus can't batch. */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register are supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
2506 */ 2507 int regmap_multi_reg_write_bypassed(struct regmap *map, 2508 const struct reg_sequence *regs, 2509 int num_regs) 2510 { 2511 int ret; 2512 bool bypass; 2513 2514 map->lock(map->lock_arg); 2515 2516 bypass = map->cache_bypass; 2517 map->cache_bypass = true; 2518 2519 ret = _regmap_multi_reg_write(map, regs, num_regs); 2520 2521 map->cache_bypass = bypass; 2522 2523 map->unlock(map->lock_arg); 2524 2525 return ret; 2526 } 2527 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); 2528 2529 /** 2530 * regmap_raw_write_async() - Write raw values to one or more registers 2531 * asynchronously 2532 * 2533 * @map: Register map to write to 2534 * @reg: Initial register to write to 2535 * @val: Block of data to be written, laid out for direct transmission to the 2536 * device. Must be valid until regmap_async_complete() is called. 2537 * @val_len: Length of data pointed to by val. 2538 * 2539 * This function is intended to be used for things like firmware 2540 * download where a large block of data needs to be transferred to the 2541 * device. No formatting will be done on the data provided. 2542 * 2543 * If supported by the underlying bus the write will be scheduled 2544 * asynchronously, helping maximise I/O speed on higher speed buses 2545 * like SPI. regmap_async_complete() can be called to ensure that all 2546 * asynchrnous writes have been completed. 2547 * 2548 * A value of zero will be returned on success, a negative errno will 2549 * be returned in error cases. 
2550 */ 2551 int regmap_raw_write_async(struct regmap *map, unsigned int reg, 2552 const void *val, size_t val_len) 2553 { 2554 int ret; 2555 2556 if (val_len % map->format.val_bytes) 2557 return -EINVAL; 2558 if (!IS_ALIGNED(reg, map->reg_stride)) 2559 return -EINVAL; 2560 2561 map->lock(map->lock_arg); 2562 2563 map->async = true; 2564 2565 ret = _regmap_raw_write(map, reg, val, val_len, false); 2566 2567 map->async = false; 2568 2569 map->unlock(map->lock_arg); 2570 2571 return ret; 2572 } 2573 EXPORT_SYMBOL_GPL(regmap_raw_write_async); 2574 2575 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2576 unsigned int val_len, bool noinc) 2577 { 2578 struct regmap_range_node *range; 2579 int ret; 2580 2581 WARN_ON(!map->bus); 2582 2583 if (!map->bus || !map->bus->read) 2584 return -EINVAL; 2585 2586 range = _regmap_range_lookup(map, reg); 2587 if (range) { 2588 ret = _regmap_select_page(map, ®, range, 2589 noinc ? 1 : val_len / map->format.val_bytes); 2590 if (ret != 0) 2591 return ret; 2592 } 2593 2594 map->format.format_reg(map->work_buf, reg, map->reg_shift); 2595 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, 2596 map->read_flag_mask); 2597 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); 2598 2599 ret = map->bus->read(map->bus_context, map->work_buf, 2600 map->format.reg_bytes + map->format.pad_bytes, 2601 val, val_len); 2602 2603 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); 2604 2605 return ret; 2606 } 2607 2608 static int _regmap_bus_reg_read(void *context, unsigned int reg, 2609 unsigned int *val) 2610 { 2611 struct regmap *map = context; 2612 2613 return map->bus->reg_read(map->bus_context, reg, val); 2614 } 2615 2616 static int _regmap_bus_read(void *context, unsigned int reg, 2617 unsigned int *val) 2618 { 2619 int ret; 2620 struct regmap *map = context; 2621 void *work_val = map->work_buf + map->format.reg_bytes + 2622 map->format.pad_bytes; 2623 2624 if 
(!map->format.parse_val) 2625 return -EINVAL; 2626 2627 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); 2628 if (ret == 0) 2629 *val = map->format.parse_val(work_val); 2630 2631 return ret; 2632 } 2633 2634 static int _regmap_read(struct regmap *map, unsigned int reg, 2635 unsigned int *val) 2636 { 2637 int ret; 2638 void *context = _regmap_map_get_context(map); 2639 2640 if (!map->cache_bypass) { 2641 ret = regcache_read(map, reg, val); 2642 if (ret == 0) 2643 return 0; 2644 } 2645 2646 if (map->cache_only) 2647 return -EBUSY; 2648 2649 if (!regmap_readable(map, reg)) 2650 return -EIO; 2651 2652 ret = map->reg_read(context, reg, val); 2653 if (ret == 0) { 2654 if (regmap_should_log(map)) 2655 dev_info(map->dev, "%x => %x\n", reg, *val); 2656 2657 trace_regmap_reg_read(map, reg, *val); 2658 2659 if (!map->cache_bypass) 2660 regcache_write(map, reg, *val); 2661 } 2662 2663 return ret; 2664 } 2665 2666 /** 2667 * regmap_read() - Read a value from a single register 2668 * 2669 * @map: Register map to read from 2670 * @reg: Register to be read from 2671 * @val: Pointer to store read value 2672 * 2673 * A value of zero will be returned on success, a negative errno will 2674 * be returned in error cases. 2675 */ 2676 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) 2677 { 2678 int ret; 2679 2680 if (!IS_ALIGNED(reg, map->reg_stride)) 2681 return -EINVAL; 2682 2683 map->lock(map->lock_arg); 2684 2685 ret = _regmap_read(map, reg, val); 2686 2687 map->unlock(map->lock_arg); 2688 2689 return ret; 2690 } 2691 EXPORT_SYMBOL_GPL(regmap_read); 2692 2693 /** 2694 * regmap_raw_read() - Read raw data from the device 2695 * 2696 * @map: Register map to read from 2697 * @reg: First register to be read from 2698 * @val: Pointer to store read value 2699 * @val_len: Size of data to read 2700 * 2701 * A value of zero will be returned on success, a negative errno will 2702 * be returned in error cases. 
2703 */ 2704 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 2705 size_t val_len) 2706 { 2707 size_t val_bytes = map->format.val_bytes; 2708 size_t val_count = val_len / val_bytes; 2709 unsigned int v; 2710 int ret, i; 2711 2712 if (!map->bus) 2713 return -EINVAL; 2714 if (val_len % map->format.val_bytes) 2715 return -EINVAL; 2716 if (!IS_ALIGNED(reg, map->reg_stride)) 2717 return -EINVAL; 2718 if (val_count == 0) 2719 return -EINVAL; 2720 2721 map->lock(map->lock_arg); 2722 2723 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 2724 map->cache_type == REGCACHE_NONE) { 2725 size_t chunk_count, chunk_bytes; 2726 size_t chunk_regs = val_count; 2727 2728 if (!map->bus->read) { 2729 ret = -ENOTSUPP; 2730 goto out; 2731 } 2732 2733 if (map->use_single_read) 2734 chunk_regs = 1; 2735 else if (map->max_raw_read && val_len > map->max_raw_read) 2736 chunk_regs = map->max_raw_read / val_bytes; 2737 2738 chunk_count = val_count / chunk_regs; 2739 chunk_bytes = chunk_regs * val_bytes; 2740 2741 /* Read bytes that fit into whole chunks */ 2742 for (i = 0; i < chunk_count; i++) { 2743 ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); 2744 if (ret != 0) 2745 goto out; 2746 2747 reg += regmap_get_offset(map, chunk_regs); 2748 val += chunk_bytes; 2749 val_len -= chunk_bytes; 2750 } 2751 2752 /* Read remaining bytes */ 2753 if (val_len) { 2754 ret = _regmap_raw_read(map, reg, val, val_len, false); 2755 if (ret != 0) 2756 goto out; 2757 } 2758 } else { 2759 /* Otherwise go word by word for the cache; should be low 2760 * cost as we expect to hit the cache. 
2761 */ 2762 for (i = 0; i < val_count; i++) { 2763 ret = _regmap_read(map, reg + regmap_get_offset(map, i), 2764 &v); 2765 if (ret != 0) 2766 goto out; 2767 2768 map->format.format_val(val + (i * val_bytes), v, 0); 2769 } 2770 } 2771 2772 out: 2773 map->unlock(map->lock_arg); 2774 2775 return ret; 2776 } 2777 EXPORT_SYMBOL_GPL(regmap_raw_read); 2778 2779 /** 2780 * regmap_noinc_read(): Read data from a register without incrementing the 2781 * register number 2782 * 2783 * @map: Register map to read from 2784 * @reg: Register to read from 2785 * @val: Pointer to data buffer 2786 * @val_len: Length of output buffer in bytes. 2787 * 2788 * The regmap API usually assumes that bulk bus read operations will read a 2789 * range of registers. Some devices have certain registers for which a read 2790 * operation read will read from an internal FIFO. 2791 * 2792 * The target register must be volatile but registers after it can be 2793 * completely unrelated cacheable registers. 2794 * 2795 * This will attempt multiple reads as required to read val_len bytes. 2796 * 2797 * A value of zero will be returned on success, a negative errno will be 2798 * returned in error cases. 
2799 */ 2800 int regmap_noinc_read(struct regmap *map, unsigned int reg, 2801 void *val, size_t val_len) 2802 { 2803 size_t read_len; 2804 int ret; 2805 2806 if (!map->bus) 2807 return -EINVAL; 2808 if (!map->bus->read) 2809 return -ENOTSUPP; 2810 if (val_len % map->format.val_bytes) 2811 return -EINVAL; 2812 if (!IS_ALIGNED(reg, map->reg_stride)) 2813 return -EINVAL; 2814 if (val_len == 0) 2815 return -EINVAL; 2816 2817 map->lock(map->lock_arg); 2818 2819 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) { 2820 ret = -EINVAL; 2821 goto out_unlock; 2822 } 2823 2824 while (val_len) { 2825 if (map->max_raw_read && map->max_raw_read < val_len) 2826 read_len = map->max_raw_read; 2827 else 2828 read_len = val_len; 2829 ret = _regmap_raw_read(map, reg, val, read_len, true); 2830 if (ret) 2831 goto out_unlock; 2832 val = ((u8 *)val) + read_len; 2833 val_len -= read_len; 2834 } 2835 2836 out_unlock: 2837 map->unlock(map->lock_arg); 2838 return ret; 2839 } 2840 EXPORT_SYMBOL_GPL(regmap_noinc_read); 2841 2842 /** 2843 * regmap_field_read(): Read a value to a single register field 2844 * 2845 * @field: Register field to read from 2846 * @val: Pointer to store read value 2847 * 2848 * A value of zero will be returned on success, a negative errno will 2849 * be returned in error cases. 
2850 */ 2851 int regmap_field_read(struct regmap_field *field, unsigned int *val) 2852 { 2853 int ret; 2854 unsigned int reg_val; 2855 ret = regmap_read(field->regmap, field->reg, ®_val); 2856 if (ret != 0) 2857 return ret; 2858 2859 reg_val &= field->mask; 2860 reg_val >>= field->shift; 2861 *val = reg_val; 2862 2863 return ret; 2864 } 2865 EXPORT_SYMBOL_GPL(regmap_field_read); 2866 2867 /** 2868 * regmap_fields_read() - Read a value to a single register field with port ID 2869 * 2870 * @field: Register field to read from 2871 * @id: port ID 2872 * @val: Pointer to store read value 2873 * 2874 * A value of zero will be returned on success, a negative errno will 2875 * be returned in error cases. 2876 */ 2877 int regmap_fields_read(struct regmap_field *field, unsigned int id, 2878 unsigned int *val) 2879 { 2880 int ret; 2881 unsigned int reg_val; 2882 2883 if (id >= field->id_size) 2884 return -EINVAL; 2885 2886 ret = regmap_read(field->regmap, 2887 field->reg + (field->id_offset * id), 2888 ®_val); 2889 if (ret != 0) 2890 return ret; 2891 2892 reg_val &= field->mask; 2893 reg_val >>= field->shift; 2894 *val = reg_val; 2895 2896 return ret; 2897 } 2898 EXPORT_SYMBOL_GPL(regmap_fields_read); 2899 2900 /** 2901 * regmap_bulk_read() - Read multiple registers from the device 2902 * 2903 * @map: Register map to read from 2904 * @reg: First register to be read from 2905 * @val: Pointer to store read value, in native register size for device 2906 * @val_count: Number of registers to read 2907 * 2908 * A value of zero will be returned on success, a negative errno will 2909 * be returned in error cases. 
2910 */ 2911 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 2912 size_t val_count) 2913 { 2914 int ret, i; 2915 size_t val_bytes = map->format.val_bytes; 2916 bool vol = regmap_volatile_range(map, reg, val_count); 2917 2918 if (!IS_ALIGNED(reg, map->reg_stride)) 2919 return -EINVAL; 2920 if (val_count == 0) 2921 return -EINVAL; 2922 2923 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { 2924 ret = regmap_raw_read(map, reg, val, val_bytes * val_count); 2925 if (ret != 0) 2926 return ret; 2927 2928 for (i = 0; i < val_count * val_bytes; i += val_bytes) 2929 map->format.parse_inplace(val + i); 2930 } else { 2931 #ifdef CONFIG_64BIT 2932 u64 *u64 = val; 2933 #endif 2934 u32 *u32 = val; 2935 u16 *u16 = val; 2936 u8 *u8 = val; 2937 2938 map->lock(map->lock_arg); 2939 2940 for (i = 0; i < val_count; i++) { 2941 unsigned int ival; 2942 2943 ret = _regmap_read(map, reg + regmap_get_offset(map, i), 2944 &ival); 2945 if (ret != 0) 2946 goto out; 2947 2948 switch (map->format.val_bytes) { 2949 #ifdef CONFIG_64BIT 2950 case 8: 2951 u64[i] = ival; 2952 break; 2953 #endif 2954 case 4: 2955 u32[i] = ival; 2956 break; 2957 case 2: 2958 u16[i] = ival; 2959 break; 2960 case 1: 2961 u8[i] = ival; 2962 break; 2963 default: 2964 ret = -EINVAL; 2965 goto out; 2966 } 2967 } 2968 2969 out: 2970 map->unlock(map->lock_arg); 2971 } 2972 2973 return ret; 2974 } 2975 EXPORT_SYMBOL_GPL(regmap_bulk_read); 2976 2977 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 2978 unsigned int mask, unsigned int val, 2979 bool *change, bool force_write) 2980 { 2981 int ret; 2982 unsigned int tmp, orig; 2983 2984 if (change) 2985 *change = false; 2986 2987 if (regmap_volatile(map, reg) && map->reg_update_bits) { 2988 ret = map->reg_update_bits(map->bus_context, reg, mask, val); 2989 if (ret == 0 && change) 2990 *change = true; 2991 } else { 2992 ret = _regmap_read(map, reg, &orig); 2993 if (ret != 0) 2994 return ret; 2995 2996 
tmp = orig & ~mask; 2997 tmp |= val & mask; 2998 2999 if (force_write || (tmp != orig)) { 3000 ret = _regmap_write(map, reg, tmp); 3001 if (ret == 0 && change) 3002 *change = true; 3003 } 3004 } 3005 3006 return ret; 3007 } 3008 3009 /** 3010 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register 3011 * 3012 * @map: Register map to update 3013 * @reg: Register to update 3014 * @mask: Bitmask to change 3015 * @val: New value for bitmask 3016 * @change: Boolean indicating if a write was done 3017 * @async: Boolean indicating asynchronously 3018 * @force: Boolean indicating use force update 3019 * 3020 * Perform a read/modify/write cycle on a register map with change, async, force 3021 * options. 3022 * 3023 * If async is true: 3024 * 3025 * With most buses the read must be done synchronously so this is most useful 3026 * for devices with a cache which do not need to interact with the hardware to 3027 * determine the current register value. 3028 * 3029 * Returns zero for success, a negative number on error. 3030 */ 3031 int regmap_update_bits_base(struct regmap *map, unsigned int reg, 3032 unsigned int mask, unsigned int val, 3033 bool *change, bool async, bool force) 3034 { 3035 int ret; 3036 3037 map->lock(map->lock_arg); 3038 3039 map->async = async; 3040 3041 ret = _regmap_update_bits(map, reg, mask, val, change, force); 3042 3043 map->async = false; 3044 3045 map->unlock(map->lock_arg); 3046 3047 return ret; 3048 } 3049 EXPORT_SYMBOL_GPL(regmap_update_bits_base); 3050 3051 /** 3052 * regmap_test_bits() - Check if all specified bits are set in a register. 3053 * 3054 * @map: Register map to operate on 3055 * @reg: Register to read from 3056 * @bits: Bits to test 3057 * 3058 * Returns 0 if at least one of the tested bits is not set, 1 if all tested 3059 * bits are set and a negative error number if the underlying regmap_read() 3060 * fails. 
3061 */ 3062 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) 3063 { 3064 unsigned int val, ret; 3065 3066 ret = regmap_read(map, reg, &val); 3067 if (ret) 3068 return ret; 3069 3070 return (val & bits) == bits; 3071 } 3072 EXPORT_SYMBOL_GPL(regmap_test_bits); 3073 3074 void regmap_async_complete_cb(struct regmap_async *async, int ret) 3075 { 3076 struct regmap *map = async->map; 3077 bool wake; 3078 3079 trace_regmap_async_io_complete(map); 3080 3081 spin_lock(&map->async_lock); 3082 list_move(&async->list, &map->async_free); 3083 wake = list_empty(&map->async_list); 3084 3085 if (ret != 0) 3086 map->async_ret = ret; 3087 3088 spin_unlock(&map->async_lock); 3089 3090 if (wake) 3091 wake_up(&map->async_waitq); 3092 } 3093 EXPORT_SYMBOL_GPL(regmap_async_complete_cb); 3094 3095 static int regmap_async_is_done(struct regmap *map) 3096 { 3097 unsigned long flags; 3098 int ret; 3099 3100 spin_lock_irqsave(&map->async_lock, flags); 3101 ret = list_empty(&map->async_list); 3102 spin_unlock_irqrestore(&map->async_lock, flags); 3103 3104 return ret; 3105 } 3106 3107 /** 3108 * regmap_async_complete - Ensure all asynchronous I/O has completed. 3109 * 3110 * @map: Map to operate on. 3111 * 3112 * Blocks until any pending asynchronous I/O has completed. Returns 3113 * an error code for any failed I/O operations. 
3114 */ 3115 int regmap_async_complete(struct regmap *map) 3116 { 3117 unsigned long flags; 3118 int ret; 3119 3120 /* Nothing to do with no async support */ 3121 if (!map->bus || !map->bus->async_write) 3122 return 0; 3123 3124 trace_regmap_async_complete_start(map); 3125 3126 wait_event(map->async_waitq, regmap_async_is_done(map)); 3127 3128 spin_lock_irqsave(&map->async_lock, flags); 3129 ret = map->async_ret; 3130 map->async_ret = 0; 3131 spin_unlock_irqrestore(&map->async_lock, flags); 3132 3133 trace_regmap_async_complete_done(map); 3134 3135 return ret; 3136 } 3137 EXPORT_SYMBOL_GPL(regmap_async_complete); 3138 3139 /** 3140 * regmap_register_patch - Register and apply register updates to be applied 3141 * on device initialistion 3142 * 3143 * @map: Register map to apply updates to. 3144 * @regs: Values to update. 3145 * @num_regs: Number of entries in regs. 3146 * 3147 * Register a set of register updates to be applied to the device 3148 * whenever the device registers are synchronised with the cache and 3149 * apply them immediately. Typically this is used to apply 3150 * corrections to be applied to the device defaults on startup, such 3151 * as the updates some vendors provide to undocumented registers. 3152 * 3153 * The caller must ensure that this function cannot be called 3154 * concurrently with either itself or regcache_sync(). 
3155 */ 3156 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, 3157 int num_regs) 3158 { 3159 struct reg_sequence *p; 3160 int ret; 3161 bool bypass; 3162 3163 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", 3164 num_regs)) 3165 return 0; 3166 3167 p = krealloc(map->patch, 3168 sizeof(struct reg_sequence) * (map->patch_regs + num_regs), 3169 GFP_KERNEL); 3170 if (p) { 3171 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); 3172 map->patch = p; 3173 map->patch_regs += num_regs; 3174 } else { 3175 return -ENOMEM; 3176 } 3177 3178 map->lock(map->lock_arg); 3179 3180 bypass = map->cache_bypass; 3181 3182 map->cache_bypass = true; 3183 map->async = true; 3184 3185 ret = _regmap_multi_reg_write(map, regs, num_regs); 3186 3187 map->async = false; 3188 map->cache_bypass = bypass; 3189 3190 map->unlock(map->lock_arg); 3191 3192 regmap_async_complete(map); 3193 3194 return ret; 3195 } 3196 EXPORT_SYMBOL_GPL(regmap_register_patch); 3197 3198 /** 3199 * regmap_get_val_bytes() - Report the size of a register value 3200 * 3201 * @map: Register map to operate on. 3202 * 3203 * Report the size of a register value, mainly intended to for use by 3204 * generic infrastructure built on top of regmap. 3205 */ 3206 int regmap_get_val_bytes(struct regmap *map) 3207 { 3208 if (map->format.format_write) 3209 return -EINVAL; 3210 3211 return map->format.val_bytes; 3212 } 3213 EXPORT_SYMBOL_GPL(regmap_get_val_bytes); 3214 3215 /** 3216 * regmap_get_max_register() - Report the max register value 3217 * 3218 * @map: Register map to operate on. 3219 * 3220 * Report the max register value, mainly intended to for use by 3221 * generic infrastructure built on top of regmap. 3222 */ 3223 int regmap_get_max_register(struct regmap *map) 3224 { 3225 return map->max_register ? 
map->max_register : -EINVAL; 3226 } 3227 EXPORT_SYMBOL_GPL(regmap_get_max_register); 3228 3229 /** 3230 * regmap_get_reg_stride() - Report the register address stride 3231 * 3232 * @map: Register map to operate on. 3233 * 3234 * Report the register address stride, mainly intended to for use by 3235 * generic infrastructure built on top of regmap. 3236 */ 3237 int regmap_get_reg_stride(struct regmap *map) 3238 { 3239 return map->reg_stride; 3240 } 3241 EXPORT_SYMBOL_GPL(regmap_get_reg_stride); 3242 3243 int regmap_parse_val(struct regmap *map, const void *buf, 3244 unsigned int *val) 3245 { 3246 if (!map->format.parse_val) 3247 return -EINVAL; 3248 3249 *val = map->format.parse_val(buf); 3250 3251 return 0; 3252 } 3253 EXPORT_SYMBOL_GPL(regmap_parse_val); 3254 3255 static int __init regmap_initcall(void) 3256 { 3257 regmap_debugfs_initcall(); 3258 3259 return 0; 3260 } 3261 postcore_initcall(regmap_initcall); 3262