// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <linux/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
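
/*
 * For illustration only: a driver typically describes its accessible
 * registers with tables like the hypothetical ones below, pointed to by
 * regmap_config->wr_table/rd_table; the range values are made up.
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(0x00, 0x1f),
 *		regmap_reg_range(0x40, 0x4f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 */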

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_7_17_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = val >> 8;
	out[0] = (val >> 16) | (reg << 1);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be24(val << shift, buf);
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24_be(const void *buf)
{
	return get_unaligned_be24(buf);
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
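
/*
 * Worked example (illustrative, not from the original source): with a
 * 7-bit register and 9-bit value format, regmap_format_7_9_write() packs
 * the register into the top seven bits of a big-endian 16-bit word.  For
 * reg = 0x1a and val = 0x0f3 the wire word is (0x1a << 9) | 0x0f3 =
 * 0x34f3, transmitted as the bytes 0x34 0xf3.
 */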

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;

	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;

	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;

	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	raw_spin_lock_irqsave(&map->raw_spinlock, flags);
	map->raw_spinlock_flags = flags;
}

static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
	struct regmap *map = __map;

	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static int dev_get_regmap_match(struct device *dev, void *res, void *data);

static int regmap_detach_dev(struct device *dev, struct regmap *map)
{
	if (!dev)
		return 0;

	return devres_release(dev, dev_get_regmap_release,
			      dev_get_regmap_match, (void *)map->name);
}

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
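
/*
 * For illustration: a device tree node can override the value endianness
 * without any driver change, e.g. (node and compatible string made up):
 *
 *	codec@1a {
 *		compatible = "fictional,foo-codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * which regmap_get_val_endian() picks up through the fwnode properties
 * queried above.
 */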

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL;	/* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			if (config->use_raw_spinlock) {
				raw_spin_lock_init(&map->raw_spinlock);
				map->lock = regmap_lock_raw_spinlock;
				map->unlock = regmap_unlock_raw_spinlock;
				lockdep_set_class_and_name(&map->raw_spinlock,
							   lock_key, lock_name);
			} else {
				spin_lock_init(&map->spinlock);
				map->lock = regmap_lock_spinlock;
				map->unlock = regmap_unlock_spinlock;
				lockdep_set_class_and_name(&map->spinlock,
							   lock_key, lock_name);
			}
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
		map->lock_key = lock_key;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->reg_base = config->reg_base;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.reg_shift = config->reg_shift;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
	map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
	map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	} else if (config->max_raw_read && config->max_raw_write) {
		map->max_raw_read = config->max_raw_read;
		map->max_raw_write = config->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (config && config->read && config->write) {
		map->reg_read = _regmap_bus_read;
		if (config->reg_update_bits)
			map->reg_update_bits = config->reg_update_bits;

		/* Bulk read/write */
		map->read = config->read;
		map->write = config->write;

		reg_endian = REGMAP_ENDIAN_NATIVE;
		val_endian = REGMAP_ENDIAN_NATIVE;
	} else if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;
		map->reg_update_bits = config->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
		/* Bulk read/write */
		map->read = bus->read;
		map->write = bus->write;

		reg_endian = regmap_get_reg_endian(bus, config);
		val_endian = regmap_get_val_endian(dev, bus, config);
	}

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		case 17:
			map->format.format_write = regmap_format_7_17_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_24_be;
			map->format.parse_val = regmap_parse_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned int sel_reg = config->ranges[j].selector_reg;
			unsigned int win_min = config->ranges[j].window_start;
			unsigned int win_max = win_min +
					       config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
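
/*
 * For illustration: drivers do not normally call __regmap_init() or
 * __devm_regmap_init() directly but go through the bus-specific wrappers
 * declared in <linux/regmap.h>, e.g. (with a made-up config):
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */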

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);

	WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");

	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @regmap: regmap bank in which the register fields are located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    const struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
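
/*
 * For illustration: a field is usually described with the REG_FIELD()
 * helper from <linux/regmap.h>.  The register address and bit positions
 * below are made up.
 *
 *	static const struct reg_field foo_pwr_field = REG_FIELD(0x30, 4, 5);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_pwr_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 */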

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which the register fields are located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 const struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free register fields allocated using
 * devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once they have finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free a register field allocated using
 * regmap_field_alloc().
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here; the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regmap_detach_dev(map->dev, map);
	regcache_exit(map);

	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name && !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
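
/*
 * For illustration: a child driver (for example an MFD cell) can look up
 * the regmap registered by its parent device; the parent pointer and the
 * error handling here are sketched, not taken from a real driver.
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */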

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	   window.  In that case, the selector register is present on every
	   page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
{
	reg += map->reg_base;

	if (map->format.reg_shift > 0)
		reg >>= map->format.reg_shift;
	else if (map->format.reg_shift < 0)
		reg <<= -(map->format.reg_shift);

	return reg;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival, offset;
		int val_bytes = map->format.val_bytes;

		/* Cache the last written value for noinc writes */
		i = noinc ? val_len - val_bytes : 0;
		for (; i < val_len; i += val_bytes) {
			ival = map->format.parse_val(val + i);
			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
			ret = regcache_write(map, reg + offset, ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + offset, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->write(map->bus_context, map->work_buf,
				 map->format.reg_bytes +
				 map->format.pad_bytes +
				 val_len);
	else if (map->bus && map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	ret = map->reg_write(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x <= %x\n", reg, val);

		trace_regmap_reg_write(map, reg, val);
	}

	return ret;
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
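
/*
 * For illustration: typical single-register I/O from a driver, with a
 * made-up register address and the error handling reduced to the basics.
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = regmap_write(map, 0x04, 0x01);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_read(map, 0x04, &val);
 *	if (ret)
 *		return ret;
 */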

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *	 device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
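
/*
 * For illustration: pushing a firmware blob at a device through
 * regmap_raw_write().  The register address and the firmware handling
 * around it are invented for the example.
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, "foo.bin", dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_raw_write(map, 0x100, fw->data, fw->size);
 *	release_firmware(fw);
 */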
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
				  void *val, unsigned int val_len, bool write)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int lastval;
	u8 *u8p;
	u16 *u16p;
	u32 *u32p;
	int ret;
	int i;

	switch (val_bytes) {
	case 1:
		u8p = val;
		if (write)
			lastval = (unsigned int)u8p[val_count - 1];
		break;
	case 2:
		u16p = val;
		if (write)
			lastval = (unsigned int)u16p[val_count - 1];
		break;
	case 4:
		u32p = val;
		if (write)
			lastval = (unsigned int)u32p[val_count - 1];
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Update the cache with the last value we write, the rest is just
	 * gone down in the hardware FIFO. We can't cache FIFOs. This makes
	 * sure a single read from the cache will work.
	 */
	if (write) {
		if (!map->cache_bypass && !map->defer_caching) {
			ret = regcache_write(map, reg, lastval);
			if (ret != 0)
				return ret;
			if (map->cache_only) {
				map->cache_dirty = true;
				return 0;
			}
		}
		ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
	} else {
		ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
	}

	if (!ret && regmap_should_log(map)) {
		dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
		for (i = 0; i < val_count; i++) {
			switch (val_bytes) {
			case 1:
				pr_cont("%x", u8p[i]);
				break;
			case 2:
				pr_cont("%x", u16p[i]);
				break;
			case 4:
				pr_cont("%x", u32p[i]);
				break;
			default:
				break;
			}
			if (i == (val_count - 1))
				pr_cont("]\n");
			else
				pr_cont(",");
		}
	}

	/* Propagate any bus error rather than unconditionally reporting success */
	return ret;
}

/**
 * regmap_noinc_write(): Write data to a register without incrementing the
 * register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer, in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
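 *
 * An illustrative sketch of pushing a buffer into a device FIFO; the
 * FIFO register 0x40 and the buffer are hypothetical, and the register
 * must be marked volatile and writeable_noinc in the regmap config:
 *
 *	ret = regmap_noinc_write(chip->regmap, 0x40, buf, len);
 *	if (ret)
 *		dev_err(chip->dev, "FIFO write failed: %d\n", ret);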
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->write && !(map->bus && map->bus->reg_noinc_write))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Use the accelerated operation if we can. The val drops the const
	 * typing in order to facilitate code reuse in regmap_noinc_readwrite().
	 */
	if (map->bus->reg_noinc_write) {
		ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 * register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the
 *	   value is unchanged
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_field_test_bits() - Check if all specified bits are set in a
 * register field.
 *
 * @field: Register field to operate on
 * @bits: Bits to test
 *
 * Returns a negative error number if the underlying regmap_field_read()
 * fails, 0 if at least one of the tested bits is not set and 1 if all
 * tested bits are set.
 */
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_field_read(field, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_field_test_bits);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 * register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the
 *	   value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
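 *
 * Most users call this through the regmap_fields_write() and
 * regmap_fields_update_bits() wrappers. An illustrative sketch, where
 * the mode field and the port index are hypothetical:
 *
 *	ret = regmap_fields_write(chip->mode_field, 2, 0x3);
 *	if (ret)
 *		dev_err(chip->dev, "failed to set mode on port 2: %d\n", ret);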
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we fall back to
	 * a series of single write operations.
	 */
	if (!map->write || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}

	if (!ret)
		trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand.
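	 *
	 * The resulting buffer is sent in a single bus transfer, laid out
	 * as R1,[pad],V1,R2,[pad],V2,...,Rn,[pad],Vn, with the write flag
	 * mask OR'd into the first byte only.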
	 */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		reg = regmap_reg_addr(map, reg);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;

	/*
	 * The set of registers is not necessarily in order, but since the
	 * order of writes must be preserved this algorithm chops the set
	 * each time the page changes. This also applies if there is a
	 * delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/*
		 * If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
2484 */ 2485 2486 if (page_change || regs[i].delay_us) { 2487 2488 /* For situations where the first write requires 2489 * a delay we need to make sure we don't call 2490 * raw_multi_reg_write with n=0 2491 * This can't occur with page breaks as we 2492 * never write on the first iteration 2493 */ 2494 if (regs[i].delay_us && i == 0) 2495 n = 1; 2496 2497 ret = _regmap_raw_multi_reg_write(map, base, n); 2498 if (ret != 0) 2499 return ret; 2500 2501 if (regs[i].delay_us) { 2502 if (map->can_sleep) 2503 fsleep(regs[i].delay_us); 2504 else 2505 udelay(regs[i].delay_us); 2506 } 2507 2508 base += n; 2509 n = 0; 2510 2511 if (page_change) { 2512 ret = _regmap_select_page(map, 2513 &base[n].reg, 2514 range, 1); 2515 if (ret != 0) 2516 return ret; 2517 2518 page_change = 0; 2519 } 2520 2521 } 2522 2523 } 2524 if (n > 0) 2525 return _regmap_raw_multi_reg_write(map, base, n); 2526 return 0; 2527 } 2528 2529 static int _regmap_multi_reg_write(struct regmap *map, 2530 const struct reg_sequence *regs, 2531 size_t num_regs) 2532 { 2533 int i; 2534 int ret; 2535 2536 if (!map->can_multi_write) { 2537 for (i = 0; i < num_regs; i++) { 2538 ret = _regmap_write(map, regs[i].reg, regs[i].def); 2539 if (ret != 0) 2540 return ret; 2541 2542 if (regs[i].delay_us) { 2543 if (map->can_sleep) 2544 fsleep(regs[i].delay_us); 2545 else 2546 udelay(regs[i].delay_us); 2547 } 2548 } 2549 return 0; 2550 } 2551 2552 if (!map->format.parse_inplace) 2553 return -EINVAL; 2554 2555 if (map->writeable_reg) 2556 for (i = 0; i < num_regs; i++) { 2557 int reg = regs[i].reg; 2558 if (!map->writeable_reg(map->dev, reg)) 2559 return -EINVAL; 2560 if (!IS_ALIGNED(reg, map->reg_stride)) 2561 return -EINVAL; 2562 } 2563 2564 if (!map->cache_bypass) { 2565 for (i = 0; i < num_regs; i++) { 2566 unsigned int val = regs[i].def; 2567 unsigned int reg = regs[i].reg; 2568 ret = regcache_write(map, reg, val); 2569 if (ret) { 2570 dev_err(map->dev, 2571 "Error in caching of register: %x ret: %d\n", 2572 reg, ret); 2573 return ret; 2574 } 2575 } 2576 if (map->cache_only) { 2577 map->cache_dirty = true; 2578 return 0; 2579 } 2580 } 2581 2582 WARN_ON(!map->bus); 2583 2584 for (i = 0; i < num_regs; i++) { 2585 unsigned int reg = regs[i].reg; 2586 struct regmap_range_node *range; 2587 2588 /* Coalesce all the writes between a page break or a delay 2589 * in a sequence 2590 */ 2591 range = _regmap_range_lookup(map, reg); 2592 if (range || regs[i].delay_us) { 2593 size_t len = sizeof(struct reg_sequence)*num_regs; 2594 struct reg_sequence *base = kmemdup(regs, len, 2595 GFP_KERNEL); 2596 if (!base) 2597 return -ENOMEM; 2598 ret = _regmap_range_multi_paged_reg_write(map, base, 2599 num_regs); 2600 kfree(base); 2601 2602 return ret; 2603 } 2604 } 2605 return _regmap_raw_multi_reg_write(map, regs, num_regs); 2606 } 2607 2608 /** 2609 * regmap_multi_reg_write() - Write multiple registers to the device 2610 * 2611 * @map: Register map to write to 2612 * @regs: Array of structures containing register,value to be written 2613 * @num_regs: Number of registers to write 2614 * 2615 * Write multiple registers to the device where the set of register, value 2616 * pairs are supplied in any order, possibly not all in a single range. 2617 * 2618 * The 'normal' block write mode will send ultimately send data on the 2619 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are 2620 * addressed. However, this alternative block multi write mode will send 2621 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. 
 * The target device must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of registers is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
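 *
 * An illustrative sketch; the buffer, its length and the base register
 * are hypothetical, and buf must stay valid until completion:
 *
 *	ret = regmap_raw_write_async(chip->regmap, 0x100, buf, len);
 *	if (!ret)
 *		ret = regmap_async_complete(chip->regmap);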
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	if (!map->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->read(map->bus_context, map->work_buf,
			map->format.reg_bytes + map->format.pad_bytes,
			val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
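 *
 * A minimal usage sketch; the status register 0x20 and its bit layout
 * are hypothetical:
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(chip->regmap, 0x20, &status);
 *	if (ret)
 *		return ret;
 *	if (status & BIT(0))
 *		dev_dbg(chip->dev, "device busy\n");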
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_read_bypassed() - Read a value from a single register direct
 *			    from the device, bypassing the cache
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;
	bool bypass, cache_only;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	cache_only = map->cache_only;
	map->cache_bypass = true;
	map->cache_only = false;

	ret = _regmap_read(map, reg, val);

	map->cache_bypass = bypass;
	map->cache_only = cache_only;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read_bypassed);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->cache_bypass && map->cache_only) {
			ret = -EBUSY;
			goto out;
		}

		if (!map->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read(): Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->read)
		return -ENOTSUPP;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * We have not defined the FIFO semantics for cache, as the
	 * cache is just one value deep. Should we return the last
	 * written value? Just avoid this by always reading the FIFO
	 * even when using cache. Cache only will not work.
	 */
	if (!map->cache_bypass && map->cache_only) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Use the accelerated operation if we can */
	if (map->bus->reg_noinc_read) {
		ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
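 *
 * An illustrative sketch; the field layout is hypothetical, with the
 * field typically created beforehand with devm_regmap_field_alloc()
 * from a REG_FIELD() description:
 *
 *	unsigned int mode;
 *
 *	ret = regmap_field_read(chip->mode_field, &mode);
 *	if (ret)
 *		return ret;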
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
			     unsigned int *regs, void *val, size_t val_count)
{
	u32 *u32 = val;
	u16 *u16 = val;
	u8 *u8 = val;
	int ret, i;

	map->lock(map->lock_arg);

	for (i = 0; i < val_count; i++) {
		unsigned int ival;

		if (regs) {
			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
				ret = -EINVAL;
				goto out;
			}
			ret = _regmap_read(map, regs[i], &ival);
		} else {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
		}
		if (ret != 0)
			goto out;

		switch (map->format.val_bytes) {
		case 4:
			u32[i] = ival;
			break;
		case 2:
			u16[i] = ival;
			break;
		case 1:
			u8[i] = ival;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	map->unlock(map->lock_arg);
	return ret;
}

/**
 * regmap_bulk_read() - Read multiple sequential registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
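 *
 * An illustrative sketch reading four consecutive 16-bit registers into
 * a native-endian buffer; the base register 0x30 is hypothetical:
 *
 *	u16 vals[4];
 *
 *	ret = regmap_bulk_read(chip->regmap, 0x30, vals, ARRAY_SIZE(vals));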
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->read && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
	}
	if (!ret)
		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

/**
 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
 *
 * @map: Register map to read from
 * @regs: Array of registers to read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
			  size_t val_count)
{
	if (val_count == 0)
		return -EINVAL;

	return _regmap_bulk_read(map, 0, regs, val, val_count);
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		reg = regmap_reg_addr(map, reg);
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig) || map->force_write_field) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the
 *	   value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
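 *
 * Most users call this through wrappers such as regmap_update_bits().
 * An illustrative sketch setting one bit in a hypothetical control
 * register:
 *
 *	ret = regmap_update_bits(chip->regmap, 0x10, BIT(3), BIT(3));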
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *			   on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply corrections
 * to the device defaults on startup, such as the updates some vendors
 * provide to undocumented registers.
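 *
 * An illustrative sketch; the register/value pairs are hypothetical:
 *
 *	static const struct reg_sequence chip_errata[] = {
 *		{ 0x41, 0x01 },
 *		{ 0x42, 0x80 },
 *	};
 *
 *	ret = regmap_register_patch(chip->regmap, chip_errata,
 *				    ARRAY_SIZE(chip_errata));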
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

/**
 * regmap_might_sleep() - Returns whether a regmap access might sleep.
 *
 * @map: Register map to operate on.
 *
 * Returns true if an access to the register might sleep, else false.
 */
bool regmap_might_sleep(struct regmap *map)
{
	return map->can_sleep;
}
EXPORT_SYMBOL_GPL(regmap_might_sleep);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);