// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <linux/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

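/*
 * Example (illustrative sketch, not part of this file): instead of a
 * writeable_reg() callback, a driver can describe writeable registers with
 * an access table in its regmap_config.  The FOO_* register names are
 * hypothetical.
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(FOO_CTRL, FOO_IRQ_MASK),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * With .wr_table = &foo_wr_table, regmap_writeable() above accepts only
 * registers inside those ranges; no_ranges, when present, are checked
 * first and take precedence.
 */
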
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_7_17_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = val >> 8;
	out[0] = (val >> 16) | (reg << 1);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

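/*
 * Worked example for the packed write formats above (values chosen for
 * illustration): regmap_format_10_14_write() with reg = 0x1a3 and
 * val = 0x2b5c produces
 *
 *	out[0] = 0x1a3 >> 2                    = 0x68
 *	out[1] = (0x2b5c >> 8) | (0x1a3 << 6)  = 0x2b | 0xc0 = 0xeb
 *	out[2] = 0x2b5c & 0xff                 = 0x5c
 *
 * i.e. the 10 register bits followed by the 14 value bits, big-endian,
 * in a 3-byte buffer.
 */
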
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be24(val << shift, buf);
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24_be(const void *buf)
{
	return get_unaligned_be24(buf);
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

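/*
 * Example (hedged sketch): instead of the built-in locking helpers around
 * here, a driver whose regmap shares a lock with other code can supply its
 * own callbacks in the regmap_config; foo_lock()/foo_unlock() and the
 * foo_dev structure are hypothetical.
 *
 *	static void foo_lock(void *arg)
 *	{
 *		struct foo_dev *foo = arg;
 *
 *		mutex_lock(&foo->shared_lock);
 *	}
 *
 *	static void foo_unlock(void *arg)
 *	{
 *		struct foo_dev *foo = arg;
 *
 *		mutex_unlock(&foo->shared_lock);
 *	}
 *
 * with config->lock = foo_lock, config->unlock = foo_unlock and
 * config->lock_arg = foo.  If config->disable_locking is set instead,
 * regmap_lock_unlock_none() above is installed and the caller must
 * provide all serialisation itself.
 */
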
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	raw_spin_lock_irqsave(&map->raw_spinlock, flags);
	map->raw_spinlock_flags = flags;
}

static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
	struct regmap *map = __map;
	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

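/*
 * Example (illustrative): drivers normally reach __regmap_init() below via
 * a bus-specific wrapper such as devm_regmap_init_i2c() or
 * devm_regmap_init_spi(), which supply the regmap_bus and bus context.
 * FOO_MAX_REG is a placeholder register address.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = FOO_MAX_REG,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 * Errors follow the ERR_PTR() convention used by __regmap_init().
 */
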
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL;	/* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			if (config->use_raw_spinlock) {
				raw_spin_lock_init(&map->raw_spinlock);
				map->lock = regmap_lock_raw_spinlock;
				map->unlock = regmap_unlock_raw_spinlock;
				lockdep_set_class_and_name(&map->raw_spinlock,
							   lock_key, lock_name);
			} else {
				spin_lock_init(&map->spinlock);
				map->lock = regmap_lock_spinlock;
				map->unlock = regmap_unlock_spinlock;
				lockdep_set_class_and_name(&map->spinlock,
							   lock_key, lock_name);
			}
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
		map->lock_key = lock_key;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->reg_base = config->reg_base;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.reg_shift = config->reg_shift;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
					    config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
	map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
	map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	} else if (config->max_raw_read && config->max_raw_write) {
		map->max_raw_read = config->max_raw_read;
		map->max_raw_write = config->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (config && config->read && config->write) {
		map->reg_read = _regmap_bus_read;
		if (config->reg_update_bits)
			map->reg_update_bits = config->reg_update_bits;

		/* Bulk read/write */
		map->read = config->read;
		map->write = config->write;

		reg_endian = REGMAP_ENDIAN_NATIVE;
		val_endian = REGMAP_ENDIAN_NATIVE;
	} else if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;
		map->reg_update_bits = config->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
		/* Bulk read/write */
		map->read = bus->read;
		map->write = bus->write;

		reg_endian = regmap_get_reg_endian(bus, config);
		val_endian = regmap_get_val_endian(dev, bus, config);
	}

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		case 17:
			map->format.format_write = regmap_format_7_17_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_24_be;
			map->format.parse_val = regmap_parse_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/*
		 * Make sure that this register range has no selector or
		 * data window within its boundary.
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned int sel_reg = config->ranges[j].selector_reg;
			unsigned int win_min = config->ranges[j].window_start;
			unsigned int win_max = win_min +
					       config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);

	WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");

	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @regmap: regmap bank in which the register fields are located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    const struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which the register fields are located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 const struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free bulk register fields allocated using
 * devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free a register field allocated using
 * regmap_field_alloc().
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);

	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name && !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

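/*
 * Example (illustrative): a child driver of an MFD device can look up the
 * regmap its parent registered; passing NULL for the name matches the
 * first (usually only) map.
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */
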
/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have the selector register inside the data
	 * window.  In that case, the selector register is located on every
	 * page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
{
	reg += map->reg_base;

	if (map->format.reg_shift > 0)
		reg >>= map->format.reg_shift;
	else if (map->format.reg_shift < 0)
		reg <<= -(map->format.reg_shift);

	return reg;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/*
	 * Check for unwritable or noinc registers in range
	 * before we start.
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival, offset;
		int val_bytes = map->format.val_bytes;

		/* Cache the last written value for noinc writes */
		i = noinc ? val_len - val_bytes : 0;
		for (; i < val_len; i += val_bytes) {
			ival = map->format.parse_val(val + i);
			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
			ret = regcache_write(map, reg + offset, ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + offset, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/*
	 * If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->write(map->bus_context, map->work_buf,
				 map->format.reg_bytes +
				 map->format.pad_bytes +
				 val_len);
	else if (map->bus && map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/*
		 * regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

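/*
 * Example (sketch; "buf" and "len" are hypothetical): callers doing large
 * raw transfers can honour the limit reported above, which is zero when
 * the bus imposes none.
 *
 *	size_t max = regmap_get_raw_read_max(map);
 *
 *	if (max && len > max)
 *		len = max;
 *	ret = regmap_raw_read(map, reg, buf, len);
 */
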
1827 */ 1828 size_t regmap_get_raw_write_max(struct regmap *map) 1829 { 1830 return map->max_raw_write; 1831 } 1832 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max); 1833 1834 static int _regmap_bus_formatted_write(void *context, unsigned int reg, 1835 unsigned int val) 1836 { 1837 int ret; 1838 struct regmap_range_node *range; 1839 struct regmap *map = context; 1840 1841 WARN_ON(!map->format.format_write); 1842 1843 range = _regmap_range_lookup(map, reg); 1844 if (range) { 1845 ret = _regmap_select_page(map, ®, range, 1); 1846 if (ret != 0) 1847 return ret; 1848 } 1849 1850 reg = regmap_reg_addr(map, reg); 1851 map->format.format_write(map, reg, val); 1852 1853 trace_regmap_hw_write_start(map, reg, 1); 1854 1855 ret = map->write(map->bus_context, map->work_buf, map->format.buf_size); 1856 1857 trace_regmap_hw_write_done(map, reg, 1); 1858 1859 return ret; 1860 } 1861 1862 static int _regmap_bus_reg_write(void *context, unsigned int reg, 1863 unsigned int val) 1864 { 1865 struct regmap *map = context; 1866 struct regmap_range_node *range; 1867 int ret; 1868 1869 range = _regmap_range_lookup(map, reg); 1870 if (range) { 1871 ret = _regmap_select_page(map, ®, range, 1); 1872 if (ret != 0) 1873 return ret; 1874 } 1875 1876 reg = regmap_reg_addr(map, reg); 1877 return map->bus->reg_write(map->bus_context, reg, val); 1878 } 1879 1880 static int _regmap_bus_raw_write(void *context, unsigned int reg, 1881 unsigned int val) 1882 { 1883 struct regmap *map = context; 1884 1885 WARN_ON(!map->format.format_val); 1886 1887 map->format.format_val(map->work_buf + map->format.reg_bytes 1888 + map->format.pad_bytes, val, 0); 1889 return _regmap_raw_write_impl(map, reg, 1890 map->work_buf + 1891 map->format.reg_bytes + 1892 map->format.pad_bytes, 1893 map->format.val_bytes, 1894 false); 1895 } 1896 1897 static inline void *_regmap_map_get_context(struct regmap *map) 1898 { 1899 return (map->bus || (!map->bus && map->read)) ? map : map->bus_context; 1900 } 1901 1902 int _regmap_write(struct regmap *map, unsigned int reg, 1903 unsigned int val) 1904 { 1905 int ret; 1906 void *context = _regmap_map_get_context(map); 1907 1908 if (!regmap_writeable(map, reg)) 1909 return -EIO; 1910 1911 if (!map->cache_bypass && !map->defer_caching) { 1912 ret = regcache_write(map, reg, val); 1913 if (ret != 0) 1914 return ret; 1915 if (map->cache_only) { 1916 map->cache_dirty = true; 1917 return 0; 1918 } 1919 } 1920 1921 ret = map->reg_write(context, reg, val); 1922 if (ret == 0) { 1923 if (regmap_should_log(map)) 1924 dev_info(map->dev, "%x <= %x\n", reg, val); 1925 1926 trace_regmap_reg_write(map, reg, val); 1927 } 1928 1929 return ret; 1930 } 1931 1932 /** 1933 * regmap_write() - Write a value to a single register 1934 * 1935 * @map: Register map to write to 1936 * @reg: Register to write to 1937 * @val: Value to be written 1938 * 1939 * A value of zero will be returned on success, a negative errno will 1940 * be returned in error cases. 
/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

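/*
 * Worked example for the chunking in _regmap_raw_write() above (numbers
 * assumed for illustration): with val_bytes = 2, val_count = 10 and
 * max_raw_write = 6, chunk_regs = 3, chunk_bytes = 6 and chunk_count = 3,
 * so three 6-byte transfers are issued and a final 2-byte write covers
 * the remaining register.
 */
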
/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
				  void *val, unsigned int val_len, bool write)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int lastval;
	u8 *u8p;
	u16 *u16p;
	u32 *u32p;
	int ret;
	int i;

	switch (val_bytes) {
	case 1:
		u8p = val;
		if (write)
			lastval = (unsigned int)u8p[val_count - 1];
		break;
	case 2:
		u16p = val;
		if (write)
			lastval = (unsigned int)u16p[val_count - 1];
		break;
	case 4:
		u32p = val;
		if (write)
			lastval = (unsigned int)u32p[val_count - 1];
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Update the cache with the last value we write, the rest is just
	 * gone down in the hardware FIFO.  We can't cache FIFOs.  This makes
	 * sure a single read from the cache will work.
	 */
	if (write) {
		if (!map->cache_bypass && !map->defer_caching) {
			ret = regcache_write(map, reg, lastval);
			if (ret != 0)
				return ret;
			if (map->cache_only) {
				map->cache_dirty = true;
				return 0;
			}
		}
		ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
	} else {
		ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
	}

	if (!ret && regmap_should_log(map)) {
		dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
		for (i = 0; i < val_count; i++) {
			switch (val_bytes) {
			case 1:
				pr_cont("%x", u8p[i]);
				break;
			case 2:
				pr_cont("%x", u16p[i]);
				break;
			case 4:
				pr_cont("%x", u32p[i]);
				break;
			default:
				break;
			}
			if (i == (val_count - 1))
				pr_cont("]\n");
			else
				pr_cont(",");
		}
	}

	return ret;
}

 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->write && !(map->bus && map->bus->reg_noinc_write))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Use the accelerated operation if we can. The val drops the const
	 * typing in order to facilitate code reuse in regmap_noinc_readwrite().
	 */
	if (map->bus && map->bus->reg_noinc_write) {
		ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_field_test_bits() - Check if all specified bits are set in a
 *                            register field.
 *
 * @field: Register field to operate on
 * @bits: Bits to test
 *
 * Returns a negative error number if the underlying regmap_field_read()
 * fails, 0 if at least one of the tested bits is not set and 1 if all
 * tested bits are set.
 */
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_field_read(field, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_field_test_bits);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
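 *
 * For instance (a purely illustrative sketch), updating the field
 * instance for port 2 without using the change/async/force options:
 *
 *	ret = regmap_fields_update_bits_base(field, 2, 0x3, 0x1,
 *					     NULL, false, false);
 *
 * The id must be below the id_size the field was created with.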
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->write || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}

	if (!ret)
		trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been converted to page-relative
 * addresses. The page register has already been written if that was
 * necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand.
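	 * As an illustration, with one register byte, no padding and one
	 * value byte per pair, the buffer we build is simply
	 * R1 V1 R2 V2 ... Rn Vn, with the write flag mask OR'd into the
	 * first byte before transmission.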
	 */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		reg = regmap_reg_addr(map, reg);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since the
	 * order of writes must be preserved this algorithm chops the set
	 * each time the page changes. This also applies if there is a
	 * delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay, make sure to
		 * write the regs and apply the delay before we change the
		 * page.
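		 *
		 * For example (illustrative numbers only): with a window of
		 * 0x200 registers per page, a sequence touching 0x010, 0x011
		 * and then 0x210 is chopped after the first two writes; they
		 * are sent as one block, the page is switched, and 0x210 is
		 * written separately.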
2472 */ 2473 2474 if (page_change || regs[i].delay_us) { 2475 2476 /* For situations where the first write requires 2477 * a delay we need to make sure we don't call 2478 * raw_multi_reg_write with n=0 2479 * This can't occur with page breaks as we 2480 * never write on the first iteration 2481 */ 2482 if (regs[i].delay_us && i == 0) 2483 n = 1; 2484 2485 ret = _regmap_raw_multi_reg_write(map, base, n); 2486 if (ret != 0) 2487 return ret; 2488 2489 if (regs[i].delay_us) { 2490 if (map->can_sleep) 2491 fsleep(regs[i].delay_us); 2492 else 2493 udelay(regs[i].delay_us); 2494 } 2495 2496 base += n; 2497 n = 0; 2498 2499 if (page_change) { 2500 ret = _regmap_select_page(map, 2501 &base[n].reg, 2502 range, 1); 2503 if (ret != 0) 2504 return ret; 2505 2506 page_change = 0; 2507 } 2508 2509 } 2510 2511 } 2512 if (n > 0) 2513 return _regmap_raw_multi_reg_write(map, base, n); 2514 return 0; 2515 } 2516 2517 static int _regmap_multi_reg_write(struct regmap *map, 2518 const struct reg_sequence *regs, 2519 size_t num_regs) 2520 { 2521 int i; 2522 int ret; 2523 2524 if (!map->can_multi_write) { 2525 for (i = 0; i < num_regs; i++) { 2526 ret = _regmap_write(map, regs[i].reg, regs[i].def); 2527 if (ret != 0) 2528 return ret; 2529 2530 if (regs[i].delay_us) { 2531 if (map->can_sleep) 2532 fsleep(regs[i].delay_us); 2533 else 2534 udelay(regs[i].delay_us); 2535 } 2536 } 2537 return 0; 2538 } 2539 2540 if (!map->format.parse_inplace) 2541 return -EINVAL; 2542 2543 if (map->writeable_reg) 2544 for (i = 0; i < num_regs; i++) { 2545 int reg = regs[i].reg; 2546 if (!map->writeable_reg(map->dev, reg)) 2547 return -EINVAL; 2548 if (!IS_ALIGNED(reg, map->reg_stride)) 2549 return -EINVAL; 2550 } 2551 2552 if (!map->cache_bypass) { 2553 for (i = 0; i < num_regs; i++) { 2554 unsigned int val = regs[i].def; 2555 unsigned int reg = regs[i].reg; 2556 ret = regcache_write(map, reg, val); 2557 if (ret) { 2558 dev_err(map->dev, 2559 "Error in caching of register: %x ret: %d\n", 2560 reg, ret); 2561 return ret; 2562 } 2563 } 2564 if (map->cache_only) { 2565 map->cache_dirty = true; 2566 return 0; 2567 } 2568 } 2569 2570 WARN_ON(!map->bus); 2571 2572 for (i = 0; i < num_regs; i++) { 2573 unsigned int reg = regs[i].reg; 2574 struct regmap_range_node *range; 2575 2576 /* Coalesce all the writes between a page break or a delay 2577 * in a sequence 2578 */ 2579 range = _regmap_range_lookup(map, reg); 2580 if (range || regs[i].delay_us) { 2581 size_t len = sizeof(struct reg_sequence)*num_regs; 2582 struct reg_sequence *base = kmemdup(regs, len, 2583 GFP_KERNEL); 2584 if (!base) 2585 return -ENOMEM; 2586 ret = _regmap_range_multi_paged_reg_write(map, base, 2587 num_regs); 2588 kfree(base); 2589 2590 return ret; 2591 } 2592 } 2593 return _regmap_raw_multi_reg_write(map, regs, num_regs); 2594 } 2595 2596 /** 2597 * regmap_multi_reg_write() - Write multiple registers to the device 2598 * 2599 * @map: Register map to write to 2600 * @regs: Array of structures containing register,value to be written 2601 * @num_regs: Number of registers to write 2602 * 2603 * Write multiple registers to the device where the set of register, value 2604 * pairs are supplied in any order, possibly not all in a single range. 2605 * 2606 * The 'normal' block write mode will send ultimately send data on the 2607 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are 2608 * addressed. However, this alternative block multi write mode will send 2609 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. 
 * The target device must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache, where the
 * set of register, value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
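 *
 * An illustrative sketch (the register address and firmware buffer are
 * hypothetical):
 *
 *	ret = regmap_raw_write_async(map, 0x80, fw->data, fw->size);
 *	if (!ret)
 *		ret = regmap_async_complete(map);
 *
 * where the buffer must stay valid until regmap_async_complete()
 * returns.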
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	if (!map->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->read(map->bus_context, map->work_buf,
			map->format.reg_bytes + map->format.pad_bytes,
			val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
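 *
 * A minimal usage sketch (the register address is hypothetical):
 *
 *	unsigned int val;
 *
 *	ret = regmap_read(map, 0x04, &val);
 *	if (ret)
 *		return ret;
 *
 * Reads are satisfied from the cache where possible; only uncached or
 * volatile registers go out to the bus.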
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_read_bypassed() - Read a value from a single register direct
 *                          from the device, bypassing the cache
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;
	bool bypass, cache_only;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	cache_only = map->cache_only;
	map->cache_bypass = true;
	map->cache_only = false;

	ret = _regmap_read(map, reg, val);

	map->cache_bypass = bypass;
	map->cache_only = cache_only;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read_bypassed);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->cache_bypass && map->cache_only) {
			ret = -EBUSY;
			goto out;
		}

		if (!map->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
2951 */ 2952 for (i = 0; i < val_count; i++) { 2953 ret = _regmap_read(map, reg + regmap_get_offset(map, i), 2954 &v); 2955 if (ret != 0) 2956 goto out; 2957 2958 map->format.format_val(val + (i * val_bytes), v, 0); 2959 } 2960 } 2961 2962 out: 2963 map->unlock(map->lock_arg); 2964 2965 return ret; 2966 } 2967 EXPORT_SYMBOL_GPL(regmap_raw_read); 2968 2969 /** 2970 * regmap_noinc_read(): Read data from a register without incrementing the 2971 * register number 2972 * 2973 * @map: Register map to read from 2974 * @reg: Register to read from 2975 * @val: Pointer to data buffer 2976 * @val_len: Length of output buffer in bytes. 2977 * 2978 * The regmap API usually assumes that bulk read operations will read a 2979 * range of registers. Some devices have certain registers for which a read 2980 * operation read will read from an internal FIFO. 2981 * 2982 * The target register must be volatile but registers after it can be 2983 * completely unrelated cacheable registers. 2984 * 2985 * This will attempt multiple reads as required to read val_len bytes. 2986 * 2987 * A value of zero will be returned on success, a negative errno will be 2988 * returned in error cases. 2989 */ 2990 int regmap_noinc_read(struct regmap *map, unsigned int reg, 2991 void *val, size_t val_len) 2992 { 2993 size_t read_len; 2994 int ret; 2995 2996 if (!map->read) 2997 return -ENOTSUPP; 2998 2999 if (val_len % map->format.val_bytes) 3000 return -EINVAL; 3001 if (!IS_ALIGNED(reg, map->reg_stride)) 3002 return -EINVAL; 3003 if (val_len == 0) 3004 return -EINVAL; 3005 3006 map->lock(map->lock_arg); 3007 3008 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) { 3009 ret = -EINVAL; 3010 goto out_unlock; 3011 } 3012 3013 /* 3014 * We have not defined the FIFO semantics for cache, as the 3015 * cache is just one value deep. Should we return the last 3016 * written value? Just avoid this by always reading the FIFO 3017 * even when using cache. Cache only will not work. 3018 */ 3019 if (!map->cache_bypass && map->cache_only) { 3020 ret = -EBUSY; 3021 goto out_unlock; 3022 } 3023 3024 /* Use the accelerated operation if we can */ 3025 if (map->bus->reg_noinc_read) { 3026 ret = regmap_noinc_readwrite(map, reg, val, val_len, false); 3027 goto out_unlock; 3028 } 3029 3030 while (val_len) { 3031 if (map->max_raw_read && map->max_raw_read < val_len) 3032 read_len = map->max_raw_read; 3033 else 3034 read_len = val_len; 3035 ret = _regmap_raw_read(map, reg, val, read_len, true); 3036 if (ret) 3037 goto out_unlock; 3038 val = ((u8 *)val) + read_len; 3039 val_len -= read_len; 3040 } 3041 3042 out_unlock: 3043 map->unlock(map->lock_arg); 3044 return ret; 3045 } 3046 EXPORT_SYMBOL_GPL(regmap_noinc_read); 3047 3048 /** 3049 * regmap_field_read(): Read a value to a single register field 3050 * 3051 * @field: Register field to read from 3052 * @val: Pointer to store read value 3053 * 3054 * A value of zero will be returned on success, a negative errno will 3055 * be returned in error cases. 
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
			     unsigned int *regs, void *val, size_t val_count)
{
	u32 *u32 = val;
	u16 *u16 = val;
	u8 *u8 = val;
	int ret, i;

	map->lock(map->lock_arg);

	for (i = 0; i < val_count; i++) {
		unsigned int ival;

		if (regs) {
			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
				ret = -EINVAL;
				goto out;
			}
			ret = _regmap_read(map, regs[i], &ival);
		} else {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
		}
		if (ret != 0)
			goto out;

		switch (map->format.val_bytes) {
		case 4:
			u32[i] = ival;
			break;
		case 2:
			u16[i] = ival;
			break;
		case 1:
			u8[i] = ival;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	map->unlock(map->lock_arg);
	return ret;
}

/**
 * regmap_bulk_read() - Read multiple sequential registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
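 *
 * For example (illustrative only; the start register and count are
 * hypothetical), reading eight consecutive 16-bit registers into a
 * native-endian array:
 *
 *	u16 buf[8];
 *
 *	ret = regmap_bulk_read(map, 0x10, buf, ARRAY_SIZE(buf));
 *
 * Unlike regmap_raw_read(), the values are converted to the CPU's
 * native format.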
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->read && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
	}
	if (!ret)
		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

/**
 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
 *
 * @map: Register map to read from
 * @regs: Array of registers to read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
			  size_t val_count)
{
	if (val_count == 0)
		return -EINVAL;

	return _regmap_bulk_read(map, 0, regs, val, val_count);
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		reg = regmap_reg_addr(map, reg);
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig) || map->force_write_field) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
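 *
 * Most users will call this through the regmap_update_bits() family of
 * wrappers. As a direct, illustrative sketch (the register and bit are
 * hypothetical):
 *
 *	ret = regmap_update_bits_base(map, 0x30, BIT(0), BIT(0),
 *				      NULL, false, false);
 *
 * which sets bit 0 of register 0x30, skipping the bus write if the bit
 * is already set.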
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
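 *
 * As an illustrative sketch (the register values below are hypothetical
 * vendor magic, not taken from a real part):
 *
 *	static const struct reg_sequence foo_patch[] = {
 *		{ 0xf0, 0x0001 },
 *		{ 0xf1, 0x8000, 10 },
 *	};
 *
 *	ret = regmap_register_patch(map, foo_patch, ARRAY_SIZE(foo_patch));
 *
 * where the third member of the second entry requests a 10us delay
 * after that write.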
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

/**
 * regmap_might_sleep() - Returns whether a regmap access might sleep.
 *
 * @map: Register map to operate on.
 *
 * Returns true if an access to the register might sleep, else false.
 */
bool regmap_might_sleep(struct regmap *map)
{
	return map->can_sleep;
}
EXPORT_SYMBOL_GPL(regmap_might_sleep);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);