/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
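
/*
 * Configuration sketch (hypothetical device, addresses invented): the
 * wr_table/rd_table checks above are driven by tables like this one,
 * which marks 0x00-0x0f writeable except for the 0x08-0x0b block.
 *
 *	static const struct regmap_range wr_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *	};
 *	static const struct regmap_range wr_no_ranges[] = {
 *		regmap_reg_range(0x08, 0x0b),
 *	};
 *	static const struct regmap_access_table wr_table = {
 *		.yes_ranges	= wr_yes_ranges,
 *		.n_yes_ranges	= ARRAY_SIZE(wr_yes_ranges),
 *		.no_ranges	= wr_no_ranges,
 *		.n_no_ranges	= ARRAY_SIZE(wr_no_ranges),
 *	};
 *
 * "No" ranges are checked first, so they punch holes in the "yes" ranges.
 */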

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
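
/*
 * Worked example (illustration only, not code from this file): with
 * reg_bits = 8, pad_bits = 0 and val_bits = 16 big-endian, a write of
 * value 0x1234 to register 0x2A is laid out in work_buf by the
 * format_reg()/format_val() pair chosen in __regmap_init() as:
 *
 *	work_buf[0] = 0x2A	(register, regmap_format_8)
 *	work_buf[1] = 0x12	(value MSB, regmap_format_16_be)
 *	work_buf[2] = 0x34	(value LSB)
 *
 * A little-endian value format would swap the last two bytes; the
 * regmap_parse_*() helpers below perform the inverse conversion on reads.
 */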

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];

	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

#ifdef CONFIG_HWSPINLOCK
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
#endif
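
/*
 * Minimal sketch of hwspinlock-backed locking (hypothetical driver code,
 * field values are placeholders): a regmap shared with a coprocessor can
 * route its locking through an hwspinlock by setting hwlock_id/hwlock_mode
 * in the config passed to __regmap_init() and friends, which then selects
 * one of the lock/unlock pairs above.
 *
 *	static const struct regmap_config shared_config = {
 *		.reg_bits	= 32,
 *		.val_bits	= 32,
 *		.hwlock_id	= 1,	// hwspinlock instance to request
 *		.hwlock_mode	= HWLOCK_IRQSTATE,
 *	};
 *
 * Note that hwlock_id 0 cannot be used with this scheme, since a zero
 * hwlock_id is how the config says "no hwspinlock".
 */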

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
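
/*
 * Illustration (device tree fragment, not code from this file): the value
 * endianness resolved by regmap_get_val_endian() below can be overridden
 * per device from DT, e.g.
 *
 *	codec@1a {
 *		compatible = "vendor,example-codec";	// placeholder node
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * With none of "big-endian", "little-endian" or "native-endian" present,
 * the bus default and finally REGMAP_ENDIAN_BIG apply.
 */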

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->hwlock_id) {
#ifdef CONFIG_HWSPINLOCK
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_map;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
#else
		ret = -EINVAL;
		goto err_map;
#endif
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val =
				regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (IS_ENABLED(CONFIG_HWSPINLOCK) && map->hwlock)
		hwspin_lock_free(map->hwlock);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().  Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
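
/*
 * Minimal usage sketch (hypothetical driver, register layout invented for
 * illustration): describe a two-bit field and update it through the field
 * API rather than open-coding the shift and mask.
 *
 *	static const struct reg_field gain_field =
 *		REG_FIELD(0x30, 4, 5);		// reg 0x30, bits [5:4]
 *
 *	field = devm_regmap_field_alloc(dev, map, gain_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 *	ret = regmap_field_update_bits_base(field, 0x3, 2,
 *					    NULL, false, false);
 *
 * The mask and value here are field-relative; the shift into bits [5:4]
 * happens inside regmap_field_update_bits_base() below.
 */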

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it is finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (IS_ENABLED(CONFIG_HWSPINLOCK) && map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
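
/*
 * Usage sketch (hypothetical MFD child driver): a function driver can pick
 * up the regmap its parent registered via devres instead of carrying its
 * own pointer.
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 *
 * The name argument is only needed for the rare parents that register
 * more than one regmap.
 */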

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
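
/*
 * Illustration (invented numbers, not from a real device): a device with an
 * 8-register window at 0x10 and a page selector at 0x0f could expose four
 * pages as one flat virtual range:
 *
 *	static const struct regmap_range_cfg pages = {
 *		.range_min	= 0x100,	// virtual start
 *		.range_max	= 0x11f,	// 4 pages * 8 registers
 *		.selector_reg	= 0x0f,
 *		.selector_mask	= 0x03,
 *		.selector_shift	= 0,
 *		.window_start	= 0x10,
 *		.window_len	= 8,
 *	};
 *
 * An access to virtual register 0x10a then writes page 1 to the selector
 * and touches physical register 0x12, which is exactly the arithmetic done
 * in _regmap_select_page() above.
 */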

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;

		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && map->max_raw_write < val_len)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
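
/*
 * Usage sketch (hypothetical driver, register addresses invented): single
 * registers go through regmap_write(), while preformatted blocks such as
 * firmware go through regmap_raw_write().
 *
 *	ret = regmap_write(map, 0x01, 0x80);	// e.g. a reset bit
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_raw_write(map, 0x40, fw->data, fw->size);
 *
 * fw here stands for a struct firmware obtained from request_firmware();
 * the payload must already be laid out in device byte order, since no
 * formatting is applied to raw writes.
 */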

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * Perform a read/modify/write cycle on the register field with the change,
 * async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io.  It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
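
/*
 * Usage sketch (hypothetical driver): unlike regmap_raw_write(), the values
 * here are in native CPU order and are formatted for the bus internally.
 *
 *	static const u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, 0x20, coeffs, ARRAY_SIZE(coeffs));
 *
 * With a 16-bit big-endian value format and stride 1 this becomes one raw
 * write of eight bytes covering registers 0x20-0x23 (or several writes, if
 * max_raw_write or use_single_write forces splitting).
 */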

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of the writes must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
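/*
 * Sketch of a sequence the multi-write paths above must honour (all
 * registers, values and the delay are hypothetical): the non-zero
 * delay_us on the second entry forces the preceding writes to be
 * flushed to the device before the delay is applied.
 *
 *	static const struct reg_sequence example_seq[] = {
 *		{ .reg = 0x00, .def = 0x8000 },
 *		{ .reg = 0x01, .def = 0x0001, .delay_us = 100 },
 *		{ .reg = 0x02, .def = 0x00ff },
 *	};
 */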
/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache, where the
 * set of register, value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
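/*
 * Usage sketch (hypothetical sequence name): applying a one-off,
 * vendor-supplied write sequence at probe time without disturbing the
 * register cache.
 *
 *	ret = regmap_multi_reg_write_bypassed(map, example_seq,
 *					      ARRAY_SIZE(example_seq));
 *	if (ret)
 *		dev_err(dev, "failed to apply init sequence: %d\n", ret);
 */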
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
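/*
 * Usage sketch (the firmware pointer and base register are hypothetical):
 * the data buffer must stay valid until regmap_async_complete() has
 * returned, which also reports any error from the queued transfers.
 *
 *	ret = regmap_raw_write_async(map, 0x1000, fw->data, fw->size);
 *	if (!ret)
 *		ret = regmap_async_complete(map);
 */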
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(map->work_buf);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
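/*
 * Usage sketch (register and bit are hypothetical): reads are satisfied
 * from the cache where one is configured, so a status register polled
 * this way would normally also be marked volatile in the map
 * configuration.
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(map, 0x02, &status);
 *	if (!ret && (status & BIT(0)))
 *		dev_dbg(dev, "device ready\n");
 */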
/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map,
					   reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);
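/*
 * Usage sketch (register layout and names are hypothetical): fields are
 * usually described with REG_FIELD() and allocated once at probe time,
 * after which regmap_field_read() returns the masked and shifted value.
 *
 *	static const struct reg_field example_enable = REG_FIELD(0x30, 2, 2);
 *	struct regmap_field *field;
 *	unsigned int enabled;
 *
 *	field = devm_regmap_field_alloc(dev, map, example_enable);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 *	ret = regmap_field_read(field, &enabled);
 */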
/**
 * regmap_fields_read() - Read a value from a single register field with
 *                        port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices do not support bulk read; for them we
		 * have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads; for them we have a
			 * series of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + regmap_get_offset(map, i),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes),
						       ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define a val_bytes;
				 * we assume that the values are native
				 * endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
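/*
 * Usage sketch (register block is hypothetical): the values land in the
 * caller's buffer in native endianness, one element per register.
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(map, 0x40, samples, ARRAY_SIZE(samples));
 */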
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to perform the write asynchronously
 * @force: Boolean indicating whether to force the write even if the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
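/*
 * Usage sketch (register and field are hypothetical): most callers reach
 * this through the regmap_update_bits() wrapper, which passes NULL/false
 * for the change, async and force arguments.
 *
 *	ret = regmap_update_bits(map, 0x05, GENMASK(3, 0), 0x7);
 */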
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);