/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
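/*
 * Illustrative sketch (not part of this file; names and ranges are made
 * up): a driver would typically feed the checks above either a callback
 * or an access table in its regmap_config:
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * Hooking this up via config->wr_table makes regmap_writeable() reject
 * any register outside 0x00..0x3f.
 */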
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif
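/*
 * Worked example (illustrative, values made up): with the 7_9 format a
 * write of val 0x123 to reg 0x05 is packed as (0x05 << 9) | 0x123 =
 * 0x0b23 and sent big-endian as the two bytes 0x0b 0x23.  The plain
 * regmap_format_*() helpers above do the same packing for the register
 * and value halves of the work buffer independently.
 */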
static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{
}
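/*
 * Illustrative sketch (assumed driver-side usage, not from this file):
 * the hwspinlock based locking above is selected from regmap_config,
 * e.g. for a register file shared with a coprocessor:
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.use_hwlock = true,
 *		.hwlock_id = 0,
 *		.hwlock_mode = HWLOCK_IRQSTATE,
 *	};
 *
 * HWLOCK_IRQSTATE maps to the irqsave/irqrestore pair above,
 * HWLOCK_IRQ to the _irq pair, and anything else to the plain pair.
 */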
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
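/*
 * Illustrative sketch (names are made up): a driver that already owns a
 * lock covering the bus can hand it to regmap instead of the built-in
 * mutex or spinlock, via the lock/unlock hooks checked in
 * __regmap_init() below:
 *
 *	static void foo_regmap_lock(void *arg)
 *	{
 *		struct foo *foo = arg;
 *
 *		mutex_lock(&foo->io_lock);
 *	}
 *
 * with config.lock = foo_regmap_lock, a matching config.unlock, and
 * config.lock_arg = foo.
 */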
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
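/*
 * Example (illustrative device tree snippet, not from this file): a DT
 * node can override the value endianness resolved above with one of the
 * three properties parsed by regmap_get_val_endian():
 *
 *	codec@10 {
 *		reg = <0x10>;
 *		little-endian;
 *	};
 *
 * With no config value, no DT property and no bus default,
 * REGMAP_ENDIAN_BIG is used.
 */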
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);

	val_endian = regmap_get_val_endian(dev, bus, config);
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}
	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/*
		 * Make sure that this register range has no selector
		 * or data window within its boundary.
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}
	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
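/*
 * Illustrative sketch (register layout is made up): a driver describes
 * a bitfield once with REG_FIELD() and then accesses it without
 * hand-written shifts and masks:
 *
 *	static const struct reg_field foo_gain = REG_FIELD(0x04, 1, 3);
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(dev, map, foo_gain);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	ret = regmap_field_write(f, 0x5);
 *
 * The write above touches only bits 3:1 of register 0x04;
 * regmap_field_write() is the regmap.h convenience wrapper around
 * regmap_field_update_bits_base() defined later in this file.
 */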
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it has finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free a register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
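/*
 * Illustrative sketch: an MFD child driver commonly borrows its
 * parent's map via the devres lookup above (the surrounding driver
 * structure is assumed):
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 *
 * No reference counting is involved; the map must simply outlive the
 * caller's use of it, which devres ordering normally guarantees for
 * child devices.
 */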
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have the selector register inside the data
	 * window.  In that case, the selector register is present on
	 * every page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}
	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/*
	 * If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/*
		 * regcache_drop_region() takes a lock that we already
		 * hold, thus call map->cache_ops->drop() directly.
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
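/*
 * Illustrative sketch (variable names are made up): callers that may
 * sit on top of a register-only bus with no raw formatting can guard
 * their fast path and fall back to regmap_bulk_write(), which handles
 * that case internally:
 *
 *	size_t max = regmap_get_raw_write_max(map);
 *
 *	if (regmap_can_raw_write(map) && (!max || len <= max))
 *		ret = regmap_raw_write(map, reg, buf, len);
 *	else
 *		ret = regmap_bulk_write(map, reg, vals, count);
 *
 * A regmap_get_raw_write_max() of 0 (declared just below) means the
 * bus advertises no length limit.
 */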
/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
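/*
 * Illustrative sketch (register address and value are made up): typical
 * single register access from a driver, with the stride check above in
 * mind; on a map with reg_stride = 2 only even addresses are accepted:
 *
 *	ret = regmap_write(map, 0x10, 0x1234);
 *	if (ret)
 *		return ret;
 *
 * regmap_read() on the read side of this API behaves symmetrically.
 */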
/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
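/*
 * Illustrative sketch (register, mask and value are made up): the
 * common regmap_update_bits() helper from regmap.h funnels into
 * regmap_update_bits_base() exactly as the field variant above does:
 *
 *	ret = regmap_update_bits(map, 0x20, 0x0c, 0x08);
 *
 * This reads register 0x20, changes only bits 3:2 to 0b10 and writes
 * the result back, skipping the bus write entirely if nothing changed.
 */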
/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple
 * transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
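/*
 * Illustrative sketch (values made up): writing four consecutive 16-bit
 * registers in one call; the caller keeps the values in CPU endianness
 * and regmap does the per-value byte formatting via parse_inplace()
 * above:
 *
 *	u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, 0x30, coeffs, ARRAY_SIZE(coeffs));
 *
 * On a map without raw write support this silently degrades to four
 * single register writes under one lock acquisition.
 */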
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes.  This also applies if
	 * there is a delay required at any point in the sequence.
	 */
/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since the
	 * order of writes must be preserved this algorithm chops the set
	 * each time the page changes. This also applies if there is a
	 * delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;

			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;

			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between page breaks or delays
		 * in the sequence.
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
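/*
 * For instance (hypothetical addresses and values), a delay entry in a
 * sequence such as the one below causes the writes coalesced so far to
 * be flushed to the bus and the delay applied before the remaining
 * entries are written, so the overall write order is preserved:
 *
 *	static const struct reg_sequence init_seq[] = {
 *		{ .reg = 0x01, .def = 0x10 },
 *		{ .reg = 0x02, .def = 0x20, .delay_us = 100 },
 *		{ .reg = 0x03, .def = 0x30 },
 *	};
 */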
/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 * device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache, where the
 * set of registers is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
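/*
 * Usage sketch (illustrative only, reusing the hypothetical init_seq
 * shown earlier):
 *
 *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
 *	if (ret)
 *		dev_err(dev, "init sequence failed: %d\n", ret);
 */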
/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 * asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
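/*
 * Usage sketch (illustrative only; 'fw' stands in for a hypothetical
 * firmware blob already laid out for the bus): the buffer must stay
 * valid until regmap_async_complete() confirms that all scheduled
 * writes have finished.
 *
 *	ret = regmap_raw_write_async(map, FW_BASE_REG, fw->data, fw->size);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_async_complete(map);
 */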
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
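/*
 * Usage sketch (illustrative only; the status register address is
 * hypothetical):
 *
 *	unsigned int status;
 *	int ret;
 *
 *	ret = regmap_read(map, 0x24, &status);
 *	if (ret)
 *		return ret;
 */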
/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
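/*
 * Usage sketch (illustrative only; addresses are hypothetical): reading
 * eight consecutive registers into a native-sized buffer.
 *
 *	u16 samples[8];
 *	int ret;
 *
 *	ret = regmap_bulk_read(map, 0x30, samples, ARRAY_SIZE(samples));
 */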
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating if the write should be done asynchronously
 * @force: Boolean indicating if the write should be forced even if the
 *         register value is unchanged
 *
 * Perform a read/modify/write cycle on a register map, applying the
 * async and force options and reporting via @change whether a write was
 * performed.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
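/*
 * Usage sketch (illustrative only; register and bits are hypothetical):
 * setting one bit and clearing another in a single read/modify/write,
 * synchronously and without forcing the write. The regmap_update_bits()
 * convenience wrapper in regmap.h expands to this call pattern with
 * change, async and force all clear.
 *
 *	ret = regmap_update_bits_base(map, 0x08, 0x3, 0x1,
 *				      NULL, false, false);
 */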
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete() - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch() - Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);