/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
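/*
 * Illustrative sketch (not part of this file): how a driver feeds the
 * checks above.  regmap_writeable()/regmap_readable() consult, in order,
 * max_register, the per-register callback, then the access table.  The
 * "foo" names below are hypothetical.
 *
 *	static const struct regmap_range foo_rd_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *		regmap_reg_range(0x20, 0x2f),
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges = foo_rd_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_rd_ranges),
 *	};
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x2f,
 *		.rd_table = &foo_rd_table,
 *	};
 */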
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
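/*
 * Worked example (illustrative only): for a device using
 * regmap_format_7_9_write() above, writing val 0x155 to reg 0x1a packs
 * the message as cpu_to_be16((0x1a << 9) | 0x155) == cpu_to_be16(0x3555),
 * so the bytes 0x35 0x55 go on the wire.  The standalone format helpers
 * below work the same way but fill in only the register or only the
 * value part of the message, with an optional left shift for padding.
 */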
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
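/*
 * Illustrative sketch (not part of this file): a driver that must share
 * one lock between several regmaps can supply its own callbacks instead
 * of the mutex/spinlock helpers above.  The "foo" names are hypothetical.
 *
 *	static void foo_regmap_lock(void *arg)
 *	{
 *		struct foo_priv *priv = arg;
 *
 *		mutex_lock(&priv->io_lock);
 *	}
 *
 *	static void foo_regmap_unlock(void *arg)
 *	{
 *		struct foo_priv *priv = arg;
 *
 *		mutex_unlock(&priv->io_lock);
 *	}
 *
 * The driver then sets .lock = foo_regmap_lock, .unlock = foo_regmap_unlock
 * and .lock_arg = priv in its regmap_config; __regmap_init() only honours
 * them when both callbacks are provided.
 */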
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}
	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
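/*
 * Illustrative usage sketch (not part of this file), assuming a
 * hypothetical I2C driver "foo": the devm variant above is normally
 * reached through a bus-specific wrapper such as devm_regmap_init_i2c().
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	static int foo_probe(struct i2c_client *i2c,
 *			     const struct i2c_device_id *id)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *		...
 *	}
 *
 * No cleanup in the driver's remove path is needed; devm_regmap_release()
 * runs regmap_exit() when the device is unbound.
 */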
static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().  Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once they are finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
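/*
 * Illustrative usage sketch (not part of this file): REG_FIELD() from
 * <linux/regmap.h> describes bits [msb:lsb] of a register, and the field
 * accessors then hide the shifting and masking.  The "foo" names and
 * register layout are hypothetical.
 *
 *	static const struct reg_field foo_en_field = REG_FIELD(0x30, 4, 5);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_en_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 *	ret = regmap_field_write(field, 0x2);	// RMW of bits 5:4 only
 *	ret = regmap_field_read(field, &val);	// val comes back shifted down
 */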
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}
	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
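/*
 * Illustrative sketch (not part of this file): a caller streaming a
 * large block over a size-limited bus can use the limit above to chunk
 * its regmap_raw_read() calls.  Hypothetical code with error handling
 * trimmed, assuming reg_stride == 1 and val_bytes bytes per register.
 *
 *	size_t max = regmap_get_raw_read_max(map);
 *	size_t done = 0, step;
 *
 *	while (done < len) {
 *		step = len - done;
 *		if (max && step > max)
 *			step = max;
 *		ret = regmap_raw_read(map, reg + done / val_bytes,
 *				      buf + done, step);
 *		if (ret)
 *			return ret;
 *		done += step;
 *	}
 */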
/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
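/*
 * Illustrative usage sketch (not part of this file): the typical
 * driver-facing pattern built on regmap_write()/regmap_read().  The
 * register names are hypothetical.
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, FOO_CTRL_RESET);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_read(map, FOO_REG_STATUS, &val);
 *	if (ret)
 *		return ret;
 *
 * Both calls take the map's lock internally, so no extra locking is
 * needed for a single access; a read-modify-write should use
 * regmap_update_bits() rather than an open-coded read + write.
 */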
/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && val_len > map->max_raw_write)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
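/*
 * Illustrative sketch (not part of this file): regmap_update_bits() and
 * friends in <linux/regmap.h> are thin wrappers around
 * regmap_update_bits_base(), exactly like the field helper above.  For
 * a hypothetical enable bit:
 *
 *	// Set bit 0 of FOO_REG_CTRL, leaving the other bits intact.
 *	ret = regmap_update_bits(map, FOO_REG_CTRL, BIT(0), BIT(0));
 *
 *	// Equivalent spelling through the base call.
 *	ret = regmap_update_bits_base(map, FOO_REG_CTRL, BIT(0), BIT(0),
 *				      NULL, false, false);
 */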
/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to write even if the value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io.  It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
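/*
 * Illustrative usage sketch (not part of this file): values passed to
 * regmap_bulk_write() are in native (CPU) endianness and native register
 * width; the map's format_val/parse_inplace hooks handle the wire
 * conversion.  Hypothetical 16-bit device:
 *
 *	u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, FOO_REG_COEFF_BASE, coeffs,
 *				ARRAY_SIZE(coeffs));
 *
 * Depending on bus capabilities and max_raw_write this becomes one raw
 * transfer, several chunked transfers, or a series of single writes, as
 * the branches above show.
 */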
/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes.  This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed.  However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus.  The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache, where the
 * set of registers is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
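/*
 * Illustrative sketch (not part of the original file): the intended
 * firmware-download pattern, keeping the caller's buffer alive until
 * regmap_async_complete() confirms all outstanding writes finished.
 * The register address and the caller-supplied buffer are invented
 * for the example.
 */
static int __maybe_unused example_fw_download(struct regmap *map,
					      const void *fw, size_t fw_len)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x80, fw, fw_len);
	if (ret != 0)
		return ret;

	/* fw must stay valid until this returns */
	return regmap_async_complete(map);
}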
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(map->work_buf);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
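/*
 * Illustrative sketch (not part of the original file): reading a block of
 * registers with regmap_raw_read() and unpacking the wire format by hand,
 * assuming a device with big-endian 16-bit values. The start register and
 * count are invented for the example.
 */
static int __maybe_unused example_raw_read(struct regmap *map, u16 *out)
{
	__be16 raw[4];
	int ret, i;

	ret = regmap_raw_read(map, 0x20, raw, sizeof(raw));
	if (ret != 0)
		return ret;

	/* No formatting is done on raw data, so convert explicitly */
	for (i = 0; i < ARRAY_SIZE(raw); i++)
		out[i] = be16_to_cpu(raw[i]);

	return 0;
}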
/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;
	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);
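/*
 * Illustrative sketch (not part of the original file): describing bits
 * [3:0] of a status register as a field and reading it back through
 * regmap_field_read(). The register address and bit positions are
 * invented for the example.
 */
static int __maybe_unused example_field_read(struct device *dev,
					     struct regmap *map,
					     unsigned int *status)
{
	struct reg_field status_field = REG_FIELD(0x30, 0, 3);
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, status_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* Masking and shifting are handled by the field accessor */
	return regmap_field_read(field, status);
}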
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices do not support bulk read; for
		 * them we have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads, for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + regmap_get_offset(map, i),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes), ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define a val_bytes;
				 * we assume that the values are native
				 * endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
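/*
 * Illustrative sketch (not part of the original file): bulk-reading four
 * consecutive registers into a native-endian buffer; the core chooses
 * between block and word-by-word access internally. The start register
 * is invented for the example and a 16-bit value size is assumed.
 */
static int __maybe_unused example_bulk_read(struct regmap *map)
{
	u16 vals[4];
	int ret;

	ret = regmap_bulk_read(map, 0x40, vals, ARRAY_SIZE(vals));
	if (ret != 0)
		return ret;

	dev_dbg(map->dev, "regs 0x40..0x43: %04x %04x %04x %04x\n",
		vals[0], vals[1], vals[2], vals[3]);
	return 0;
}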
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the update should be done asynchronously
 * @force: Boolean indicating whether the write should be forced even if the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
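/*
 * Illustrative sketch (not part of the original file): setting bit 0 of
 * a control register via the read/modify/write helper. Callers normally
 * reach this through the regmap_update_bits() wrapper in linux/regmap.h,
 * which passes change = NULL, async = false, force = false. The register
 * address and mask are invented for the example.
 */
static int __maybe_unused example_update_bits(struct regmap *map)
{
	bool changed;
	int ret;

	ret = regmap_update_bits_base(map, 0x50, BIT(0), BIT(0),
				      &changed, false, false);
	if (ret == 0 && changed)
		dev_dbg(map->dev, "enable bit was toggled\n");

	return ret;
}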
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
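/*
 * Illustrative sketch (not part of the original file): registering a
 * vendor-style errata patch at probe time; the same sequence is then
 * replayed automatically after every regcache_sync(). The registers
 * and values are invented for the example.
 */
static int __maybe_unused example_register_patch(struct regmap *map)
{
	static const struct reg_sequence errata[] = {
		{ .reg = 0x7e, .def = 0x0004 },
		{ .reg = 0x7f, .def = 0x91f0 },
	};

	return regmap_register_patch(map, errata, ARRAY_SIZE(errata));
}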
/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);