/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}
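/*
 * Illustrative sketch (hypothetical device, not part of this file's
 * API): a regmap_config access table as consumed by the predicates
 * above.  no_ranges are checked before yes_ranges in
 * regmap_check_range_table(), and an empty yes_ranges list means
 * "everything not explicitly denied is allowed".  Here 0x10-0x1f is
 * a read-only status block carved out of an otherwise writable map:
 *
 *	static const struct regmap_range foo_wr_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *	static const struct regmap_range foo_wr_no_ranges[] = {
 *		regmap_reg_range(0x10, 0x1f),
 *	};
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_yes_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
 *		.no_ranges = foo_wr_no_ranges,
 *		.n_no_ranges = ARRAY_SIZE(foo_wr_no_ranges),
 *	};
 */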
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}
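/*
 * Worked example (illustrative): for a device with 7-bit registers and
 * 9-bit values, regmap_format_7_9_write() above packs both fields into
 * one big-endian 16-bit word:
 *
 *	reg = 0x1a, val = 0x155
 *	(0x1a << 9) | 0x155 = 0x3400 | 0x155 = 0x3555
 *
 * which goes on the wire as the bytes 0x35, 0x55.
 */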
static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
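/*
 * Sketch (hypothetical driver, for illustration only): a driver that
 * must share a lock with other subsystems can override the built-in
 * mutex/spinlock by supplying its own callbacks in the config.  Both
 * lock and unlock must be set together (see __regmap_init() below);
 * lock_arg is passed through to the callbacks:
 *
 *	static void foo_regmap_lock(void *arg)
 *	{
 *		mutex_lock(arg);
 *	}
 *
 *	static void foo_regmap_unlock(void *arg)
 *	{
 *		mutex_unlock(arg);
 *	}
 *
 *	config.lock = foo_regmap_lock;
 *	config.unlock = foo_regmap_unlock;
 *	config.lock_arg = &foo->shared_mutex;
 */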
static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
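/*
 * Illustrative devicetree fragment (hypothetical node): a node carrying
 * the "little-endian" property makes regmap_get_val_endian() below pick
 * REGMAP_ENDIAN_LITTLE whenever the config leaves the value endianness
 * at REGMAP_ENDIAN_DEFAULT:
 *
 *	syscon@1000 {
 *		compatible = "syscon";
 *		reg = <0x1000 0x100>;
 *		little-endian;
 *	};
 */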
enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);
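	/*
	 * Worked example (illustrative) of the sizing above: with
	 * reg_bits = 7, pad_bits = 0 and val_bits = 9 the totals are
	 * reg_bytes = DIV_ROUND_UP(7, 8) = 1, val_bytes = 2 and
	 * buf_size = DIV_ROUND_UP(7 + 9 + 0, 8) = 2, i.e. register and
	 * value share a single 16-bit work buffer.
	 */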
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;
	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}
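/*
 * Typical usage sketch (hypothetical I2C driver): callers normally go
 * through one of the bus-specific wrappers from <linux/regmap.h>, such
 * as devm_regmap_init_i2c(), which funnel into __devm_regmap_init()
 * below:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */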
struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);

/**
 * devm_regmap_field_free(): Free a register field allocated using
 * devm_regmap_field_alloc. Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per the
 * device-driver life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);
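/*
 * Sketch (hypothetical field): REG_FIELD() from <linux/regmap.h> builds
 * the struct reg_field describing, here, bits [5:4] of register 0x20.
 * Once allocated, the field can be accessed without repeating the shift
 * and mask at every call site:
 *
 *	static const struct reg_field foo_mode_field = REG_FIELD(0x20, 4, 5);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_mode_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *	ret = regmap_field_write(field, 2);
 */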
/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
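/*
 * Usage sketch (hypothetical caller): a child device, for example an
 * MFD cell, can look up the regmap its parent registered rather than
 * receiving a pointer through platform data:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */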
/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	   window.  In that case, the selector register is located on
	   every page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
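/*
 * Worked example (illustrative): with range_min = 0x100 and
 * window_len = 4, an access to virtual register 0x109 gives
 * win_page = (0x109 - 0x100) / 4 = 2 and
 * win_offset = (0x109 - 0x100) % 4 = 1, so page 2 is written to the
 * selector register and the access is redirected to window_start + 1.
 */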
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	u8 *u8 = map->work_buf;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + (i * map->reg_stride)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map, reg + (i * map->reg_stride),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	u8[0] |= map->write_flag_mask;

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}
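/*
 * Illustrative note: the write paths above all assemble transfers in
 * map->work_buf, whose layout for a formatted bus is
 * [reg_bytes][pad_bytes][val_bytes].  Assuming reg_bits = 8,
 * pad_bits = 8, val_bits = 16, big-endian values and a zero
 * write_flag_mask, a write of 0xbeef to register 0x42 is assembled as
 * the bytes
 *
 *	0x42 0x00 0xbe 0xef
 */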
static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
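/*
 * Usage sketch (hypothetical register names): single-register accessors
 * are the common case in driver code, e.g. kicking a device reset and
 * checking the result:
 *
 *	ret = regmap_write(map, FOO_REG_RESET, FOO_RESET_MAGIC);
 *	if (ret)
 *		dev_err(dev, "reset failed: %d\n", ret);
 */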
/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && val_len > map->max_raw_write)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/**
 * regmap_field_write(): Write a value to a single register field
 *
 * @field: Register field to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_write(struct regmap_field *field, unsigned int val)
{
	return regmap_update_bits(field->regmap, field->reg,
				  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_write);

/**
 * regmap_field_update_bits(): Perform a read/modify/write cycle
 * on the register field
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits(field->regmap, field->reg,
				  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits);

/**
 * regmap_fields_write(): Write a value to a single register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_write(struct regmap_field *field, unsigned int id,
			unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_update_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_write);

int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
			      unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_write_bits(field->regmap,
				 field->reg + (field->id_offset * id),
				 field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_force_write);
/**
 * regmap_fields_update_bits(): Perform a read/modify/write cycle
 * on the register field
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
			      unsigned int mask, unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits);

/*
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to write to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (map->bus && !map->format.parse_inplace)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we have a
	 * series of single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io.  It does not
	 * allow val_bytes of 3 for example.
	 * The second one is used for busses which do not have this
	 * limitation and can write arbitrary value lengths.
	 */
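	/*
	 * Worked example (illustrative) of the chunked path below: with
	 * val_bytes = 2, val_count = 10 (total_size = 20) and a bus
	 * limit of max_raw_write = 6, chunk_size is trimmed to 6 bytes,
	 * chunk_count = 20 / 6 = 3 full chunks of three registers each,
	 * and the remaining 2 bytes are written in a final transfer.
	 */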
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map, reg + (i * map->reg_stride),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}
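/*
 * Illustrative wire layout: for an 8-bit register / 8-bit value map
 * with no padding, _regmap_raw_multi_reg_write() of the sequence
 * {0x01, 0xaa}, {0x05, 0xbb} linearises to the single transfer
 *
 *	0x01 0xaa 0x05 0xbb
 *
 * i.e. R1,V1,R2,V2 rather than one register address followed by a
 * block of values.
 */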
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes.  This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/*
 * regmap_multi_reg_write(): Write multiple registers to the device
 *
 * where the set of register,value pairs are supplied in any order,
 * possibly not all in a single range.
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers
 * are addressed.  However, this alternative block multi write mode will
 * send the data as R1,V1,R2,V2,..,Rn,Vn on the target bus.  The target
 * device must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);

/*
 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
 *                                    device but not the cache
 *
 * The registers are supplied in any order.
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client
 * devices that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 *                           asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
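 *
 * A sketch of the intended pattern, assuming a hypothetical fw buffer
 * that stays allocated until completion; FW_LOAD_REG is a made-up
 * device register, not part of this API:
 *
 *	ret = regmap_raw_write_async(map, FW_LOAD_REG, fw->data, fw->size);
 *	if (ret == 0)
 *		ret = regmap_async_complete(map);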
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	u8 *u8 = map->work_buf;
	int ret;

	WARN_ON(!map->bus);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	/*
	 * Some buses or devices flag reads by setting the high bits in the
	 * register address; since it's always the high bits for all
	 * current formats we can do this here rather than in
	 * formatting. This may break if we get interesting formats.
	 */
	u8[0] |= map->read_flag_mask;

	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
	if (ret == 0)
		*val = map->format.parse_val(map->work_buf);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
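 *
 * For example (CHIP_ID_REG standing in for a device-specific address):
 *
 *	unsigned int id;
 *	ret = regmap_read(map, CHIP_ID_REG, &id);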
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + (i * map->reg_stride),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read(): Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
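 *
 * The register actually accessed is field->reg + id * id_offset; for
 * instance, a field based at 0x04 with an id_offset of 0x10 and an id
 * of 2 reads register 0x24, then masks and shifts the result just as
 * regmap_field_read() does.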
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	if (map->bus && map->format.parse_inplace &&
	    (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices do not support bulk read; for those we
		 * fall back to a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads; for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = regmap_read(map, reg + (i * map->reg_stride),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes),
						       ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define val_bytes;
				 * we assume that the values are native
				 * endian.
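				 *
				 * For example, with 2-byte values each
				 * register read lands in one element
				 * of a u16 array in CPU byte order,
				 * as the switch below does.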
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits(struct regmap *map, unsigned int reg,
		       unsigned int mask, unsigned int val)
{
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);

/**
 * regmap_write_bits: Perform a read/modify/write cycle on the register map,
 *                    writing the register even when the modified value
 *                    matches the current one
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_write_bits(struct regmap *map, unsigned int reg,
		      unsigned int mask, unsigned int val)
{
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_bits);

/**
 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
 *                           map asynchronously
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
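 *
 * A minimal sketch (IRQ_MASK_REG is a hypothetical register, not part
 * of this API):
 *
 *	ret = regmap_update_bits_async(map, IRQ_MASK_REG, BIT(0), BIT(0));
 *	...
 *	ret = regmap_async_complete(map);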
 */
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
			     unsigned int mask, unsigned int val)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_async);

/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
			     unsigned int mask, unsigned int val,
			     bool *change)
{
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, change, false);
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);

/**
 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
 *                                 register map asynchronously and report if
 *                                 updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
				   unsigned int mask, unsigned int val,
				   bool *change)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_update_bits(map, reg, mask, val, change, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch: Register and apply register updates to be applied
 *                        on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

/*
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register(): Report the max register value
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride(): Report the register address stride
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
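 *
 * For example, generic code stepping through a block of registers
 * might use (base and count being hypothetical):
 *
 *	for (i = 0; i < count; i++)
 *		regmap_read(map, base + i * regmap_get_reg_stride(map), &v);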
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);