// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	struct gpio_desc	*wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

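/*
 * Example: the provider callback contract invoked above. This is an
 * illustrative sketch only (the mmio_* names and the priv layout are
 * hypothetical, not part of this file): callbacks receive the pointer the
 * provider passed in nvmem_config::priv and return 0 on success.
 *
 *	static int mmio_reg_read(void *priv, unsigned int offset,
 *				 void *val, size_t bytes)
 *	{
 *		struct mmio_nvmem *ctx = priv;
 *
 *		memcpy_fromio(val, ctx->base + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int mmio_reg_write(void *priv, unsigned int offset,
 *				  void *val, size_t bytes)
 *	{
 *		struct mmio_nvmem *ctx = priv;
 *
 *		memcpy_toio(ctx->base + offset, val, bytes);
 *		return 0;
 *	}
 */
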
static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

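/*
 * Example: what the checks above mean for a userspace reader of
 * /sys/bus/nvmem/devices/<dev>/nvmem. A sketch, assuming a hypothetical
 * "foo0" device with stride and word_size of 4: reads must start on a
 * 4-byte boundary and request at least 4 bytes, and the returned count is
 * rounded down to a multiple of 4.
 *
 *	int fd = open("/sys/bus/nvmem/devices/foo0/nvmem", O_RDONLY);
 *	char buf[16];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 8);	// offset 8: aligned, OK
 *	ssize_t bad = pread(fd, buf, sizeof(buf), 2);	// offset 2: -EINVAL
 */
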
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_setup_compat() - Create an additional binary entry in the driver's
 * sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					const struct nvmem_cell_info *info,
					struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				const struct nvmem_cell_info *info,
				struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

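/*
 * Example: a minimal consumer of the notifier chain above. A sketch (the
 * foo_* names are hypothetical); the events delivered are NVMEM_ADD,
 * NVMEM_REMOVE, NVMEM_CELL_ADD and NVMEM_CELL_REMOVE.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */
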
/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

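/*
 * Example: the kind of table nvmem_add_cells_from_table() above consumes.
 * A sketch (the foo_* names and offsets are hypothetical); providers
 * register the table with nvmem_add_cell_table(), declared in
 * <linux/nvmem-provider.h>, and nvmem_name must match the registered
 * device name.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */
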
static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_add(cell);
	}

	return 0;
}

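/*
 * Example: keepout regions as checked by nvmem_validate_keepouts() above.
 * A sketch (the foo_* names and values are hypothetical): regions must be
 * sorted, non-overlapping, stride-aligned and at least word_size bytes
 * apart, and reads inside a region return 'value' instead of touching the
 * hardware.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0x00 },
 *		{ .start = 0x40, .end = 0x44, .value = 0xff },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */
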
/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Set the id early so the error path below frees the right one. */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

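/*
 * Example: a minimal provider probe using devm_nvmem_register(). A sketch
 * (the foo_* names and foo_ctx are hypothetical); only dev and at least
 * one of reg_read/reg_write are mandatory, everything else shown here is
 * optional.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo",
 *			.id = NVMEM_DEVID_AUTO,
 *			.size = 256,
 *			.word_size = 1,
 *			.stride = 1,
 *			.reg_read = foo_reg_read,
 *			.reg_write = foo_reg_write,
 *			.priv = foo_ctx,
 *		};
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
 *	}
 */
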
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;

	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

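/*
 * Example: raw device-level access from a consumer. A sketch (the "foo"
 * name is hypothetical); on DT systems the name is resolved via the
 * nvmem/nvmem-names properties, otherwise by device name.
 *
 *	struct nvmem_device *nvmem = nvmem_device_get(dev, "foo");
 *	u8 buf[16];
 *
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *	nvmem_device_read(nvmem, 0, sizeof(buf), buf);
 *	nvmem_device_put(nvmem);
 */
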
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

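/*
 * Example: the lookup entries consumed by nvmem_cell_get_from_lookup()
 * below, for boards without device tree. A sketch (the foo_* names are
 * hypothetical); entries are registered with nvmem_add_cell_lookups(),
 * declared in <linux/nvmem-consumer.h>.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-eth.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */
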
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

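/*
 * Example: the typical consumer sequence for the cell API. A sketch (the
 * "mac-address" cell name is hypothetical); the buffer returned by
 * nvmem_cell_read() is kmalloc'ed and owned by the caller.
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "mac-address");
 *	size_t len;
 *	void *buf;
 *
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	buf = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */
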
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed automatically
 * once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/*
	 * clear msb bits if any leftover in the last byte; guard against
	 * GENMASK(-1, 0) when nbits is a whole number of bytes
	 */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

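/*
 * Worked example for the shift above: a cell with bit_offset = 2 and
 * nbits = 10 occupies two raw bytes b0 and b1. After the in-place shift,
 *
 *	buf[0] = (b0 >> 2) | (b1 << 6);
 *	buf[1] = (b1 >> 2) & GENMASK(1, 0);
 *
 * i.e. the ten payload bits are moved down to bit 0 and the leftover high
 * bits of the last byte are cleared.
 */
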
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

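/*
 * Example: the common one-line consumer pattern for the helpers above. A
 * sketch (the "calibration" cell name is hypothetical); the cell must be
 * exactly four bytes for nvmem_cell_read_u32() to succeed.
 *
 *	u32 cal;
 *	int ret = nvmem_cell_read_u32(dev, "calibration", &cal);
 *
 *	if (ret)
 *		return ret;
 */
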
/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

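/*
 * Example: an ad-hoc cell described at the call site, as consumed by
 * nvmem_device_cell_read() below. A sketch (the offset and size are
 * hypothetical); the offset must respect the device's stride, as checked
 * by nvmem_cell_info_to_nvmem_cell_nodup().
 *
 *	struct nvmem_cell_info info = {
 *		.offset	= 0x100,
 *		.bytes	= 8,
 *	};
 *	u8 buf[8];
 *	ssize_t len = nvmem_device_cell_read(nvmem, &info, buf);
 */
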
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");