// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	struct gpio_desc	*wp_gpio;
	struct nvmem_layout	*layout;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static DEFINE_SPINLOCK(nvmem_layout_lock);
static LIST_HEAD(nvmem_layouts);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{

	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

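/*
 * Worked example (illustrative, not from the original source): with a
 * single keepout covering [4, 8) whose value is 0xff, a 12-byte read at
 * offset 0 is split by nvmem_access_with_keepouts() into three steps: a
 * real read of bytes [0, 4), a memset of 0xff over bytes [4, 8), and a
 * real read of bytes [8, 12). A write over the same range performs the
 * two real accesses and silently skips the keepout region.
 */
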
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

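/*
 * Illustrative examples (not from the original source): a root-only OTP
 * device with only a reg_read hook ends up with mode 0400; a writable
 * EEPROM readable by everyone gets 0644; a device marked read_only or
 * lacking a reg_write hook never gets the 0200 write bit.
 */
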
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_setup_compat() - Create an additional binary entry in
 * drivers sys directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);

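/*
 * Usage sketch (illustrative; the cell name and offsets are
 * hypothetical), assuming a provider that already holds a valid
 * struct nvmem_device pointer:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *
 *	ret = nvmem_add_one_cell(nvmem, &info);
 *
 * The cell name is duplicated internally with kstrdup_const(), so the
 * caller may pass a stack or otherwise short-lived string.
 */
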
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

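/*
 * Usage sketch (illustrative; my_nvmem_event and my_nb are hypothetical
 * names): a listener receives the NVMEM_ADD/NVMEM_REMOVE and
 * NVMEM_CELL_ADD/NVMEM_CELL_REMOVE actions raised through this chain:
 *
 *	static int my_nvmem_event(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_nvmem_event,
 *	};
 *	...
 *	nvmem_register_notifier(&my_nb);
 */
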
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_layout *layout = nvmem->layout;
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
		}

		info.np = of_node_get(child);

		if (layout && layout->fixup_cell_info)
			layout->fixup_cell_info(nvmem, layout, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

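/*
 * Device tree sketch of the cell format parsed above (illustrative node
 * names and property values):
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		calibration: calib@10 {
 *			reg = <0x10 0x2>;	// offset 0x10, 2 bytes
 *			bits = <4 10>;		// bit_offset 4, 10 bits
 *		};
 *	};
 *
 * Each child node with a "reg" property becomes one cell; the optional
 * "bits" property narrows the cell to a bit field within those bytes.
 */
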
static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
	layout->owner = owner;

	spin_lock(&nvmem_layout_lock);
	list_add(&layout->node, &nvmem_layouts);
	spin_unlock(&nvmem_layout_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	spin_lock(&nvmem_layout_lock);
	list_del(&layout->node);
	spin_unlock(&nvmem_layout_lock);
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
	struct device_node *layout_np, *np = nvmem->dev.of_node;
	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);

	layout_np = of_get_child_by_name(np, "nvmem-layout");
	if (!layout_np)
		return NULL;

	/*
	 * In case the nvmem device was built-in while the layout was built as a
	 * module, we shall manually request the layout driver loading otherwise
	 * we'll never have any match.
	 */
	of_request_module(layout_np);

	spin_lock(&nvmem_layout_lock);

	list_for_each_entry(l, &nvmem_layouts, node) {
		if (of_match_node(l->of_match_table, layout_np)) {
			if (try_module_get(l->owner))
				layout = l;

			break;
		}
	}

	spin_unlock(&nvmem_layout_lock);
	of_node_put(layout_np);

	return layout;
}

static void nvmem_layout_put(struct nvmem_layout *layout)
{
	if (layout)
		module_put(layout->owner);
}

static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
{
	struct nvmem_layout *layout = nvmem->layout;
	int ret;

	if (layout && layout->add_cells) {
		ret = layout->add_cells(&nvmem->dev, nvmem, layout);
		if (ret)
			return ret;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_layout_get_container() - Get OF node to layout container.
 *
 * @nvmem: nvmem device.
 *
 * Return: a node pointer with refcount incremented or NULL if no
 * container exists. Use of_node_put() on it when done.
 */
struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
}
EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
#endif

const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
					struct nvmem_layout *layout)
{
	struct device_node __maybe_unused *layout_np;
	const struct of_device_id *match;

	layout_np = of_nvmem_layout_get_container(nvmem);
	match = of_match_node(layout->of_match_table, layout_np);

	return match ? match->data : NULL;
}
EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	/*
	 * If the driver supplied a layout by config->layout, the module
	 * pointer will be NULL and nvmem_layout_put() will be a noop.
	 */
	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
	if (IS_ERR(nvmem->layout)) {
		rval = PTR_ERR(nvmem->layout);
		nvmem->layout = NULL;

		if (rval == -EPROBE_DEFER)
			goto err_teardown_compat;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_legacy_of(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

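/*
 * Provider sketch (illustrative; my_read, my_priv and the device name
 * are hypothetical):
 *
 *	static int my_read(void *priv, unsigned int offset,
 *			   void *val, size_t bytes) { ... }
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "my-otp",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.read_only	= true,
 *		.size		= 256,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.reg_read	= my_read,
 *		.priv		= my_priv,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */
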
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

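/*
 * Usage note: with the managed variant a provider can drop the explicit
 * nvmem_unregister() call from its remove path; the devm action
 * registered above unregisters the nvmem device automatically when
 * @dev is unbound.
 */
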
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

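/*
 * Consumer device tree sketch for the lookup above (illustrative labels
 * and node names):
 *
 *	ethernet@0 {
 *		...
 *		nvmem-cells = <&calibration>;
 *		nvmem-cell-names = "calibration";
 *	};
 *
 * The driver then resolves the cell with
 * of_nvmem_cell_get(dev->of_node, "calibration") or, more commonly,
 * through nvmem_cell_get()/devm_nvmem_cell_get().
 */
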
/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

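/*
 * Worked example (illustrative): for a cell with bit_offset = 4 and
 * nbits = 10, cell->bytes is DIV_ROUND_UP(10 + 4, 8) = 2. Given raw
 * bytes {0xab, 0xcd} (little-endian value 0xcdab), the in-place shift
 * computes (0xcdab >> 4) & GENMASK(9, 0) = 0x0da, leaving the buffer
 * as {0xda, 0x00}.
 */
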
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

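/*
 * Consumer sketch (illustrative; "mac-address" is a hypothetical cell
 * name): read a cell and release everything once done. The returned
 * buffer is allocated by nvmem_cell_read() and owned by the caller.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */
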
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

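/*
 * Worked example (illustrative): for a 3-byte cell containing
 * {0x12, 0x34, 0x56}, the little-endian accumulation above yields
 * 0x12 | (0x34 << 8) | (0x56 << 16) = 0x563412, regardless of the
 * CPU's native endianness.
 */
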
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

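/*
 * Lookup sketch (illustrative names): board code on a non-DT system can
 * connect the cell "mac-address" on provider "myprovider0" to the
 * device "mydev.0" under the connection id "mac":
 *
 *	static struct nvmem_cell_lookup my_lookups[] = {
 *		{
 *			.nvmem_name	= "myprovider0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "mydev.0",
 *			.con_id		= "mac",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(my_lookups, ARRAY_SIZE(my_lookups));
 *
 * nvmem_cell_get(dev, "mac") then resolves through this table when the
 * device has no of_node.
 */
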
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");