// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{

	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

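/*
 * Example: a provider can mask off protected words by passing keepout
 * regions in its nvmem_config. This is an illustrative sketch only; the
 * device name, offsets and the foo_reg_read() callback below are
 * hypothetical and not part of this file.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *		{ .start = 0x40, .end = 0x44, .value = 0x00 },
 *	};
 *
 *	static const struct nvmem_config foo_config = {
 *		.name		= "foo-otp",
 *		.reg_read	= foo_reg_read,
 *		.keepout	= foo_keepouts,
 *		.nkeepout	= ARRAY_SIZE(foo_keepouts),
 *	};
 *
 * Reads covering a keepout return .value for the protected bytes and writes
 * skip them entirely (see nvmem_access_with_keepouts() above).
 */
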
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it
	 * can be forced into read-write mode using the 'force_ro'
	 * attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0; /* Attribute not visible */

	return attr->mode;
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name	= "cells",
	};
	struct nvmem_cell_entry *entry;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
				       sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!group.bin_attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		group.bin_attrs[i] = &attrs[i];
		i++;
	}

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

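/*
 * Example: a provider (or layout driver) can describe a cell at runtime with
 * a struct nvmem_cell_info and register it via nvmem_add_one_cell(). This is
 * an illustrative sketch only; the "mac-address" name, the offset and the
 * nvmem pointer below are hypothetical.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *	int err;
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 *	if (err)
 *		return err;
 */
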
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

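/*
 * Example: code that wants to react to providers or cells coming and going
 * can register a notifier block on the chain above. Illustrative sketch only;
 * foo_nvmem_event() and foo_nb are hypothetical.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 *	...
 *	nvmem_unregister_notifier(&foo_nb);
 */
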
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

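/*
 * Example: a minimal provider built on top of devm_nvmem_register(). This is
 * an illustrative sketch only; the "foo" driver, its shadow buffer and the
 * sizes below are hypothetical and not part of this file.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset, void *val,
 *				size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);	// device-specific I/O
 *		return 0;
 *	}
 *
 * In the driver's probe():
 *
 *	struct nvmem_config config = {
 *		.name		= "foo-efuse",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.dev		= dev,
 *		.priv		= foo,
 *		.read_only	= true,
 *		.stride		= 4,
 *		.word_size	= 4,
 *		.size		= 256,
 *		.reg_read	= foo_reg_read,
 *	};
 *	struct nvmem_device *nvmem;
 *
 *	nvmem = devm_nvmem_register(dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */
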
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;

	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

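/*
 * Example: a consumer that reads raw bytes from a whole nvmem device, using
 * the devres-managed getter above together with nvmem_device_read() (defined
 * later in this file, declared in <linux/nvmem-consumer.h>). Illustrative
 * sketch; the "foo-efuse" id and the 16-byte serial number are hypothetical.
 *
 *	struct nvmem_device *nvmem;
 *	u8 serial[16];
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, "foo-efuse");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x10, sizeof(serial), serial);
 *	if (ret < 0)
 *		return ret;
 */
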
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

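/*
 * Example: the typical consumer pattern around nvmem_cell_get(). Illustrative
 * sketch; the "mac-address" cell name is hypothetical and would normally come
 * from the consumer's nvmem-cell-names property (DT) or a registered lookup.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	void *buf;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	buf = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *
 *	// ... use len bytes at buf ...
 *	kfree(buf);
 */
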
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

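/*
 * Example: the fixed-width helpers above collapse the get/read/put sequence
 * into one call. Illustrative sketch; the "calibration" cell name is
 * hypothetical.
 *
 *	u32 cal;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "calibration", &cal);
 *	if (ret)
 *		return ret;
 */
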
static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

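/*
 * Example: board code on a non-DT platform can describe cells and wire them
 * to consumers with a cell table plus lookup entries. Illustrative sketch
 * only; every name below ("foo-eeprom0", "bar-mac", the "bar.0" device and
 * the "mac-address" con_id) is hypothetical.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "bar-mac", .offset = 0x20, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-eeprom0",
 *			.cell_name	= "bar-mac",
 *			.dev_id		= "bar.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 *
 * The "bar.0" device can then call nvmem_cell_get(dev, "mac-address") once
 * the "foo-eeprom0" provider has registered.
 */
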
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");