// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
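/*
 * Illustrative sketch (not part of the original file): how a provider might
 * describe keepout regions so that the walk above masks them out. The offsets
 * here are made up for the example. With this table, a read of 0x00-0x2f is
 * split into a real read of 0x00-0x0f, a memset() of 0xff for 0x10-0x1f, and
 * a real read of 0x20-0x2f; a write simply skips the keepout range.
 *
 *	static const struct nvmem_keepout example_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *	};
 *
 *	config.keepout = example_keepouts;
 *	config.nkeepout = ARRAY_SIZE(example_keepouts);
 *
 * Keepouts must be sorted, non-overlapping, and aligned to both word_size
 * and stride; nvmem_validate_keepouts() below enforces this at registration.
 */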
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

/* Cell attributes will be dynamically allocated */
static struct attribute_group nvmem_cells_group = {
	.name		= "cells",
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static const struct attribute_group *nvmem_cells_groups[] = {
	&nvmem_cells_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, for backwards compatibility with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct bin_attribute **cells_attrs, *attrs;
	struct nvmem_cell_entry *entry;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
		nvmem_cells_group.bin_attrs = NULL;
		goto unlock_mutex;
	}

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
				   sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!cells_attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444;
		attrs[i].size = entry->bytes;
		attrs[i].read = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		cells_attrs[i] = &attrs[i];
		i++;
	}

	nvmem_cells_group.bin_attrs = cells_attrs;

	ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}
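/*
 * Illustrative note (not part of the original file): each populated cell
 * shows up under /sys/bus/nvmem/devices/<dev>/cells/ with its byte offset
 * and bit offset encoded in hex in the attribute name, per the "%s@%x,%x"
 * format above. For a hypothetical cell named "mac-address" at byte offset
 * 0x10 with bit_offset 0, the read-only attribute created by the loop
 * above would be:
 *
 *	/sys/bus/nvmem/devices/<dev>/cells/mac-address@10,0
 */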
#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}
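/*
 * Illustrative sketch (not part of the original file): how a provider or
 * layout driver might describe one cell and add it with nvmem_add_one_cell().
 * The cell name and offset are made up; "nvmem" is assumed to be a
 * registered device. A 6-byte cell at offset 0x10:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x10,
 *		.bytes	= 6,
 *	};
 *	int err = nvmem_add_one_cell(nvmem, &info);
 *
 * When .nbits is set, nvmem_cell_info_to_nvmem_cell_entry_nodup() above
 * recomputes .bytes from .nbits and .bit_offset, so sub-byte cells only
 * need the bit description.
 */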
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}
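/*
 * Illustrative device tree fragment (not part of the original file): the
 * kind of "fixed-layout" cells parsed by nvmem_add_cells_from_dt() above.
 * The node and cell names are made up. "reg" gives <byte-offset byte-length>
 * and the optional "bits" gives <bit-offset nbits> within that range:
 *
 *	eeprom@52 {
 *		nvmem-layout {
 *			compatible = "fixed-layout";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			calib: calibration@10 {
 *				reg = <0x10 0x2>;
 *				bits = <4 10>;
 *			};
 *		};
 *	};
 */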
int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register an nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
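/*
 * Illustrative provider sketch (not part of the original file): a minimal
 * driver wrapping its own storage in an nvmem device. Everything named here
 * (my_priv, my_reg_read, my_probe) is hypothetical; only the nvmem_config
 * fields and the registration call come from this API.
 *
 *	struct my_priv { u8 mem[256]; };
 *
 *	static int my_reg_read(void *priv, unsigned int offset,
 *			       void *val, size_t bytes)
 *	{
 *		struct my_priv *p = priv;
 *
 *		memcpy(val, p->mem + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_priv *p;
 *		struct nvmem_config config = {
 *			.name		= "my-nvmem",
 *			.id		= NVMEM_DEVID_AUTO,
 *			.reg_read	= my_reg_read,
 *			.read_only	= true,
 *		};
 *
 *		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
 *		if (!p)
 *			return -ENOMEM;
 *
 *		config.dev = &pdev->dev;
 *		config.priv = p;
 *		config.size = sizeof(p->mem);
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
 *	}
 */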
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}
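/*
 * Illustrative sketch (not part of the original file): how a non-DT board
 * file might wire a cell to a consumer so the lookup above can resolve it.
 * All names here are made up.
 *
 *	static struct nvmem_cell_lookup board_nvmem_lookup = {
 *		.nvmem_name	= "my-nvmem0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "some-eth-device",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&board_nvmem_lookup, 1);
 *
 * A later nvmem_cell_get(dev, "mac-address") from the device named
 * "some-eth-device" then resolves through this entry.
 */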
static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
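/*
 * Illustrative device tree fragment (not part of the original file): a
 * consumer referencing a cell by phandle, resolved by of_nvmem_cell_get()
 * above. Node and cell names are made up.
 *
 *	ethernet@0 {
 *		nvmem-cells = <&calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 *
 * In the driver, either of these returns the same cell:
 *
 *	cell = of_nvmem_cell_get(dev->of_node, "calibration");
 *	cell = nvmem_cell_get(dev, "calibration");
 *
 * When the referenced cell node declares #nvmem-cell-cells = <1>, the
 * phandle carries one extra argument, which ends up as cell->index.
 */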
/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
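/*
 * Worked example (not part of the original file) for the helper above:
 * take a cell with bit_offset = 2 and nbits = 10, so bytes = 2. The value
 * occupies bits 2..7 of the first raw byte and bits 0..3 of the second.
 * The first pass shifts buf[0] right by 2 and ORs in the low two bits of
 * buf[1] at the top; buf[1] is then shifted right by 2 as well. Finally
 * GENMASK(1, 0) clears everything above the two valid bits left in
 * buf[1], leaving the 10-bit value right-aligned in the two bytes.
 */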
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
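/*
 * Illustrative consumer sketch (not part of the original file): a typical
 * read and write of a cell from a driver. "dev" and the "calibration"
 * cell name are hypothetical.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *value;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	value = nvmem_cell_read(cell, &len);
 *	if (!IS_ERR(value)) {
 *		// ... use len bytes of value ...
 *		kfree(value);
 *	}
 *
 *	// nvmem_cell_write() returns the number of bytes written
 *	// or a negative error code:
 *	nvmem_cell_write(cell, new_value, len);
 *
 *	nvmem_cell_put(cell);
 */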
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
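/*
 * Worked example (not part of the original file) for the helpers above:
 * a hypothetical 2-byte cell containing { 0x34, 0x12 } read through
 * nvmem_cell_read_variable_le_u32() yields
 *
 *	*val = (0x34 << 0) | (0x12 << 8) = 0x1234
 *
 * i.e. the cell is interpreted as a little-endian number of whatever
 * length (up to sizeof(*val)) the cell actually has.
 */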
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
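/*
 * Illustrative sketch (not part of the original file): raw device-level
 * access for consumers that need more than a single cell. The device name
 * "my-nvmem0" and the offset are made up.
 *
 *	struct nvmem_device *nvmem;
 *	u8 data[16];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "my-nvmem0");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x40, sizeof(data), data);
 *	// ret is sizeof(data) on success, negative errno on failure
 *
 *	nvmem_device_put(nvmem);
 */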
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");