// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
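
/*
 * Example (not part of the original file): a minimal sketch of how a
 * provider might describe keepout regions. The offsets and filler bytes
 * below are hypothetical; only the nvmem_keepout fields and the
 * sorting/stride rules enforced by nvmem_validate_keepouts() are real.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x18, .value = 0xff },	// calibration area
 *		{ .start = 0x40, .end = 0x44, .value = 0x00 },	// secure bits
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 *
 * Reads covering a keepout return .value for the protected bytes; writes
 * silently skip them.
 */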

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   const struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write || nvmem->read_only)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 const struct bin_attribute *attr,
					 int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

static size_t nvmem_bin_attr_size(struct kobject *kobj,
				  const struct bin_attribute *attr,
				  int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem->size;
}

static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it
	 * can be forced into read-write mode using the 'force_ro'
	 * attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0;	/* Attribute not visible */

	return attr->mode;
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static const struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static const struct bin_attribute *const nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.bin_size	= nvmem_bin_attr_size,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}
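
/*
 * Example (not part of the original file): a hedged sketch of how an
 * EEPROM-style driver might opt into the legacy "eeprom" attribute. The
 * names are hypothetical; at24-like drivers use this path.
 *
 *	struct nvmem_config config = {
 *		.dev = &client->dev,
 *		.compat = true,
 *		.base_dev = &client->dev,	// where "eeprom" is created
 *		.reg_read = foo_eeprom_read,
 *		.reg_write = foo_eeprom_write,
 *	};
 *
 * With .compat set, nvmem_register() also exposes the classic
 * eeprom (or "fram" for NVMEM_TYPE_FRAM) file next to the canonical
 * /sys/bus/nvmem/devices/<dev>/nvmem node.
 */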

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name	= "cells",
	};
	struct nvmem_cell_entry *entry;
	const struct bin_attribute **pattrs;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
			      sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!pattrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		pattrs[i] = &attrs[i];
		i++;
	}

	group.bin_attrs = pattrs;

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static const struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits) {
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);
		cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
	}

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
		dev_err(&nvmem->dev,
			"cell %s raw len %zd unaligned to nvmem word size %d\n",
			cell->name ?: "<unknown>", cell->raw_len,
			nvmem->word_size);

		if (info->raw_len)
			return -EINVAL;

		cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
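
/*
 * Example (not part of the original file): a minimal, hypothetical sketch
 * of a provider adding one cell. The offset, size and "mac-address" name
 * are made up; only the nvmem_cell_info fields and the stride/word_size
 * alignment rules checked above are real.
 *
 *	struct nvmem_cell_info info = {
 *		.name = "mac-address",
 *		.offset = 0x20,		// must be stride aligned
 *		.bytes = 6,
 *	};
 *	int err = nvmem_add_one_cell(nvmem, &info);
 *
 * For bitfield cells, set .bit_offset/.nbits as well; the entry's
 * bytes/raw_len are then derived from the bit window.
 */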

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
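
/*
 * Example (not part of the original file): a hedged sketch of listening
 * for nvmem events. The callback name is hypothetical; the event codes
 * (NVMEM_ADD, NVMEM_REMOVE, NVMEM_CELL_ADD, NVMEM_CELL_REMOVE) are the
 * ones raised by this file.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *	nvmem_register_notifier(&foo_nb);
 */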

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node_scoped(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE * info.bytes ||
			    info.nbits < 1 ||
			    info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(info.np);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
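
/*
 * Example (not part of the original file): a hedged, minimal provider
 * registration sketch. The foo_* names and geometry are hypothetical;
 * the nvmem_config fields are the ones consumed above.
 *
 *	static int foo_reg_read(void *priv, unsigned int off, void *val,
 *				size_t bytes)
 *	{
 *		memcpy(val, (u8 *)priv + off, bytes);	// backed by a shadow buffer
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = dev,
 *		.name = "foo-otp",
 *		.id = NVMEM_DEVID_AUTO,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 1,
 *		.priv = foo->shadow,
 *		.reg_read = foo_reg_read,	// no .reg_write: device is read-only
 *	};
 *	struct nvmem_device *nv = devm_nvmem_register(dev, &config);
 *	if (IS_ERR(nv))
 *		return PTR_ERR(nv);
 */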

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put already got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bytes_offset;
	int bit_offset = cell->bit_offset;

	p = b = buf;

	bytes_offset = bit_offset / BITS_PER_BYTE;
	b += bytes_offset;
	bit_offset %= BITS_PER_BYTE;

	if (bit_offset % BITS_PER_BYTE) {
		/* First shift */
		*p = *b++ >> bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p++ |= *b << (BITS_PER_BYTE - bit_offset);

			*p = *b++ >> bit_offset;
		}
	} else if (p != b) {
		memmove(p, b, cell->bytes - bytes_offset);
		p += cell->bytes - 1;
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
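
/*
 * Worked example (not part of the original file), for a hypothetical cell
 * with bit_offset = 3 and nbits = 10 (so cell->bytes = 2, raw bytes r0, r1):
 *
 *	out[0] = (r0 >> 3) | (r1 << 5);		// bits 3..7 of r0, 0..2 of r1
 *	out[1] = (r1 >> 3) & GENMASK(1, 0);	// bits 3..4 of r1
 *
 * leaving the 10 payload bits right-justified in the buffer, with the
 * leftover msb bits of the last byte cleared.
 */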

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
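
/*
 * Example (not part of the original file): a hedged consumer-side sketch.
 * The "calibration" cell name is hypothetical and would come from the
 * consumer's nvmem-cell-names property (DT) or a registered lookup entry.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);	// may be -EPROBE_DEFER
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	// ... use data[0..len-1] ...
 *	kfree(data);			// consumer owns the buffer
 */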

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
			return -EINVAL;
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
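
/*
 * Example (not part of the original file): a hedged write sketch for the
 * same hypothetical "calibration" cell. The length must match the cell
 * size (or, for bitfield cells, BITS_TO_BYTES(nbits)).
 *
 *	u8 new_cal[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
 *	int ret = nvmem_cell_write(cell, new_cal, sizeof(new_cal));
 *	if (ret != sizeof(new_cal))
 *		return ret < 0 ? ret : -EIO;	// ret is bytes written or -errno
 */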

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
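
/*
 * Example (not part of the original file): a hedged sketch of raw device
 * access for a consumer that owns a whole nvmem device rather than a
 * cell. The device name "foo-otp0" is hypothetical.
 *
 *	struct nvmem_device *nv = devm_nvmem_device_get(dev, "foo-otp0");
 *	u8 serial[8];
 *	int ret;
 *
 *	if (IS_ERR(nv))
 *		return PTR_ERR(nv);
 *	ret = nvmem_device_read(nv, 0x10, sizeof(serial), serial);
 *	if (ret != sizeof(serial))	// returns bytes read or -errno
 *		return ret < 0 ? ret : -EIO;
 *
 * Reads and writes go through the same keepout-aware paths as cell
 * accesses, so protected ranges still read back as the filler value.
 */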

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
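
/*
 * Example (not part of the original file): a hedged board-file sketch of
 * a lookup entry for non-DT platforms. All names are hypothetical; the
 * fields mirror what nvmem_cell_get_from_lookup() matches on.
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name = "foo-otp0",	// provider device name
 *		.cell_name = "mac-address",	// cell registered by the provider
 *		.dev_id = "foo-eth.0",		// consumer dev_name()
 *		.con_id = "mac-address",	// id passed to nvmem_cell_get()
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */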

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");