// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
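
/*
 * Illustrative sketch (not part of this driver): how a provider might
 * describe a keepout range. The name "foo_keepouts" and the offsets are
 * hypothetical. With the table below, an 8-byte read at offset 0 returns
 * live data for bytes 0-3, while bytes 4-7 are filled with the keepout's
 * .value (0xff) instead of touching the underlying device:
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 4, .end = 8, .value = 0xff },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */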

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write || nvmem->read_only)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
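
/*
 * Worked example (illustrative): for a device with root_only = false,
 * read_only = false and both reg_read and reg_write hooks, the resulting
 * mode is 0400 | 0044 | 0200 = 0644. For a root-only, read-only device
 * with a reg_read hook it stays at 0400.
 */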

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 const struct bin_attribute *attr,
					 int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

static size_t nvmem_bin_attr_size(struct kobject *kobj,
				  const struct bin_attribute *attr,
				  int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem->size;
}

static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it
	 * can be forced into read-write mode using the 'force_ro'
	 * attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0;	/* Attribute not visible */

	return attr->mode;
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.bin_size	= nvmem_bin_attr_size,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
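
/*
 * Illustrative sketch: a provider that wants the legacy "eeprom" file in
 * its parent device directory (as drivers/misc/eeprom used to expose)
 * would set, in an otherwise ordinary nvmem_config (names hypothetical):
 *
 *	config.compat = true;
 *	config.base_dev = &client->dev;	// parent that gets the "eeprom" file
 */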

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name	= "cells",
	};
	struct nvmem_cell_entry *entry;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
				       sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!group.bin_attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		group.bin_attrs[i] = &attrs[i];
		i++;
	}

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
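
/*
 * Illustrative sketch: adding one cell by hand. The cell name, offset and
 * size below are hypothetical; note that when nbits is set, ->bytes is
 * recomputed from bit_offset + nbits as shown above.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 */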

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
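
/*
 * Illustrative sketch: listening for nvmem events. The callback name is
 * hypothetical; `action` is one of NVMEM_ADD, NVMEM_REMOVE, NVMEM_CELL_ADD
 * or NVMEM_CELL_REMOVE, and `data` points to the affected
 * struct nvmem_device or struct nvmem_cell_entry.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == NVMEM_ADD)
 *			pr_debug("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */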

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
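
/*
 * Illustrative devicetree fragment (hypothetical node names) for the
 * legacy fixed-cell parsing above: each child node with a "reg" property
 * becomes a cell, and an optional "bits" property selects a bit field
 * inside it.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c02";
 *		reg = <0x52>;
 *
 *		calibration: calib@10 {
 *			reg = <0x10 0x2>;	// offset 0x10, 2 bytes
 *			bits = <2 10>;		// bit_offset 2, 10 bits
 *		};
 *	};
 */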

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register an nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
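
/*
 * Illustrative sketch of a minimal provider (all names hypothetical).
 * The reg_read callback receives the priv pointer from the config and
 * must return 0 on success:
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "foo-nvmem",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.owner		= THIS_MODULE,
 *		.size		= 256,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.priv		= foo,
 *		.reg_read	= foo_reg_read,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */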

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
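
/*
 * Illustrative consumer sketch (hypothetical DT): a node referencing the
 * provider by phandle,
 *
 *	foo {
 *		nvmem = <&eeprom>;
 *		nvmem-names = "config";
 *	};
 *
 * can obtain the whole device and read raw bytes from it:
 *
 *	struct nvmem_device *nvmem = nvmem_device_get(dev, "config");
 *	u8 data[16];
 *
 *	if (!IS_ERR(nvmem)) {
 *		nvmem_device_read(nvmem, 0, sizeof(data), data);
 *		nvmem_device_put(nvmem);
 *	}
 */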

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
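
/*
 * Illustrative consumer sketch (hypothetical DT), matching the cell
 * definitions shown earlier:
 *
 *	foo {
 *		nvmem-cells = <&calibration>;
 *		nvmem-cell-names = "calibration";
 *	};
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "calibration");
 *	size_t len;
 *	void *buf;
 *
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	buf = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ... use len bytes at buf ...
 *	kfree(buf);
 */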

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
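
/*
 * Worked example (illustrative): bit_offset = 2, nbits = 10, bytes = 2.
 * Raw bytes {0xab, 0xcd} hold the value in bits 2..11 of the
 * little-endian stream 0xcdab. After the in-place shift above, the
 * buffer is {0x6a, 0x03}, i.e. (0xcdab >> 2) & GENMASK(9, 0) = 0x36a.
 */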

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
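
/*
 * Illustrative sketch: writing a cell obtained with nvmem_cell_get().
 * The buffer length must match the cell size unless a bit field is
 * involved; on success the number of bytes written is returned.
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
 *
 *	ret = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (ret != sizeof(mac))
 *		// handle error
 */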

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
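
/*
 * Illustrative sketch: the fixed-size helpers return -EINVAL when the
 * cell size does not match the requested type, so a 4-byte cell named
 * "speed-bin" (hypothetical) is read as:
 *
 *	u32 speed_bin;
 *
 *	err = nvmem_cell_read_u32(dev, "speed-bin", &speed_bin);
 */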

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
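
/*
 * Illustrative sketch (hypothetical names): a board file wiring the cell
 * "mac-address" on nvmem device "foo-nvmem0" to the consumer device
 * "foo-eth.0" under the connection id "mac":
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-nvmem0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-eth.0",
 *			.con_id		= "mac",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */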

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");