// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char *name;
	int offset;
	size_t raw_len;
	int bytes;
	int bit_offset;
	int nbits;
	nvmem_cell_post_process_t read_post_process;
	void *priv;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char *id;
	int index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   const struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

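/*
 * Example (sketch): the raw area served by the handler above is typically
 * accessed from userspace through sysfs. The device name and sizes below
 * are assumptions, not taken from any real system:
 *
 *	# hexdump the first 16 bytes of an nvmem device
 *	dd if=/sys/bus/nvmem/devices/example0/nvmem bs=1 count=16 | xxd
 */
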
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write || nvmem->read_only)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 const struct bin_attribute *attr,
					 int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

static size_t nvmem_bin_attr_size(struct kobject *kobj,
				  const struct bin_attribute *attr,
				  int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem->size;
}

static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it
	 * can be forced into read-write mode using the 'force_ro'
	 * attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0;	/* Attribute not visible */

	return attr->mode;
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

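/*
 * Example (sketch): once nvmem_populate_sysfs_cells() below has run, each
 * cell is exposed as a read-only file named "<name>@<offset>,<bit_offset>"
 * (offsets in hex) inside the device's "cells" group. The device and cell
 * names here are assumptions:
 *
 *	cat /sys/bus/nvmem/devices/example0/cells/serial-number@10,0
 */
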
/* default read/write permissions */
static const struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read_new = bin_attr_nvmem_read,
	.write_new = bin_attr_nvmem_write,
};

static const struct bin_attribute *const nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs_new = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.bin_size = nvmem_bin_attr_size,
	.is_visible = nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read_new = bin_attr_nvmem_read,
	.write_new = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sys directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name = "cells",
	};
	struct nvmem_cell_entry *entry;
	const struct bin_attribute **pattrs;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
			      sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!pattrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read_new = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		pattrs[i] = &attrs[i];
		i++;
	}

	group.bin_attrs_new = pattrs;

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);

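/*
 * Example (sketch): a provider adding a single cell after registration.
 * The name, offset and size are assumptions for illustration only:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "serial-number",
 *		.offset	= 0x10,
 *		.bytes	= 8,
 *	};
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 *	if (err)
 *		return err;
 */
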
666 */ 667 static int nvmem_add_cells(struct nvmem_device *nvmem, 668 const struct nvmem_cell_info *info, 669 int ncells) 670 { 671 int i, rval; 672 673 for (i = 0; i < ncells; i++) { 674 rval = nvmem_add_one_cell(nvmem, &info[i]); 675 if (rval) 676 return rval; 677 } 678 679 return 0; 680 } 681 682 /** 683 * nvmem_register_notifier() - Register a notifier block for nvmem events. 684 * 685 * @nb: notifier block to be called on nvmem events. 686 * 687 * Return: 0 on success, negative error number on failure. 688 */ 689 int nvmem_register_notifier(struct notifier_block *nb) 690 { 691 return blocking_notifier_chain_register(&nvmem_notifier, nb); 692 } 693 EXPORT_SYMBOL_GPL(nvmem_register_notifier); 694 695 /** 696 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events. 697 * 698 * @nb: notifier block to be unregistered. 699 * 700 * Return: 0 on success, negative error number on failure. 701 */ 702 int nvmem_unregister_notifier(struct notifier_block *nb) 703 { 704 return blocking_notifier_chain_unregister(&nvmem_notifier, nb); 705 } 706 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier); 707 708 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem) 709 { 710 const struct nvmem_cell_info *info; 711 struct nvmem_cell_table *table; 712 struct nvmem_cell_entry *cell; 713 int rval = 0, i; 714 715 mutex_lock(&nvmem_cell_mutex); 716 list_for_each_entry(table, &nvmem_cell_tables, node) { 717 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) { 718 for (i = 0; i < table->ncells; i++) { 719 info = &table->cells[i]; 720 721 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 722 if (!cell) { 723 rval = -ENOMEM; 724 goto out; 725 } 726 727 rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell); 728 if (rval) { 729 kfree(cell); 730 goto out; 731 } 732 733 nvmem_cell_entry_add(cell); 734 } 735 } 736 } 737 738 out: 739 mutex_unlock(&nvmem_cell_mutex); 740 return rval; 741 } 742 743 static struct nvmem_cell_entry * 744 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id) 745 { 746 struct nvmem_cell_entry *iter, *cell = NULL; 747 748 mutex_lock(&nvmem_mutex); 749 list_for_each_entry(iter, &nvmem->cells, node) { 750 if (strcmp(cell_id, iter->name) == 0) { 751 cell = iter; 752 break; 753 } 754 } 755 mutex_unlock(&nvmem_mutex); 756 757 return cell; 758 } 759 760 static int nvmem_validate_keepouts(struct nvmem_device *nvmem) 761 { 762 unsigned int cur = 0; 763 const struct nvmem_keepout *keepout = nvmem->keepout; 764 const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout; 765 766 while (keepout < keepoutend) { 767 /* Ensure keepouts are sorted and don't overlap. */ 768 if (keepout->start < cur) { 769 dev_err(&nvmem->dev, 770 "Keepout regions aren't sorted or overlap.\n"); 771 772 return -ERANGE; 773 } 774 775 if (keepout->end < keepout->start) { 776 dev_err(&nvmem->dev, 777 "Invalid keepout region.\n"); 778 779 return -EINVAL; 780 } 781 782 /* 783 * Validate keepouts (and holes between) don't violate 784 * word_size constraints. 785 */ 786 if ((keepout->end - keepout->start < nvmem->word_size) || 787 ((keepout->start != cur) && 788 (keepout->start - cur < nvmem->word_size))) { 789 790 dev_err(&nvmem->dev, 791 "Keepout regions violate word_size constraints.\n"); 792 793 return -ERANGE; 794 } 795 796 /* Validate keepouts don't violate stride (alignment). 
static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

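/*
 * Example (sketch): a device-tree fragment of the form parsed by
 * nvmem_add_cells_from_fixed_layout() above. Node and cell names are
 * assumptions:
 *
 *	nvmem-layout {
 *		compatible = "fixed-layout";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		mac_address: mac-address@0 {
 *			reg = <0x0 0x6>;
 *		};
 *	};
 */
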
int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

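/*
 * Example (sketch): a minimal RAM-backed provider built around the config
 * consumed by nvmem_register() above. All names, sizes and the backing
 * buffer are assumptions for illustration:
 *
 *	static u8 example_buf[256];
 *
 *	static int example_reg_read(void *priv, unsigned int offset,
 *				    void *val, size_t bytes)
 *	{
 *		memcpy(val, example_buf + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "example",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.size		= sizeof(example_buf),
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.reg_read	= example_reg_read,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 */
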
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously acquired nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 *	   that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously acquired nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get the nvmem device of a device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

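/*
 * Example (sketch): the consumer-side device-tree references resolved by
 * of_nvmem_cell_get() below. Labels and names are assumptions:
 *
 *	ethernet {
 *		nvmem-cells = <&mac_address>;
 *		nvmem-cell-names = "mac-address";
 *	};
 */
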
/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

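/*
 * Example (sketch): a typical consumer round trip using nvmem_cell_get()
 * above together with nvmem_cell_read() and nvmem_cell_put() defined later
 * in this file. The cell name is an assumption:
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	(use mac[0..len - 1] here, then kfree(mac))
 */
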
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
			return -EINVAL;
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

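/*
 * Example (sketch): rewriting a whole cell via nvmem_cell_write() below.
 * The buffer contents and cell are assumptions; note that on success the
 * return value is the number of bytes written, not zero:
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (ret != sizeof(mac))
 *		return ret < 0 ? ret : -EIO;
 */
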
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

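/*
 * Example (sketch): the fixed-size helpers above wrap get/read/put into a
 * single call. The cell name is an assumption:
 *
 *	u32 calib;
 *	int err;
 *
 *	err = nvmem_cell_read_u32(dev, "calibration", &calib);
 *	if (err)
 *		return err;
 */
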
static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	size_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

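/*
 * Example (sketch): reading an ad-hoc cell straight from a device handle
 * using nvmem_device_cell_read() above. The offset and size are
 * assumptions (and must respect the device's stride):
 *
 *	struct nvmem_cell_info info = { .offset = 0x40, .bytes = 4 };
 *	u8 buf[4];
 *	ssize_t len;
 *
 *	len = nvmem_device_cell_read(nvmem, &info, buf);
 *	if (len < 0)
 *		return len;
 */
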
/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

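/*
 * Example (sketch): a board file wiring an existing cell to a consumer
 * device on a non-DT system, matched later by nvmem_cell_get_from_lookup().
 * Every name below is an assumption:
 *
 *	static struct nvmem_cell_lookup example_lookup = {
 *		.nvmem_name	= "example0",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "example-eth.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&example_lookup, 1);
 */
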
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");