// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}
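
/*
 * Illustrative sketch (not part of this file's API): a provider backing
 * these hooks supplies reg_read()/reg_write() callbacks of the following
 * shape, where struct my_priv and its memcpy()-backed shadow buffer are
 * hypothetical:
 *
 *	static int my_reg_read(void *priv, unsigned int offset,
 *			       void *val, size_t bytes)
 *	{
 *		struct my_priv *my = priv;
 *
 *		memcpy(val, my->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 * Note that the core handles any write-protect "wp" GPIO itself: it is
 * de-asserted around every reg_write() call above, so providers never
 * need to toggle it.
 */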

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   const struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write || nvmem->read_only)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 const struct bin_attribute *attr,
					 int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

static size_t nvmem_bin_attr_size(struct kobject *kobj,
				  const struct bin_attribute *attr,
				  int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem->size;
}

static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it
	 * can be forced into read-write mode using the 'force_ro'
	 * attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0; /* Attribute not visible */

	return attr->mode;
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static const struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read_new = bin_attr_nvmem_read,
	.write_new = bin_attr_nvmem_write,
};

static const struct bin_attribute *const nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs_new	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.bin_size	= nvmem_bin_attr_size,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read_new = bin_attr_nvmem_read,
	.write_new = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name = "cells",
	};
	struct nvmem_cell_entry *entry;
	const struct bin_attribute **pattrs;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
			      sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!pattrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read_new = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		pattrs[i] = &attrs[i];
		i++;
	}

	group.bin_attrs_new = pattrs;

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
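
/*
 * Note on the resulting sysfs layout (derived from the code above; the
 * concrete paths are given for illustration): each registered device
 * exposes its raw content at /sys/bus/nvmem/devices/<dev-name>/nvmem,
 * and, once cells are populated, one read-only file per cell under a
 * "cells" directory, named "<cell-name>@<offset>,<bit_offset>".
 */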

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits) {
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);
		cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
	}

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
		dev_err(&nvmem->dev,
			"cell %s raw len %zd unaligned to nvmem word size %d\n",
			cell->name ?: "<unknown>", cell->raw_len,
			nvmem->word_size);

		if (info->raw_len)
			return -EINVAL;

		cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
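
/*
 * Illustrative sketch: a caller adding a single 6-byte cell (the name
 * "mac-address" and the offsets are hypothetical) might do:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0,
 *		.bytes	= 6,
 *	};
 *	int err = nvmem_add_one_cell(nvmem, &info);
 *
 * The info is copied into an internal nvmem_cell_entry (the name is
 * duplicated with kstrdup_const()), so info may live on the stack.
 */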

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE * info.bytes ||
			    info.nbits < 1 ||
			    info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
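
/*
 * Illustrative device-tree fragment matched by the parser above (node
 * name and property values are hypothetical):
 *
 *	eeprom@52 {
 *		calibration@4 {
 *			reg = <0x4 0x2>;	// offset 4, 2 bytes
 *			bits = <2 10>;		// bit_offset 2, 10 bits
 *		};
 *	};
 *
 * The child node name ("calibration") becomes the cell name, "reg" gives
 * the byte offset and size, and the optional "bits" property narrows the
 * cell to a bit field within those bytes.
 */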

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
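
/*
 * Illustrative provider sketch (the driver name, size and the
 * my_reg_read()/my_priv backend are hypothetical):
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "my-otp",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.read_only	= true,
 *		.size		= 256,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.reg_read	= my_reg_read,
 *		.priv		= my_priv,
 *	};
 *	struct nvmem_device *nvmem = devm_nvmem_register(&pdev->dev, &config);
 *
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */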

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put already got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
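
/*
 * Illustrative consumer sketch (the id "eeprom", the offset and the
 * buffer are hypothetical): grab a handle on a whole nvmem device and
 * read a few raw bytes from it.
 *
 *	struct nvmem_device *nvmem = devm_nvmem_device_get(dev, "eeprom");
 *	u8 serial[8];
 *
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *	if (nvmem_device_read(nvmem, 0x10, sizeof(serial), serial) < 0)
 *		dev_warn(dev, "serial number unreadable\n");
 */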

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
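
/*
 * Illustrative consumer sketch (the cell name "mac-address" is
 * hypothetical): fetch a cell, read its contents into a freshly
 * allocated buffer, then release everything.
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "mac-address");
 *	size_t len;
 *	void *data;
 *
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	consume_bytes(data, len);	// hypothetical user of the data
 *	kfree(data);
 */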

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bytes_offset;
	int bit_offset = cell->bit_offset;

	p = b = buf;

	bytes_offset = bit_offset / BITS_PER_BYTE;
	b += bytes_offset;
	bit_offset %= BITS_PER_BYTE;

	if (bit_offset % BITS_PER_BYTE) {
		/* First shift */
		*p = *b++ >> bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p++ |= *b << (BITS_PER_BYTE - bit_offset);

			*p = *b++ >> bit_offset;
		}
	} else if (p != b) {
		memmove(p, b, cell->bytes - bytes_offset);
		p += cell->bytes - 1;
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
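
/*
 * Worked example for the in-place shift above (values chosen for
 * illustration): a cell with bit_offset = 2 and nbits = 10 occupies two
 * raw bytes. Raw data {0xb4, 0x07} (LSB first) holds the 10-bit value
 * 0x1ed in bits 2..11, and the shift rewrites the buffer to {0xed, 0x01}:
 * each byte is shifted right by two, the low bits of the following byte
 * are pulled in at the top, and GENMASK() clears the leftover high bits
 * of the last byte.
 */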

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
1815 */ 1816 if (cell->read_post_process) 1817 return -EINVAL; 1818 1819 if (cell->bit_offset || cell->nbits) { 1820 if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes) 1821 return -EINVAL; 1822 buf = nvmem_cell_prepare_write_buffer(cell, buf, len); 1823 if (IS_ERR(buf)) 1824 return PTR_ERR(buf); 1825 } 1826 1827 rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes); 1828 1829 /* free the tmp buffer */ 1830 if (cell->bit_offset || cell->nbits) 1831 kfree(buf); 1832 1833 if (rc) 1834 return rc; 1835 1836 return len; 1837 } 1838 1839 /** 1840 * nvmem_cell_write() - Write to a given nvmem cell 1841 * 1842 * @cell: nvmem cell to be written. 1843 * @buf: Buffer to be written. 1844 * @len: length of buffer to be written to nvmem cell. 1845 * 1846 * Return: length of bytes written or negative on failure. 1847 */ 1848 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) 1849 { 1850 return __nvmem_cell_entry_write(cell->entry, buf, len); 1851 } 1852 1853 EXPORT_SYMBOL_GPL(nvmem_cell_write); 1854 1855 static int nvmem_cell_read_common(struct device *dev, const char *cell_id, 1856 void *val, size_t count) 1857 { 1858 struct nvmem_cell *cell; 1859 void *buf; 1860 size_t len; 1861 1862 cell = nvmem_cell_get(dev, cell_id); 1863 if (IS_ERR(cell)) 1864 return PTR_ERR(cell); 1865 1866 buf = nvmem_cell_read(cell, &len); 1867 if (IS_ERR(buf)) { 1868 nvmem_cell_put(cell); 1869 return PTR_ERR(buf); 1870 } 1871 if (len != count) { 1872 kfree(buf); 1873 nvmem_cell_put(cell); 1874 return -EINVAL; 1875 } 1876 memcpy(val, buf, count); 1877 kfree(buf); 1878 nvmem_cell_put(cell); 1879 1880 return 0; 1881 } 1882 1883 /** 1884 * nvmem_cell_read_u8() - Read a cell value as a u8 1885 * 1886 * @dev: Device that requests the nvmem cell. 1887 * @cell_id: Name of nvmem cell to read. 1888 * @val: pointer to output value. 1889 * 1890 * Return: 0 on success or negative errno. 1891 */ 1892 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val) 1893 { 1894 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1895 } 1896 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8); 1897 1898 /** 1899 * nvmem_cell_read_u16() - Read a cell value as a u16 1900 * 1901 * @dev: Device that requests the nvmem cell. 1902 * @cell_id: Name of nvmem cell to read. 1903 * @val: pointer to output value. 1904 * 1905 * Return: 0 on success or negative errno. 1906 */ 1907 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val) 1908 { 1909 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1910 } 1911 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16); 1912 1913 /** 1914 * nvmem_cell_read_u32() - Read a cell value as a u32 1915 * 1916 * @dev: Device that requests the nvmem cell. 1917 * @cell_id: Name of nvmem cell to read. 1918 * @val: pointer to output value. 1919 * 1920 * Return: 0 on success or negative errno. 1921 */ 1922 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) 1923 { 1924 return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val)); 1925 } 1926 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); 1927 1928 /** 1929 * nvmem_cell_read_u64() - Read a cell value as a u64 1930 * 1931 * @dev: Device that requests the nvmem cell. 1932 * @cell_id: Name of nvmem cell to read. 1933 * @val: pointer to output value. 1934 * 1935 * Return: 0 on success or negative errno. 

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
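
/*
 * Worked example for the little-endian assembly above: a 3-byte cell
 * containing {0x56, 0x34, 0x12} yields *val == 0x123456, because byte i
 * is shifted left by 8 * i before being OR-ed in.
 */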

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");