/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	const char *name;
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int ncells;
	int id;
	int users;
	size_t size;
	bool read_only;
	int flags;
	struct bin_attribute eeprom;
	struct device *base_dev;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	void *priv;
};

#define FLAG_COMPAT BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);
	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);
	if (rc)
		return rc;

	return count;
}
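/*
 * Example: a minimal reg_read() callback as a provider might supply it
 * (hypothetical driver "foo", for illustration only). The core hands it
 * the provider's 'priv' pointer plus a byte offset and count, which the
 * sysfs handlers above clamp and word-align before calling:
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy_fromio(val, foo->base + offset, bytes);
 *		return 0;
 *	}
 */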
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = S_IWUSR | S_IRUGO,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs = nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = S_IRUGO,
	},
	.read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs = nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = S_IWUSR | S_IRUSR,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs = nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = S_IRUSR,
	},
	.read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs = nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};
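/*
 * Note: the attribute groups above expose each device as a single
 * binary file under sysfs, e.g. /sys/bus/nvmem/devices/<dev-name>/nvmem
 * ("<dev-name>" as set by dev_set_name() at registration), readable
 * and, unless the device is read-only or root-only, writable with
 * ordinary file I/O.
 */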
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	mutex_lock(&nvmem_cells_mutex);

	list_for_each_entry(p, &nvmem_cells, node)
		if (!strcmp(p->name, cell_id)) {
			mutex_unlock(&nvmem_cells_mutex);
			return p;
		}

	mutex_unlock(&nvmem_cells_mutex);

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	nvmem->ncells = ncells;
	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
EXPORT_SYMBOL_GPL(nvmem_add_cells);
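/*
 * Example: a provider describing fixed cells at registration time (a
 * hypothetical layout, for illustration only):
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x00, .bytes = 6 },
 *		{ .name = "calibration", .offset = 0x10, .bytes = 4 },
 *	};
 *
 *	rval = nvmem_add_cells(nvmem, foo_cells, ARRAY_SIZE(foo_cells));
 *
 * Offsets must honour the device's stride, as checked by
 * nvmem_cell_info_to_nvmem_cell() above.
 */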
/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config->cells, config->ncells);

	return nvmem;

err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
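/*
 * Example: minimal provider registration from a driver's probe() (a
 * sketch; "foo" and its fields are hypothetical, and foo_reg_read() is
 * the callback sketched near the top of this file):
 *
 *	struct nvmem_config config = {
 *		.name = "foo-efuse",
 *		.dev = &pdev->dev,
 *		.read_only = true,
 *		.size = foo->size,
 *		.word_size = 1,
 *		.stride = 1,
 *		.reg_read = foo_reg_read,
 *		.priv = foo,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */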
/**
 * nvmem_unregister() - Unregister a previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: negative on error or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
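/*
 * Example: the managed variant ties the provider's lifetime to devres,
 * so remove() needs no explicit nvmem_unregister() (a sketch, reusing
 * the hypothetical "config" from the example above):
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */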
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister a previously registered managed
 * nvmem device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for device %s\n",
			dev_name(&nvmem->dev));

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
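/*
 * Example: consuming a whole nvmem device via device tree (illustrative
 * node and names only, using the "nvmem"/"nvmem-names" properties parsed
 * above):
 *
 *	tsens@900000 {
 *		nvmem = <&qfprom>;
 *		nvmem-names = "calib";
 *	};
 *
 *	nvmem = of_nvmem_device_get(dev->of_node, "calib");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */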
/**
 * devm_nvmem_device_put() - put a previously obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained from devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}
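/*
 * Example: device-tree layout for cell consumers, matching the
 * "nvmem-cells", "nvmem-cell-names", "reg" and "bits" properties parsed
 * by of_nvmem_cell_get() below (illustrative nodes and names only):
 *
 *	qfprom: qfprom@700000 {
 *		tsens_calib: calib@404 {
 *			reg = <0x404 0x10>;
 *		};
 *	};
 *
 *	tsens@900000 {
 *		nvmem-cells = <&tsens_calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 */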
#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *	  for the cell at index 0 (the lone cell with no accompanying
 *	  nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (name)
		index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
			cell_np);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
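/*
 * Example: typical consumer read path (a sketch; "calibration" matches
 * the illustrative binding above):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 * The consumer then uses the len returned bytes and releases the buffer
 * with kfree(data).
 */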
/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * obtained with devm_nvmem_cell_get().
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/*
	 * Clear msb bits if any leftover in the last byte; skip the mask
	 * when nbits is a whole number of bytes, as GENMASK(-1, 0) would
	 * otherwise wrongly zero the byte.
	 */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
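/*
 * Worked example for the read-side shift above (illustrative values):
 * a cell with bit_offset = 2 and nbits = 10 spans two bytes, with value
 * bits 5..0 in byte0 bits 7..2 and value bits 9..6 in byte1 bits 3..0.
 * Each byte is shifted right by two and topped up with the low bits of
 * its successor, giving byte0 = value bits 7..0; the final mask keeps
 * only nbits % 8 = 2 valid bits in the last byte, giving
 * byte1 = value bits 9..8 with the neighbouring bits cleared.
 */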
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/*
			 * Get the previous byte's msb bits and shift them
			 * towards lsb. The shift must be the inverse of the
			 * read path, i.e. by (BITS_PER_BYTE - bit_offset);
			 * the original "BITS_PER_BYTE - 1 - bit_offset"
			 * carried one bit too many and corrupted the result.
			 */
			pbits = pbyte >> (BITS_PER_BYTE - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
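/*
 * Example: typical consumer write path (a sketch; the cell and the
 * 6-byte value are hypothetical). nvmem_cell_write() returns the number
 * of bytes written on success:
 *
 *	u8 mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
 *	int rc;
 *
 *	rc = nvmem_cell_write(cell, mac, sizeof(mac));
 *	if (rc < 0)
 *		return rc;
 */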
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success or negative error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	size_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success or negative error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
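/*
 * Example: raw device-level access for drivers that own a whole nvmem
 * device rather than individual cells (a sketch; the offset and length
 * are illustrative, "nvmem" as obtained from nvmem_device_get()):
 *
 *	u8 serial[16];
 *	int rc;
 *
 *	rc = nvmem_device_read(nvmem, 0x100, sizeof(serial), serial);
 *	if (rc < 0)
 *		return rc;
 */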
/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");