/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			ncells;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void			*priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}
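/*
 * Illustrative sketch (not part of this driver): the reg_read/reg_write
 * hooks wrapped above are supplied by a provider through struct
 * nvmem_config. The callbacks below are hypothetical ("foo_priv" and its
 * "shadow" buffer are made up) and only show the expected
 * nvmem_reg_read_t/nvmem_reg_write_t shape, assuming the provider keeps
 * its own context behind the priv pointer.
 *
 *	static int foo_nvmem_read(void *priv, unsigned int offset,
 *				  void *val, size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_nvmem_write(void *priv, unsigned int offset,
 *				   void *val, size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy(foo->shadow + offset, val, bytes);
 *		return 0;
 *	}
 */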
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}
static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	mutex_lock(&nvmem_cells_mutex);

	list_for_each_entry(p, &nvmem_cells, node)
		if (!strcmp(p->name, cell_id)) {
			mutex_unlock(&nvmem_cells_mutex);
			return p;
		}

	mutex_unlock(&nvmem_cells_mutex);

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_config *cfg)
{
	struct nvmem_cell **cells;
	const struct nvmem_cell_info *info = cfg->cells;
	int i, rval;

	cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < cfg->ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	nvmem->ncells = cfg->ncells;
	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
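/*
 * Illustrative sketch (not part of this driver): nvmem_add_cells() walks
 * config->cells, so a provider describing fixed cells could hand over a
 * table like the hypothetical one below. The cell names, offsets and
 * sizes are made up; offsets must honour the device's stride.
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
 *		{ .name = "board-rev",   .offset = 0x50, .bytes = 1 },
 *	};
 *
 *	config.cells = foo_cells;
 *	config.ncells = ARRAY_SIZE(foo_cells);
 */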
/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	struct device_node *np;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride;
	nvmem->word_size = config->word_size;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	np = config->dev->of_node;
	nvmem->dev.of_node = np;
	dev_set_name(&nvmem->dev, "%s%d",
		     config->name ? : "nvmem",
		     config->name ? config->id : nvmem->id);

	nvmem->read_only = of_property_read_bool(np, "read-only") |
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config);

	return nvmem;

err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
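/*
 * Illustrative sketch (not part of this driver): a minimal provider
 * filling in struct nvmem_config before calling nvmem_register(). The
 * "foo" device, its callbacks, size and the foo->nvmem member are
 * hypothetical; only fields actually consumed by nvmem_register() above
 * are shown.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *foo = ...;
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-efuse",
 *			.owner = THIS_MODULE,
 *			.word_size = 1,
 *			.stride = 1,
 *			.size = 1024,
 *			.reg_read = foo_nvmem_read,
 *			.reg_write = foo_nvmem_write,
 *			.priv = foo,
 *		};
 *
 *		foo->nvmem = nvmem_register(&config);
 *		return PTR_ERR_OR_ZERO(foo->nvmem);
 *	}
 */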
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem->name);

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static int nvmem_match(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
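/*
 * Illustrative sketch (not part of this driver): a DT consumer looking up
 * a whole nvmem device by the name listed in its "nvmem-names" property
 * and reading raw bytes from it. The "foo-calib" name and the offsets are
 * hypothetical; nvmem_device_read() and nvmem_device_put() are defined
 * further down in this file.
 *
 *	static int foo_get_calibration(struct device *dev, u8 *calib)
 *	{
 *		struct nvmem_device *nvmem;
 *		int ret;
 *
 *		nvmem = of_nvmem_device_get(dev->of_node, "foo-calib");
 *		if (IS_ERR(nvmem))
 *			return PTR_ERR(nvmem);
 *
 *		ret = nvmem_device_read(nvmem, 0x10, 4, calib);
 *		nvmem_device_put(nvmem);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */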
/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
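/*
 * Illustrative sketch (not part of this driver): the devm_ variant ties
 * the reference to the consumer's lifetime, so no explicit put is needed
 * in the error or remove paths. The "foo-otp" id is hypothetical; the
 * returned handle would then be used with nvmem_device_read() and
 * nvmem_device_write().
 *
 *	static int foo_consumer_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_device *nvmem;
 *
 *		nvmem = devm_nvmem_device_get(&pdev->dev, "foo-otp");
 *		if (IS_ERR(nvmem))
 *			return PTR_ERR(nvmem);
 *
 *		return 0;
 *	}
 */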
static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *	  for the cell at index 0 (the lone cell with no accompanying
 *	  nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (name)
		index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
			cell_np);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
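/*
 * Illustrative sketch (not part of this driver): the typical consumer
 * sequence around nvmem_cell_get()/nvmem_cell_read(). The buffer
 * returned by nvmem_cell_read() is allocated by the core and must be
 * kfree'd by the caller. The "mac-address" cell name and the six-byte
 * length are hypothetical.
 *
 *	static int foo_read_mac(struct device *dev, u8 *mac)
 *	{
 *		struct nvmem_cell *cell;
 *		void *buf;
 *		size_t len;
 *
 *		cell = nvmem_cell_get(dev, "mac-address");
 *		if (IS_ERR(cell))
 *			return PTR_ERR(cell);
 *
 *		buf = nvmem_cell_read(cell, &len);
 *		nvmem_cell_put(cell);
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *
 *		if (len >= 6)
 *			memcpy(mac, buf, 6);
 *		kfree(buf);
 *
 *		return len >= 6 ? 0 : -EINVAL;
 *	}
 */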
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in fewer bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
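/*
 * Worked example (illustrative): for a hypothetical cell with
 * bit_offset = 2 and nbits = 10, the cell setup code computes
 * bytes = DIV_ROUND_UP(10 + 2, 8) = 2, so two raw bytes are read from
 * the device. nvmem_shift_read_buffer_in_place() then shifts each byte
 * right by bit_offset, pulls the low bits of the following byte in at
 * the top of the previous one, and finally masks the last byte with
 * GENMASK(nbits % BITS_PER_BYTE - 1, 0). The 10-bit value ends up
 * right-justified: its low eight bits in buf[0], its top two bits in
 * buf[1], with the remaining bits of buf[1] cleared.
 */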
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the cell does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
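/*
 * Illustrative sketch (not part of this driver): writing a cell mirrors
 * the read path; for cells with bit_offset/nbits set, the caller's data
 * is first merged with the surrounding bits read back from the device by
 * nvmem_cell_prepare_write_buffer() above. The "board-rev" cell name is
 * hypothetical.
 *
 *	static int foo_store_board_rev(struct device *dev, u8 rev)
 *	{
 *		struct nvmem_cell *cell;
 *		int ret;
 *
 *		cell = nvmem_cell_get(dev, "board-rev");
 *		if (IS_ERR(cell))
 *			return PTR_ERR(cell);
 *
 *		ret = nvmem_cell_write(cell, &rev, sizeof(rev));
 *		nvmem_cell_put(cell);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */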
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
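/*
 * Illustrative sketch (not part of this driver): nvmem_device_cell_read()
 * and nvmem_device_cell_write() take an ad-hoc cell description instead
 * of a registered cell, which is convenient when the consumer already
 * holds a struct nvmem_device. The "serial" name, offset and size below
 * are hypothetical.
 *
 *	static int foo_read_serial(struct nvmem_device *nvmem, u8 *serial)
 *	{
 *		struct nvmem_cell_info info = {
 *			.name = "serial",
 *			.offset = 0x20,
 *			.bytes = 8,
 *		};
 *		ssize_t len;
 *
 *		len = nvmem_device_cell_read(nvmem, &info, serial);
 *		return len < 0 ? len : 0;
 *	}
 */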
/**
 * nvmem_device_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");