1 /* 2 * nvmem framework core. 3 * 4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 and 9 * only version 2 as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 */ 16 17 #include <linux/device.h> 18 #include <linux/export.h> 19 #include <linux/fs.h> 20 #include <linux/idr.h> 21 #include <linux/init.h> 22 #include <linux/module.h> 23 #include <linux/nvmem-consumer.h> 24 #include <linux/nvmem-provider.h> 25 #include <linux/of.h> 26 #include <linux/regmap.h> 27 #include <linux/slab.h> 28 29 struct nvmem_device { 30 const char *name; 31 struct regmap *regmap; 32 struct module *owner; 33 struct device dev; 34 int stride; 35 int word_size; 36 int ncells; 37 int id; 38 int users; 39 size_t size; 40 bool read_only; 41 }; 42 43 struct nvmem_cell { 44 const char *name; 45 int offset; 46 int bytes; 47 int bit_offset; 48 int nbits; 49 struct nvmem_device *nvmem; 50 struct list_head node; 51 }; 52 53 static DEFINE_MUTEX(nvmem_mutex); 54 static DEFINE_IDA(nvmem_ida); 55 56 static LIST_HEAD(nvmem_cells); 57 static DEFINE_MUTEX(nvmem_cells_mutex); 58 59 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) 60 61 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, 62 struct bin_attribute *attr, 63 char *buf, loff_t pos, size_t count) 64 { 65 struct device *dev = container_of(kobj, struct device, kobj); 66 struct nvmem_device *nvmem = to_nvmem_device(dev); 67 int rc; 68 69 /* Stop the user from reading */ 70 if (pos >= nvmem->size) 71 return 0; 72 73 if (pos + count > nvmem->size) 74 count = nvmem->size - pos; 75 76 count = round_down(count, nvmem->word_size); 77 78 rc = regmap_raw_read(nvmem->regmap, pos, buf, count); 79 80 if (IS_ERR_VALUE(rc)) 81 return rc; 82 83 return count; 84 } 85 86 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, 87 struct bin_attribute *attr, 88 char *buf, loff_t pos, size_t count) 89 { 90 struct device *dev = container_of(kobj, struct device, kobj); 91 struct nvmem_device *nvmem = to_nvmem_device(dev); 92 int rc; 93 94 /* Stop the user from writing */ 95 if (pos >= nvmem->size) 96 return 0; 97 98 if (pos + count > nvmem->size) 99 count = nvmem->size - pos; 100 101 count = round_down(count, nvmem->word_size); 102 103 rc = regmap_raw_write(nvmem->regmap, pos, buf, count); 104 105 if (IS_ERR_VALUE(rc)) 106 return rc; 107 108 return count; 109 } 110 111 /* default read/write permissions */ 112 static struct bin_attribute bin_attr_rw_nvmem = { 113 .attr = { 114 .name = "nvmem", 115 .mode = S_IWUSR | S_IRUGO, 116 }, 117 .read = bin_attr_nvmem_read, 118 .write = bin_attr_nvmem_write, 119 }; 120 121 static struct bin_attribute *nvmem_bin_rw_attributes[] = { 122 &bin_attr_rw_nvmem, 123 NULL, 124 }; 125 126 static const struct attribute_group nvmem_bin_rw_group = { 127 .bin_attrs = nvmem_bin_rw_attributes, 128 }; 129 130 static const struct attribute_group *nvmem_rw_dev_groups[] = { 131 &nvmem_bin_rw_group, 132 NULL, 133 }; 134 135 /* read only permission */ 136 static struct bin_attribute bin_attr_ro_nvmem = { 137 .attr = { 138 .name = "nvmem", 139 .mode = S_IRUGO, 140 }, 141 .read = bin_attr_nvmem_read, 142 }; 143 144 static struct bin_attribute *nvmem_bin_ro_attributes[] = { 145 &bin_attr_ro_nvmem, 146 NULL, 147 }; 148 149 static const struct attribute_group nvmem_bin_ro_group = { 150 .bin_attrs = nvmem_bin_ro_attributes, 151 }; 152 153 static const struct attribute_group *nvmem_ro_dev_groups[] = { 154 &nvmem_bin_ro_group, 155 NULL, 156 }; 157 158 static void nvmem_release(struct device *dev) 159 { 160 struct nvmem_device *nvmem = to_nvmem_device(dev); 161 162 ida_simple_remove(&nvmem_ida, nvmem->id); 163 kfree(nvmem); 164 } 165 166 static const struct device_type nvmem_provider_type = { 167 .release = nvmem_release, 168 }; 169 170 static struct bus_type nvmem_bus_type = { 171 .name = "nvmem", 172 }; 173 174 static int of_nvmem_match(struct device *dev, void *nvmem_np) 175 { 176 return dev->of_node == nvmem_np; 177 } 178 179 static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) 180 { 181 struct device *d; 182 183 if (!nvmem_np) 184 return NULL; 185 186 d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match); 187 188 if (!d) 189 return NULL; 190 191 return to_nvmem_device(d); 192 } 193 194 static struct nvmem_cell *nvmem_find_cell(const char *cell_id) 195 { 196 struct nvmem_cell *p; 197 198 list_for_each_entry(p, &nvmem_cells, node) 199 if (p && !strcmp(p->name, cell_id)) 200 return p; 201 202 return NULL; 203 } 204 205 static void nvmem_cell_drop(struct nvmem_cell *cell) 206 { 207 mutex_lock(&nvmem_cells_mutex); 208 list_del(&cell->node); 209 mutex_unlock(&nvmem_cells_mutex); 210 kfree(cell); 211 } 212 213 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) 214 { 215 struct nvmem_cell *cell; 216 struct list_head *p, *n; 217 218 list_for_each_safe(p, n, &nvmem_cells) { 219 cell = list_entry(p, struct nvmem_cell, node); 220 if (cell->nvmem == nvmem) 221 nvmem_cell_drop(cell); 222 } 223 } 224 225 static void nvmem_cell_add(struct nvmem_cell *cell) 226 { 227 mutex_lock(&nvmem_cells_mutex); 228 list_add_tail(&cell->node, &nvmem_cells); 229 mutex_unlock(&nvmem_cells_mutex); 230 } 231 232 static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, 233 const struct nvmem_cell_info *info, 234 struct nvmem_cell *cell) 235 { 236 cell->nvmem = nvmem; 237 cell->offset = info->offset; 238 cell->bytes = info->bytes; 239 cell->name = info->name; 240 241 cell->bit_offset = info->bit_offset; 242 cell->nbits = info->nbits; 243 244 if (cell->nbits) 245 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, 246 BITS_PER_BYTE); 247 248 if (!IS_ALIGNED(cell->offset, nvmem->stride)) { 249 dev_err(&nvmem->dev, 250 "cell %s unaligned to nvmem stride %d\n", 251 cell->name, nvmem->stride); 252 return -EINVAL; 253 } 254 255 return 0; 256 } 257 258 static int nvmem_add_cells(struct nvmem_device *nvmem, 259 const struct nvmem_config *cfg) 260 { 261 struct nvmem_cell **cells; 262 const struct nvmem_cell_info *info = cfg->cells; 263 int i, rval; 264 265 cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL); 266 if (!cells) 267 return -ENOMEM; 268 269 for (i = 0; i < cfg->ncells; i++) { 270 cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); 271 if (!cells[i]) { 272 rval = -ENOMEM; 273 goto err; 274 } 275 276 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); 277 if (IS_ERR_VALUE(rval)) { 278 kfree(cells[i]); 279 goto err; 280 } 281 282 nvmem_cell_add(cells[i]); 283 } 284 285 nvmem->ncells = cfg->ncells; 286 /* remove tmp array */ 287 kfree(cells); 288 289 return 0; 290 err: 291 while (--i) 292 nvmem_cell_drop(cells[i]); 293 294 return rval; 295 } 296 297 /** 298 * nvmem_register() - Register a nvmem device for given nvmem_config. 299 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem 300 * 301 * @config: nvmem device configuration with which nvmem device is created. 302 * 303 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device 304 * on success. 305 */ 306 307 struct nvmem_device *nvmem_register(const struct nvmem_config *config) 308 { 309 struct nvmem_device *nvmem; 310 struct device_node *np; 311 struct regmap *rm; 312 int rval; 313 314 if (!config->dev) 315 return ERR_PTR(-EINVAL); 316 317 rm = dev_get_regmap(config->dev, NULL); 318 if (!rm) { 319 dev_err(config->dev, "Regmap not found\n"); 320 return ERR_PTR(-EINVAL); 321 } 322 323 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); 324 if (!nvmem) 325 return ERR_PTR(-ENOMEM); 326 327 rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL); 328 if (rval < 0) { 329 kfree(nvmem); 330 return ERR_PTR(rval); 331 } 332 333 nvmem->id = rval; 334 nvmem->regmap = rm; 335 nvmem->owner = config->owner; 336 nvmem->stride = regmap_get_reg_stride(rm); 337 nvmem->word_size = regmap_get_val_bytes(rm); 338 nvmem->size = regmap_get_max_register(rm) + nvmem->stride; 339 nvmem->dev.type = &nvmem_provider_type; 340 nvmem->dev.bus = &nvmem_bus_type; 341 nvmem->dev.parent = config->dev; 342 np = config->dev->of_node; 343 nvmem->dev.of_node = np; 344 dev_set_name(&nvmem->dev, "%s%d", 345 config->name ? : "nvmem", config->id); 346 347 nvmem->read_only = of_property_read_bool(np, "read-only") | 348 config->read_only; 349 350 nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups : 351 nvmem_rw_dev_groups; 352 353 device_initialize(&nvmem->dev); 354 355 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); 356 357 rval = device_add(&nvmem->dev); 358 if (rval) { 359 ida_simple_remove(&nvmem_ida, nvmem->id); 360 kfree(nvmem); 361 return ERR_PTR(rval); 362 } 363 364 if (config->cells) 365 nvmem_add_cells(nvmem, config); 366 367 return nvmem; 368 } 369 EXPORT_SYMBOL_GPL(nvmem_register); 370 371 /** 372 * nvmem_unregister() - Unregister previously registered nvmem device 373 * 374 * @nvmem: Pointer to previously registered nvmem device. 375 * 376 * Return: Will be an negative on error or a zero on success. 377 */ 378 int nvmem_unregister(struct nvmem_device *nvmem) 379 { 380 mutex_lock(&nvmem_mutex); 381 if (nvmem->users) { 382 mutex_unlock(&nvmem_mutex); 383 return -EBUSY; 384 } 385 mutex_unlock(&nvmem_mutex); 386 387 nvmem_device_remove_all_cells(nvmem); 388 device_del(&nvmem->dev); 389 390 return 0; 391 } 392 EXPORT_SYMBOL_GPL(nvmem_unregister); 393 394 static struct nvmem_device *__nvmem_device_get(struct device_node *np, 395 struct nvmem_cell **cellp, 396 const char *cell_id) 397 { 398 struct nvmem_device *nvmem = NULL; 399 400 mutex_lock(&nvmem_mutex); 401 402 if (np) { 403 nvmem = of_nvmem_find(np); 404 if (!nvmem) { 405 mutex_unlock(&nvmem_mutex); 406 return ERR_PTR(-EPROBE_DEFER); 407 } 408 } else { 409 struct nvmem_cell *cell = nvmem_find_cell(cell_id); 410 411 if (cell) { 412 nvmem = cell->nvmem; 413 *cellp = cell; 414 } 415 416 if (!nvmem) { 417 mutex_unlock(&nvmem_mutex); 418 return ERR_PTR(-ENOENT); 419 } 420 } 421 422 nvmem->users++; 423 mutex_unlock(&nvmem_mutex); 424 425 if (!try_module_get(nvmem->owner)) { 426 dev_err(&nvmem->dev, 427 "could not increase module refcount for cell %s\n", 428 nvmem->name); 429 430 mutex_lock(&nvmem_mutex); 431 nvmem->users--; 432 mutex_unlock(&nvmem_mutex); 433 434 return ERR_PTR(-EINVAL); 435 } 436 437 return nvmem; 438 } 439 440 static void __nvmem_device_put(struct nvmem_device *nvmem) 441 { 442 module_put(nvmem->owner); 443 mutex_lock(&nvmem_mutex); 444 nvmem->users--; 445 mutex_unlock(&nvmem_mutex); 446 } 447 448 static int nvmem_match(struct device *dev, void *data) 449 { 450 return !strcmp(dev_name(dev), data); 451 } 452 453 static struct nvmem_device *nvmem_find(const char *name) 454 { 455 struct device *d; 456 457 d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match); 458 459 if (!d) 460 return NULL; 461 462 return to_nvmem_device(d); 463 } 464 465 #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) 466 /** 467 * of_nvmem_device_get() - Get nvmem device from a given id 468 * 469 * @dev node: Device tree node that uses the nvmem device 470 * @id: nvmem name from nvmem-names property. 471 * 472 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 473 * on success. 474 */ 475 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) 476 { 477 478 struct device_node *nvmem_np; 479 int index; 480 481 index = of_property_match_string(np, "nvmem-names", id); 482 483 nvmem_np = of_parse_phandle(np, "nvmem", index); 484 if (!nvmem_np) 485 return ERR_PTR(-EINVAL); 486 487 return __nvmem_device_get(nvmem_np, NULL, NULL); 488 } 489 EXPORT_SYMBOL_GPL(of_nvmem_device_get); 490 #endif 491 492 /** 493 * nvmem_device_get() - Get nvmem device from a given id 494 * 495 * @dev : Device that uses the nvmem device 496 * @id: nvmem name from nvmem-names property. 497 * 498 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 499 * on success. 500 */ 501 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) 502 { 503 if (dev->of_node) { /* try dt first */ 504 struct nvmem_device *nvmem; 505 506 nvmem = of_nvmem_device_get(dev->of_node, dev_name); 507 508 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) 509 return nvmem; 510 511 } 512 513 return nvmem_find(dev_name); 514 } 515 EXPORT_SYMBOL_GPL(nvmem_device_get); 516 517 static int devm_nvmem_device_match(struct device *dev, void *res, void *data) 518 { 519 struct nvmem_device **nvmem = res; 520 521 if (WARN_ON(!nvmem || !*nvmem)) 522 return 0; 523 524 return *nvmem == data; 525 } 526 527 static void devm_nvmem_device_release(struct device *dev, void *res) 528 { 529 nvmem_device_put(*(struct nvmem_device **)res); 530 } 531 532 /** 533 * devm_nvmem_device_put() - put alredy got nvmem device 534 * 535 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), 536 * that needs to be released. 537 */ 538 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) 539 { 540 int ret; 541 542 ret = devres_release(dev, devm_nvmem_device_release, 543 devm_nvmem_device_match, nvmem); 544 545 WARN_ON(ret); 546 } 547 EXPORT_SYMBOL_GPL(devm_nvmem_device_put); 548 549 /** 550 * nvmem_device_put() - put alredy got nvmem device 551 * 552 * @nvmem: pointer to nvmem device that needs to be released. 553 */ 554 void nvmem_device_put(struct nvmem_device *nvmem) 555 { 556 __nvmem_device_put(nvmem); 557 } 558 EXPORT_SYMBOL_GPL(nvmem_device_put); 559 560 /** 561 * devm_nvmem_device_get() - Get nvmem cell of device form a given id 562 * 563 * @dev node: Device tree node that uses the nvmem cell 564 * @id: nvmem name in nvmems property. 565 * 566 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell 567 * on success. The nvmem_cell will be freed by the automatically once the 568 * device is freed. 569 */ 570 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) 571 { 572 struct nvmem_device **ptr, *nvmem; 573 574 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); 575 if (!ptr) 576 return ERR_PTR(-ENOMEM); 577 578 nvmem = nvmem_device_get(dev, id); 579 if (!IS_ERR(nvmem)) { 580 *ptr = nvmem; 581 devres_add(dev, ptr); 582 } else { 583 devres_free(ptr); 584 } 585 586 return nvmem; 587 } 588 EXPORT_SYMBOL_GPL(devm_nvmem_device_get); 589 590 static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id) 591 { 592 struct nvmem_cell *cell = NULL; 593 struct nvmem_device *nvmem; 594 595 nvmem = __nvmem_device_get(NULL, &cell, cell_id); 596 if (IS_ERR(nvmem)) 597 return ERR_CAST(nvmem); 598 599 return cell; 600 } 601 602 #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) 603 /** 604 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id 605 * 606 * @dev node: Device tree node that uses the nvmem cell 607 * @id: nvmem cell name from nvmem-cell-names property. 608 * 609 * Return: Will be an ERR_PTR() on error or a valid pointer 610 * to a struct nvmem_cell. The nvmem_cell will be freed by the 611 * nvmem_cell_put(). 612 */ 613 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 614 const char *name) 615 { 616 struct device_node *cell_np, *nvmem_np; 617 struct nvmem_cell *cell; 618 struct nvmem_device *nvmem; 619 const __be32 *addr; 620 int rval, len, index; 621 622 index = of_property_match_string(np, "nvmem-cell-names", name); 623 624 cell_np = of_parse_phandle(np, "nvmem-cells", index); 625 if (!cell_np) 626 return ERR_PTR(-EINVAL); 627 628 nvmem_np = of_get_next_parent(cell_np); 629 if (!nvmem_np) 630 return ERR_PTR(-EINVAL); 631 632 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); 633 if (IS_ERR(nvmem)) 634 return ERR_CAST(nvmem); 635 636 addr = of_get_property(cell_np, "reg", &len); 637 if (!addr || (len < 2 * sizeof(u32))) { 638 dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n", 639 cell_np->full_name); 640 rval = -EINVAL; 641 goto err_mem; 642 } 643 644 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 645 if (!cell) { 646 rval = -ENOMEM; 647 goto err_mem; 648 } 649 650 cell->nvmem = nvmem; 651 cell->offset = be32_to_cpup(addr++); 652 cell->bytes = be32_to_cpup(addr); 653 cell->name = cell_np->name; 654 655 addr = of_get_property(cell_np, "bits", &len); 656 if (addr && len == (2 * sizeof(u32))) { 657 cell->bit_offset = be32_to_cpup(addr++); 658 cell->nbits = be32_to_cpup(addr); 659 } 660 661 if (cell->nbits) 662 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, 663 BITS_PER_BYTE); 664 665 if (!IS_ALIGNED(cell->offset, nvmem->stride)) { 666 dev_err(&nvmem->dev, 667 "cell %s unaligned to nvmem stride %d\n", 668 cell->name, nvmem->stride); 669 rval = -EINVAL; 670 goto err_sanity; 671 } 672 673 nvmem_cell_add(cell); 674 675 return cell; 676 677 err_sanity: 678 kfree(cell); 679 680 err_mem: 681 __nvmem_device_put(nvmem); 682 683 return ERR_PTR(rval); 684 } 685 EXPORT_SYMBOL_GPL(of_nvmem_cell_get); 686 #endif 687 688 /** 689 * nvmem_cell_get() - Get nvmem cell of device form a given cell name 690 * 691 * @dev node: Device tree node that uses the nvmem cell 692 * @id: nvmem cell name to get. 693 * 694 * Return: Will be an ERR_PTR() on error or a valid pointer 695 * to a struct nvmem_cell. The nvmem_cell will be freed by the 696 * nvmem_cell_put(). 697 */ 698 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) 699 { 700 struct nvmem_cell *cell; 701 702 if (dev->of_node) { /* try dt first */ 703 cell = of_nvmem_cell_get(dev->of_node, cell_id); 704 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) 705 return cell; 706 } 707 708 return nvmem_cell_get_from_list(cell_id); 709 } 710 EXPORT_SYMBOL_GPL(nvmem_cell_get); 711 712 static void devm_nvmem_cell_release(struct device *dev, void *res) 713 { 714 nvmem_cell_put(*(struct nvmem_cell **)res); 715 } 716 717 /** 718 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id 719 * 720 * @dev node: Device tree node that uses the nvmem cell 721 * @id: nvmem id in nvmem-names property. 722 * 723 * Return: Will be an ERR_PTR() on error or a valid pointer 724 * to a struct nvmem_cell. The nvmem_cell will be freed by the 725 * automatically once the device is freed. 726 */ 727 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) 728 { 729 struct nvmem_cell **ptr, *cell; 730 731 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); 732 if (!ptr) 733 return ERR_PTR(-ENOMEM); 734 735 cell = nvmem_cell_get(dev, id); 736 if (!IS_ERR(cell)) { 737 *ptr = cell; 738 devres_add(dev, ptr); 739 } else { 740 devres_free(ptr); 741 } 742 743 return cell; 744 } 745 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); 746 747 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) 748 { 749 struct nvmem_cell **c = res; 750 751 if (WARN_ON(!c || !*c)) 752 return 0; 753 754 return *c == data; 755 } 756 757 /** 758 * devm_nvmem_cell_put() - Release previously allocated nvmem cell 759 * from devm_nvmem_cell_get. 760 * 761 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get() 762 */ 763 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) 764 { 765 int ret; 766 767 ret = devres_release(dev, devm_nvmem_cell_release, 768 devm_nvmem_cell_match, cell); 769 770 WARN_ON(ret); 771 } 772 EXPORT_SYMBOL(devm_nvmem_cell_put); 773 774 /** 775 * nvmem_cell_put() - Release previously allocated nvmem cell. 776 * 777 * @cell: Previously allocated nvmem cell by nvmem_cell_get() 778 */ 779 void nvmem_cell_put(struct nvmem_cell *cell) 780 { 781 struct nvmem_device *nvmem = cell->nvmem; 782 783 __nvmem_device_put(nvmem); 784 nvmem_cell_drop(cell); 785 } 786 EXPORT_SYMBOL_GPL(nvmem_cell_put); 787 788 static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, 789 void *buf) 790 { 791 u8 *p, *b; 792 int i, bit_offset = cell->bit_offset; 793 794 p = b = buf; 795 if (bit_offset) { 796 /* First shift */ 797 *b++ >>= bit_offset; 798 799 /* setup rest of the bytes if any */ 800 for (i = 1; i < cell->bytes; i++) { 801 /* Get bits from next byte and shift them towards msb */ 802 *p |= *b << (BITS_PER_BYTE - bit_offset); 803 804 p = b; 805 *b++ >>= bit_offset; 806 } 807 808 /* result fits in less bytes */ 809 if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) 810 *p-- = 0; 811 } 812 /* clear msb bits if any leftover in the last byte */ 813 *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); 814 } 815 816 static int __nvmem_cell_read(struct nvmem_device *nvmem, 817 struct nvmem_cell *cell, 818 void *buf, size_t *len) 819 { 820 int rc; 821 822 rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes); 823 824 if (IS_ERR_VALUE(rc)) 825 return rc; 826 827 /* shift bits in-place */ 828 if (cell->bit_offset || cell->nbits) 829 nvmem_shift_read_buffer_in_place(cell, buf); 830 831 *len = cell->bytes; 832 833 return 0; 834 } 835 836 /** 837 * nvmem_cell_read() - Read a given nvmem cell 838 * 839 * @cell: nvmem cell to be read. 840 * @len: pointer to length of cell which will be populated on successful read. 841 * 842 * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success. 843 * The buffer should be freed by the consumer with a kfree(). 844 */ 845 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 846 { 847 struct nvmem_device *nvmem = cell->nvmem; 848 u8 *buf; 849 int rc; 850 851 if (!nvmem || !nvmem->regmap) 852 return ERR_PTR(-EINVAL); 853 854 buf = kzalloc(cell->bytes, GFP_KERNEL); 855 if (!buf) 856 return ERR_PTR(-ENOMEM); 857 858 rc = __nvmem_cell_read(nvmem, cell, buf, len); 859 if (IS_ERR_VALUE(rc)) { 860 kfree(buf); 861 return ERR_PTR(rc); 862 } 863 864 return buf; 865 } 866 EXPORT_SYMBOL_GPL(nvmem_cell_read); 867 868 static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, 869 u8 *_buf, int len) 870 { 871 struct nvmem_device *nvmem = cell->nvmem; 872 int i, rc, nbits, bit_offset = cell->bit_offset; 873 u8 v, *p, *buf, *b, pbyte, pbits; 874 875 nbits = cell->nbits; 876 buf = kzalloc(cell->bytes, GFP_KERNEL); 877 if (!buf) 878 return ERR_PTR(-ENOMEM); 879 880 memcpy(buf, _buf, len); 881 p = b = buf; 882 883 if (bit_offset) { 884 pbyte = *b; 885 *b <<= bit_offset; 886 887 /* setup the first byte with lsb bits from nvmem */ 888 rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1); 889 *b++ |= GENMASK(bit_offset - 1, 0) & v; 890 891 /* setup rest of the byte if any */ 892 for (i = 1; i < cell->bytes; i++) { 893 /* Get last byte bits and shift them towards lsb */ 894 pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); 895 pbyte = *b; 896 p = b; 897 *b <<= bit_offset; 898 *b++ |= pbits; 899 } 900 } 901 902 /* if it's not end on byte boundary */ 903 if ((nbits + bit_offset) % BITS_PER_BYTE) { 904 /* setup the last byte with msb bits from nvmem */ 905 rc = regmap_raw_read(nvmem->regmap, 906 cell->offset + cell->bytes - 1, &v, 1); 907 *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; 908 909 } 910 911 return buf; 912 } 913 914 /** 915 * nvmem_cell_write() - Write to a given nvmem cell 916 * 917 * @cell: nvmem cell to be written. 918 * @buf: Buffer to be written. 919 * @len: length of buffer to be written to nvmem cell. 920 * 921 * Return: length of bytes written or negative on failure. 922 */ 923 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) 924 { 925 struct nvmem_device *nvmem = cell->nvmem; 926 int rc; 927 928 if (!nvmem || !nvmem->regmap || nvmem->read_only || 929 (cell->bit_offset == 0 && len != cell->bytes)) 930 return -EINVAL; 931 932 if (cell->bit_offset || cell->nbits) { 933 buf = nvmem_cell_prepare_write_buffer(cell, buf, len); 934 if (IS_ERR(buf)) 935 return PTR_ERR(buf); 936 } 937 938 rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); 939 940 /* free the tmp buffer */ 941 if (cell->bit_offset || cell->nbits) 942 kfree(buf); 943 944 if (IS_ERR_VALUE(rc)) 945 return rc; 946 947 return len; 948 } 949 EXPORT_SYMBOL_GPL(nvmem_cell_write); 950 951 /** 952 * nvmem_device_cell_read() - Read a given nvmem device and cell 953 * 954 * @nvmem: nvmem device to read from. 955 * @info: nvmem cell info to be read. 956 * @buf: buffer pointer which will be populated on successful read. 957 * 958 * Return: length of successful bytes read on success and negative 959 * error code on error. 960 */ 961 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, 962 struct nvmem_cell_info *info, void *buf) 963 { 964 struct nvmem_cell cell; 965 int rc; 966 ssize_t len; 967 968 if (!nvmem || !nvmem->regmap) 969 return -EINVAL; 970 971 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 972 if (IS_ERR_VALUE(rc)) 973 return rc; 974 975 rc = __nvmem_cell_read(nvmem, &cell, buf, &len); 976 if (IS_ERR_VALUE(rc)) 977 return rc; 978 979 return len; 980 } 981 EXPORT_SYMBOL_GPL(nvmem_device_cell_read); 982 983 /** 984 * nvmem_device_cell_write() - Write cell to a given nvmem device 985 * 986 * @nvmem: nvmem device to be written to. 987 * @info: nvmem cell info to be written 988 * @buf: buffer to be written to cell. 989 * 990 * Return: length of bytes written or negative error code on failure. 991 * */ 992 int nvmem_device_cell_write(struct nvmem_device *nvmem, 993 struct nvmem_cell_info *info, void *buf) 994 { 995 struct nvmem_cell cell; 996 int rc; 997 998 if (!nvmem || !nvmem->regmap) 999 return -EINVAL; 1000 1001 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 1002 if (IS_ERR_VALUE(rc)) 1003 return rc; 1004 1005 return nvmem_cell_write(&cell, buf, cell.bytes); 1006 } 1007 EXPORT_SYMBOL_GPL(nvmem_device_cell_write); 1008 1009 /** 1010 * nvmem_device_read() - Read from a given nvmem device 1011 * 1012 * @nvmem: nvmem device to read from. 1013 * @offset: offset in nvmem device. 1014 * @bytes: number of bytes to read. 1015 * @buf: buffer pointer which will be populated on successful read. 1016 * 1017 * Return: length of successful bytes read on success and negative 1018 * error code on error. 1019 */ 1020 int nvmem_device_read(struct nvmem_device *nvmem, 1021 unsigned int offset, 1022 size_t bytes, void *buf) 1023 { 1024 int rc; 1025 1026 if (!nvmem || !nvmem->regmap) 1027 return -EINVAL; 1028 1029 rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes); 1030 1031 if (IS_ERR_VALUE(rc)) 1032 return rc; 1033 1034 return bytes; 1035 } 1036 EXPORT_SYMBOL_GPL(nvmem_device_read); 1037 1038 /** 1039 * nvmem_device_write() - Write cell to a given nvmem device 1040 * 1041 * @nvmem: nvmem device to be written to. 1042 * @offset: offset in nvmem device. 1043 * @bytes: number of bytes to write. 1044 * @buf: buffer to be written. 1045 * 1046 * Return: length of bytes written or negative error code on failure. 1047 * */ 1048 int nvmem_device_write(struct nvmem_device *nvmem, 1049 unsigned int offset, 1050 size_t bytes, void *buf) 1051 { 1052 int rc; 1053 1054 if (!nvmem || !nvmem->regmap) 1055 return -EINVAL; 1056 1057 rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes); 1058 1059 if (IS_ERR_VALUE(rc)) 1060 return rc; 1061 1062 1063 return bytes; 1064 } 1065 EXPORT_SYMBOL_GPL(nvmem_device_write); 1066 1067 static int __init nvmem_init(void) 1068 { 1069 return bus_register(&nvmem_bus_type); 1070 } 1071 1072 static void __exit nvmem_exit(void) 1073 { 1074 bus_unregister(&nvmem_bus_type); 1075 } 1076 1077 subsys_initcall(nvmem_init); 1078 module_exit(nvmem_exit); 1079 1080 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); 1081 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); 1082 MODULE_DESCRIPTION("nvmem Driver Core"); 1083 MODULE_LICENSE("GPL v2"); 1084