xref: /linux/drivers/nvmem/core.c (revision 185647813cac080453cb73a2e034a8821049f2a7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * nvmem framework core.
4  *
5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 
21 struct nvmem_device {
22 	struct module		*owner;
23 	struct device		dev;
24 	int			stride;
25 	int			word_size;
26 	int			id;
27 	struct kref		refcnt;
28 	size_t			size;
29 	bool			read_only;
30 	int			flags;
31 	struct bin_attribute	eeprom;
32 	struct device		*base_dev;
33 	struct list_head	cells;
34 	nvmem_reg_read_t	reg_read;
35 	nvmem_reg_write_t	reg_write;
36 	void *priv;
37 };
38 
39 #define FLAG_COMPAT		BIT(0)
40 
41 struct nvmem_cell {
42 	const char		*name;
43 	int			offset;
44 	int			bytes;
45 	int			bit_offset;
46 	int			nbits;
47 	struct nvmem_device	*nvmem;
48 	struct list_head	node;
49 };
50 
51 static DEFINE_MUTEX(nvmem_mutex);
52 static DEFINE_IDA(nvmem_ida);
53 
54 static DEFINE_MUTEX(nvmem_cell_mutex);
55 static LIST_HEAD(nvmem_cell_tables);
56 
57 static DEFINE_MUTEX(nvmem_lookup_mutex);
58 static LIST_HEAD(nvmem_lookup_list);
59 
60 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
61 
62 #ifdef CONFIG_DEBUG_LOCK_ALLOC
63 static struct lock_class_key eeprom_lock_key;
64 #endif
65 
66 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
67 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
68 			  void *val, size_t bytes)
69 {
70 	if (nvmem->reg_read)
71 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
72 
73 	return -EINVAL;
74 }
75 
76 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
77 			   void *val, size_t bytes)
78 {
79 	if (nvmem->reg_write)
80 		return nvmem->reg_write(nvmem->priv, offset, val, bytes);
81 
82 	return -EINVAL;
83 }
84 
85 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
86 				    struct bin_attribute *attr,
87 				    char *buf, loff_t pos, size_t count)
88 {
89 	struct device *dev;
90 	struct nvmem_device *nvmem;
91 	int rc;
92 
93 	if (attr->private)
94 		dev = attr->private;
95 	else
96 		dev = container_of(kobj, struct device, kobj);
97 	nvmem = to_nvmem_device(dev);
98 
99 	/* Stop the user from reading */
100 	if (pos >= nvmem->size)
101 		return 0;
102 
103 	if (count < nvmem->word_size)
104 		return -EINVAL;
105 
106 	if (pos + count > nvmem->size)
107 		count = nvmem->size - pos;
108 
109 	count = round_down(count, nvmem->word_size);
110 
111 	rc = nvmem_reg_read(nvmem, pos, buf, count);
112 
113 	if (rc)
114 		return rc;
115 
116 	return count;
117 }
118 
119 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
120 				     struct bin_attribute *attr,
121 				     char *buf, loff_t pos, size_t count)
122 {
123 	struct device *dev;
124 	struct nvmem_device *nvmem;
125 	int rc;
126 
127 	if (attr->private)
128 		dev = attr->private;
129 	else
130 		dev = container_of(kobj, struct device, kobj);
131 	nvmem = to_nvmem_device(dev);
132 
133 	/* Stop the user from writing */
134 	if (pos >= nvmem->size)
135 		return -EFBIG;
136 
137 	if (count < nvmem->word_size)
138 		return -EINVAL;
139 
140 	if (pos + count > nvmem->size)
141 		count = nvmem->size - pos;
142 
143 	count = round_down(count, nvmem->word_size);
144 
145 	rc = nvmem_reg_write(nvmem, pos, buf, count);
146 
147 	if (rc)
148 		return rc;
149 
150 	return count;
151 }
152 
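/*
 * Editor's sketch (not in the original source): these handlers back the
 * "nvmem" binary attribute, so a userspace read such as the one below ends
 * up in bin_attr_nvmem_read() above.  The device name "foo0" is assumed.
 *
 *	int fd = open("/sys/bus/nvmem/devices/foo0/nvmem", O_RDONLY);
 *	char buf[32];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 0);
 *
 * Reads past nvmem->size return 0 (EOF), and partial accesses are rounded
 * down to a multiple of word_size, as implemented above.
 */
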
153 /* default read/write permissions */
154 static struct bin_attribute bin_attr_rw_nvmem = {
155 	.attr	= {
156 		.name	= "nvmem",
157 		.mode	= 0644,
158 	},
159 	.read	= bin_attr_nvmem_read,
160 	.write	= bin_attr_nvmem_write,
161 };
162 
163 static struct bin_attribute *nvmem_bin_rw_attributes[] = {
164 	&bin_attr_rw_nvmem,
165 	NULL,
166 };
167 
168 static const struct attribute_group nvmem_bin_rw_group = {
169 	.bin_attrs	= nvmem_bin_rw_attributes,
170 };
171 
172 static const struct attribute_group *nvmem_rw_dev_groups[] = {
173 	&nvmem_bin_rw_group,
174 	NULL,
175 };
176 
177 /* read only permission */
178 static struct bin_attribute bin_attr_ro_nvmem = {
179 	.attr	= {
180 		.name	= "nvmem",
181 		.mode	= 0444,
182 	},
183 	.read	= bin_attr_nvmem_read,
184 };
185 
186 static struct bin_attribute *nvmem_bin_ro_attributes[] = {
187 	&bin_attr_ro_nvmem,
188 	NULL,
189 };
190 
191 static const struct attribute_group nvmem_bin_ro_group = {
192 	.bin_attrs	= nvmem_bin_ro_attributes,
193 };
194 
195 static const struct attribute_group *nvmem_ro_dev_groups[] = {
196 	&nvmem_bin_ro_group,
197 	NULL,
198 };
199 
200 /* default read/write permissions, root only */
201 static struct bin_attribute bin_attr_rw_root_nvmem = {
202 	.attr	= {
203 		.name	= "nvmem",
204 		.mode	= 0600,
205 	},
206 	.read	= bin_attr_nvmem_read,
207 	.write	= bin_attr_nvmem_write,
208 };
209 
210 static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
211 	&bin_attr_rw_root_nvmem,
212 	NULL,
213 };
214 
215 static const struct attribute_group nvmem_bin_rw_root_group = {
216 	.bin_attrs	= nvmem_bin_rw_root_attributes,
217 };
218 
219 static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
220 	&nvmem_bin_rw_root_group,
221 	NULL,
222 };
223 
224 /* read only permission, root only */
225 static struct bin_attribute bin_attr_ro_root_nvmem = {
226 	.attr	= {
227 		.name	= "nvmem",
228 		.mode	= 0400,
229 	},
230 	.read	= bin_attr_nvmem_read,
231 };
232 
233 static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
234 	&bin_attr_ro_root_nvmem,
235 	NULL,
236 };
237 
238 static const struct attribute_group nvmem_bin_ro_root_group = {
239 	.bin_attrs	= nvmem_bin_ro_root_attributes,
240 };
241 
242 static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
243 	&nvmem_bin_ro_root_group,
244 	NULL,
245 };
246 
247 static void nvmem_release(struct device *dev)
248 {
249 	struct nvmem_device *nvmem = to_nvmem_device(dev);
250 
251 	ida_simple_remove(&nvmem_ida, nvmem->id);
252 	kfree(nvmem);
253 }
254 
255 static const struct device_type nvmem_provider_type = {
256 	.release	= nvmem_release,
257 };
258 
259 static struct bus_type nvmem_bus_type = {
260 	.name		= "nvmem",
261 };
262 
263 static int of_nvmem_match(struct device *dev, void *nvmem_np)
264 {
265 	return dev->of_node == nvmem_np;
266 }
267 
268 static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
269 {
270 	struct device *d;
271 
272 	if (!nvmem_np)
273 		return NULL;
274 
275 	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
276 
277 	if (!d)
278 		return NULL;
279 
280 	return to_nvmem_device(d);
281 }
282 
283 static struct nvmem_device *nvmem_find(const char *name)
284 {
285 	struct device *d;
286 
287 	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
288 
289 	if (!d)
290 		return NULL;
291 
292 	return to_nvmem_device(d);
293 }
294 
295 static void nvmem_cell_drop(struct nvmem_cell *cell)
296 {
297 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
298 	mutex_lock(&nvmem_mutex);
299 	list_del(&cell->node);
300 	mutex_unlock(&nvmem_mutex);
301 	kfree(cell);
302 }
303 
304 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
305 {
306 	struct nvmem_cell *cell, *p;
307 
308 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
309 		nvmem_cell_drop(cell);
310 }
311 
312 static void nvmem_cell_add(struct nvmem_cell *cell)
313 {
314 	mutex_lock(&nvmem_mutex);
315 	list_add_tail(&cell->node, &cell->nvmem->cells);
316 	mutex_unlock(&nvmem_mutex);
317 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
318 }
319 
320 static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
321 				   const struct nvmem_cell_info *info,
322 				   struct nvmem_cell *cell)
323 {
324 	cell->nvmem = nvmem;
325 	cell->offset = info->offset;
326 	cell->bytes = info->bytes;
327 	cell->name = info->name;
328 
329 	cell->bit_offset = info->bit_offset;
330 	cell->nbits = info->nbits;
331 
332 	if (cell->nbits)
333 		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
334 					   BITS_PER_BYTE);
335 
336 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
337 		dev_err(&nvmem->dev,
338 			"cell %s unaligned to nvmem stride %d\n",
339 			cell->name, nvmem->stride);
340 		return -EINVAL;
341 	}
342 
343 	return 0;
344 }
345 
346 /**
347  * nvmem_add_cells() - Add cell information to an nvmem device
348  *
349  * @nvmem: nvmem device to add cells to.
350  * @info: nvmem cell info to add to the device
351  * @ncells: number of cells in info
352  *
353  * Return: 0 or negative error code on failure.
354  */
355 static int nvmem_add_cells(struct nvmem_device *nvmem,
356 		    const struct nvmem_cell_info *info,
357 		    int ncells)
358 {
359 	struct nvmem_cell **cells;
360 	int i, rval;
361 
362 	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
363 	if (!cells)
364 		return -ENOMEM;
365 
366 	for (i = 0; i < ncells; i++) {
367 		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
368 		if (!cells[i]) {
369 			rval = -ENOMEM;
370 			goto err;
371 		}
372 
373 		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
374 		if (rval) {
375 			kfree(cells[i]);
376 			goto err;
377 		}
378 
379 		nvmem_cell_add(cells[i]);
380 	}
381 
382 	/* remove tmp array */
383 	kfree(cells);
384 
385 	return 0;
386 err:
387 	while (i--)
388 		nvmem_cell_drop(cells[i]);
389 
390 	kfree(cells);
391 
392 	return rval;
393 }
394 
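/*
 * Editor's sketch (assumption, not from this file): cells typically reach
 * nvmem_add_cells() through the ->cells/->ncells members of nvmem_config.
 * A provider could describe a MAC address cell like this; "foo_cells" and
 * the offset are made-up values for illustration:
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	config.cells = foo_cells;
 *	config.ncells = ARRAY_SIZE(foo_cells);
 */
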
395 /*
396  * nvmem_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
398  * drivers/misc/eeprom drivers.
399  */
400 static int nvmem_setup_compat(struct nvmem_device *nvmem,
401 			      const struct nvmem_config *config)
402 {
403 	int rval;
404 
405 	if (!config->base_dev)
406 		return -EINVAL;
407 
408 	if (nvmem->read_only)
409 		nvmem->eeprom = bin_attr_ro_root_nvmem;
410 	else
411 		nvmem->eeprom = bin_attr_rw_root_nvmem;
412 	nvmem->eeprom.attr.name = "eeprom";
413 	nvmem->eeprom.size = nvmem->size;
414 #ifdef CONFIG_DEBUG_LOCK_ALLOC
415 	nvmem->eeprom.attr.key = &eeprom_lock_key;
416 #endif
417 	nvmem->eeprom.private = &nvmem->dev;
418 	nvmem->base_dev = config->base_dev;
419 
420 	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
421 	if (rval) {
422 		dev_err(&nvmem->dev,
423 			"Failed to create eeprom binary file %d\n", rval);
424 		return rval;
425 	}
426 
427 	nvmem->flags |= FLAG_COMPAT;
428 
429 	return 0;
430 }
431 
432 /**
433  * nvmem_register_notifier() - Register a notifier block for nvmem events.
434  *
435  * @nb: notifier block to be called on nvmem events.
436  *
437  * Return: 0 on success, negative error number on failure.
438  */
439 int nvmem_register_notifier(struct notifier_block *nb)
440 {
441 	return blocking_notifier_chain_register(&nvmem_notifier, nb);
442 }
443 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
444 
445 /**
446  * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
447  *
448  * @nb: notifier block to be unregistered.
449  *
450  * Return: 0 on success, negative error number on failure.
451  */
452 int nvmem_unregister_notifier(struct notifier_block *nb)
453 {
454 	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
455 }
456 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
457 
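/*
 * Editor's sketch (assumption): a consumer interested in nvmem lifecycle
 * events could hook the chain like this; the "foo" names are hypothetical.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem device added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 *	...
 *	nvmem_unregister_notifier(&foo_nb);
 */
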
458 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
459 {
460 	const struct nvmem_cell_info *info;
461 	struct nvmem_cell_table *table;
462 	struct nvmem_cell *cell;
463 	int rval = 0, i;
464 
465 	mutex_lock(&nvmem_cell_mutex);
466 	list_for_each_entry(table, &nvmem_cell_tables, node) {
467 		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
468 			for (i = 0; i < table->ncells; i++) {
469 				info = &table->cells[i];
470 
471 				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
472 				if (!cell) {
473 					rval = -ENOMEM;
474 					goto out;
475 				}
476 
477 				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
478 								     info,
479 								     cell);
480 				if (rval) {
481 					kfree(cell);
482 					goto out;
483 				}
484 
485 				nvmem_cell_add(cell);
486 			}
487 		}
488 	}
489 
490 out:
491 	mutex_unlock(&nvmem_cell_mutex);
492 	return rval;
493 }
494 
static struct nvmem_cell *
nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
{
	struct nvmem_cell *iter, *cell = NULL;
	int i = 0;

	/*
	 * Iterate with a separate variable so that cell stays NULL when no
	 * entry matches; iterating with cell itself would leave it pointing
	 * at the list head (bogus memory) after an exhausted walk.
	 */
	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (index == i++) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
510 
static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	/* As above, keep cell NULL unless a matching entry is found. */
	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
525 
526 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
527 {
528 	struct device_node *parent, *child;
529 	struct device *dev = &nvmem->dev;
530 	struct nvmem_cell *cell;
531 	const __be32 *addr;
532 	int len;
533 
534 	parent = dev->of_node;
535 
536 	for_each_child_of_node(parent, child) {
537 		addr = of_get_property(child, "reg", &len);
538 		if (!addr || (len < 2 * sizeof(u32))) {
539 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
540 			return -EINVAL;
541 		}
542 
543 		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
544 		if (!cell)
545 			return -ENOMEM;
546 
547 		cell->nvmem = nvmem;
548 		cell->offset = be32_to_cpup(addr++);
549 		cell->bytes = be32_to_cpup(addr);
550 		cell->name = child->name;
551 
552 		addr = of_get_property(child, "bits", &len);
553 		if (addr && len == (2 * sizeof(u32))) {
554 			cell->bit_offset = be32_to_cpup(addr++);
555 			cell->nbits = be32_to_cpup(addr);
556 		}
557 
558 		if (cell->nbits)
559 			cell->bytes = DIV_ROUND_UP(
560 					cell->nbits + cell->bit_offset,
561 					BITS_PER_BYTE);
562 
563 		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
564 			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
565 				cell->name, nvmem->stride);
566 			/* Cells already added will be freed later. */
567 			kfree(cell);
568 			return -EINVAL;
569 		}
570 
571 		nvmem_cell_add(cell);
572 	}
573 
574 	return 0;
575 }
576 
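/*
 * Editor's note: nvmem_add_cells_from_of() parses child nodes of the
 * provider's device-tree node.  A hypothetical fragment this code would
 * accept (node names and values are illustrative only):
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		calib: calib@10 {
 *			reg = <0x10 0x4>;	// offset 0x10, 4 bytes
 *			bits = <2 10>;		// bit_offset 2, nbits 10
 *		};
 *	};
 *
 * "reg" supplies cell->offset/cell->bytes and the optional "bits"
 * property supplies cell->bit_offset/cell->nbits, as read above.
 */
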
/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem.
 *
 * @config: nvmem device configuration with which the nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
587 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
588 {
589 	struct nvmem_device *nvmem;
590 	int rval;
591 
592 	if (!config->dev)
593 		return ERR_PTR(-EINVAL);
594 
595 	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
596 	if (!nvmem)
597 		return ERR_PTR(-ENOMEM);
598 
599 	rval  = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
600 	if (rval < 0) {
601 		kfree(nvmem);
602 		return ERR_PTR(rval);
603 	}
604 
605 	kref_init(&nvmem->refcnt);
606 	INIT_LIST_HEAD(&nvmem->cells);
607 
608 	nvmem->id = rval;
609 	nvmem->owner = config->owner;
610 	if (!nvmem->owner && config->dev->driver)
611 		nvmem->owner = config->dev->driver->owner;
612 	nvmem->stride = config->stride ?: 1;
613 	nvmem->word_size = config->word_size ?: 1;
614 	nvmem->size = config->size;
615 	nvmem->dev.type = &nvmem_provider_type;
616 	nvmem->dev.bus = &nvmem_bus_type;
617 	nvmem->dev.parent = config->dev;
618 	nvmem->priv = config->priv;
619 	nvmem->reg_read = config->reg_read;
620 	nvmem->reg_write = config->reg_write;
621 	nvmem->dev.of_node = config->dev->of_node;
622 
623 	if (config->id == -1 && config->name) {
624 		dev_set_name(&nvmem->dev, "%s", config->name);
625 	} else {
626 		dev_set_name(&nvmem->dev, "%s%d",
627 			     config->name ? : "nvmem",
628 			     config->name ? config->id : nvmem->id);
629 	}
630 
631 	nvmem->read_only = device_property_present(config->dev, "read-only") |
632 			   config->read_only;
633 
634 	if (config->root_only)
635 		nvmem->dev.groups = nvmem->read_only ?
636 			nvmem_ro_root_dev_groups :
637 			nvmem_rw_root_dev_groups;
638 	else
639 		nvmem->dev.groups = nvmem->read_only ?
640 			nvmem_ro_dev_groups :
641 			nvmem_rw_dev_groups;
642 
643 	device_initialize(&nvmem->dev);
644 
645 	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
646 
647 	rval = device_add(&nvmem->dev);
648 	if (rval)
649 		goto err_put_device;
650 
651 	if (config->compat) {
652 		rval = nvmem_setup_compat(nvmem, config);
653 		if (rval)
654 			goto err_device_del;
655 	}
656 
657 	if (config->cells) {
658 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
659 		if (rval)
660 			goto err_teardown_compat;
661 	}
662 
663 	rval = nvmem_add_cells_from_table(nvmem);
664 	if (rval)
665 		goto err_remove_cells;
666 
667 	rval = nvmem_add_cells_from_of(nvmem);
668 	if (rval)
669 		goto err_remove_cells;
670 
	/*
	 * The notifier chain returns NOTIFY_* codes, not errnos, so its
	 * return value is deliberately not treated as a failure here.
	 */
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
674 
675 	return nvmem;
676 
677 err_remove_cells:
678 	nvmem_device_remove_all_cells(nvmem);
679 err_teardown_compat:
680 	if (config->compat)
681 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
682 err_device_del:
683 	device_del(&nvmem->dev);
684 err_put_device:
685 	put_device(&nvmem->dev);
686 
687 	return ERR_PTR(rval);
688 }
689 EXPORT_SYMBOL_GPL(nvmem_register);
690 
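/*
 * Editor's sketch (assumption, not from this file): a minimal read-only
 * provider.  The "foo" driver, its shadow buffer and the sizes are
 * hypothetical.
 *
 *	static int foo_reg_read(void *priv, unsigned int off, void *val,
 *				size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + off, bytes);
 *		return 0;
 *	}
 *
 *	static struct nvmem_config foo_nvmem_config = {
 *		.name		= "foo",
 *		.id		= -1,
 *		.read_only	= true,
 *		.word_size	= 1,
 *		.stride		= 1,
 *		.size		= 256,
 *		.reg_read	= foo_reg_read,
 *	};
 *
 *	foo_nvmem_config.dev = dev;
 *	foo_nvmem_config.priv = foo;
 *	nvmem = nvmem_register(&foo_nvmem_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */
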
691 static void nvmem_device_release(struct kref *kref)
692 {
693 	struct nvmem_device *nvmem;
694 
695 	nvmem = container_of(kref, struct nvmem_device, refcnt);
696 
697 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
698 
699 	if (nvmem->flags & FLAG_COMPAT)
700 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
701 
702 	nvmem_device_remove_all_cells(nvmem);
703 	device_del(&nvmem->dev);
704 	put_device(&nvmem->dev);
705 }
706 
707 /**
 * nvmem_unregister() - Unregister a previously registered nvmem device
709  *
710  * @nvmem: Pointer to previously registered nvmem device.
711  */
712 void nvmem_unregister(struct nvmem_device *nvmem)
713 {
714 	kref_put(&nvmem->refcnt, nvmem_device_release);
715 }
716 EXPORT_SYMBOL_GPL(nvmem_unregister);
717 
718 static void devm_nvmem_release(struct device *dev, void *res)
719 {
720 	nvmem_unregister(*(struct nvmem_device **)res);
721 }
722 
723 /**
724  * devm_nvmem_register() - Register a managed nvmem device for given
725  * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem.
727  *
728  * @dev: Device that uses the nvmem device.
729  * @config: nvmem device configuration with which nvmem device is created.
730  *
731  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
732  * on success.
733  */
734 struct nvmem_device *devm_nvmem_register(struct device *dev,
735 					 const struct nvmem_config *config)
736 {
737 	struct nvmem_device **ptr, *nvmem;
738 
739 	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
740 	if (!ptr)
741 		return ERR_PTR(-ENOMEM);
742 
743 	nvmem = nvmem_register(config);
744 
745 	if (!IS_ERR(nvmem)) {
746 		*ptr = nvmem;
747 		devres_add(dev, ptr);
748 	} else {
749 		devres_free(ptr);
750 	}
751 
752 	return nvmem;
753 }
754 EXPORT_SYMBOL_GPL(devm_nvmem_register);
755 
756 static int devm_nvmem_match(struct device *dev, void *res, void *data)
757 {
758 	struct nvmem_device **r = res;
759 
760 	return *r == data;
761 }
762 
763 /**
 * devm_nvmem_unregister() - Unregister a previously registered managed
 * nvmem device.
766  *
767  * @dev: Device that uses the nvmem device.
768  * @nvmem: Pointer to previously registered nvmem device.
769  *
 * Return: Will be negative on error or zero on success.
771  */
772 int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
773 {
774 	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
775 }
776 EXPORT_SYMBOL(devm_nvmem_unregister);
777 
778 static struct nvmem_device *__nvmem_device_get(struct device_node *np,
779 					       const char *nvmem_name)
780 {
781 	struct nvmem_device *nvmem = NULL;
782 
783 	mutex_lock(&nvmem_mutex);
784 	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
785 	mutex_unlock(&nvmem_mutex);
786 	if (!nvmem)
787 		return ERR_PTR(-EPROBE_DEFER);
788 
789 	if (!try_module_get(nvmem->owner)) {
790 		dev_err(&nvmem->dev,
791 			"could not increase module refcount for cell %s\n",
792 			nvmem_dev_name(nvmem));
793 
794 		return ERR_PTR(-EINVAL);
795 	}
796 
797 	kref_get(&nvmem->refcnt);
798 
799 	return nvmem;
800 }
801 
802 static void __nvmem_device_put(struct nvmem_device *nvmem)
803 {
804 	module_put(nvmem->owner);
805 	kref_put(&nvmem->refcnt, nvmem_device_release);
806 }
807 
808 #if IS_ENABLED(CONFIG_OF)
809 /**
810  * of_nvmem_device_get() - Get nvmem device from a given id
811  *
812  * @np: Device tree node that uses the nvmem device.
813  * @id: nvmem name from nvmem-names property.
814  *
815  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
816  * on success.
817  */
818 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
819 {
821 	struct device_node *nvmem_np;
822 	int index;
823 
824 	index = of_property_match_string(np, "nvmem-names", id);
825 
826 	nvmem_np = of_parse_phandle(np, "nvmem", index);
827 	if (!nvmem_np)
828 		return ERR_PTR(-EINVAL);
829 
830 	return __nvmem_device_get(nvmem_np, NULL);
831 }
832 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
833 #endif
834 
835 /**
836  * nvmem_device_get() - Get nvmem device from a given id
837  *
838  * @dev: Device that uses the nvmem device.
839  * @dev_name: name of the requested nvmem device.
840  *
841  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
842  * on success.
843  */
844 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
845 {
846 	if (dev->of_node) { /* try dt first */
847 		struct nvmem_device *nvmem;
848 
849 		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
850 
851 		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
852 			return nvmem;
853 
854 	}
855 
856 	return nvmem_find(dev_name);
857 }
858 EXPORT_SYMBOL_GPL(nvmem_device_get);
859 
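/*
 * Editor's sketch (assumption): raw device-level access for a consumer,
 * using the lookup above plus nvmem_device_read() defined later in this
 * file.  The device name "foo0" and offset are hypothetical.
 *
 *	struct nvmem_device *nvmem = nvmem_device_get(dev, "foo0");
 *	u8 serial[8];
 *
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *	nvmem_device_read(nvmem, 0x10, sizeof(serial), serial);
 *	nvmem_device_put(nvmem);
 */
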
860 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
861 {
862 	struct nvmem_device **nvmem = res;
863 
864 	if (WARN_ON(!nvmem || !*nvmem))
865 		return 0;
866 
867 	return *nvmem == data;
868 }
869 
870 static void devm_nvmem_device_release(struct device *dev, void *res)
871 {
872 	nvmem_device_put(*(struct nvmem_device **)res);
873 }
874 
875 /**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
881  */
882 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
883 {
884 	int ret;
885 
886 	ret = devres_release(dev, devm_nvmem_device_release,
887 			     devm_nvmem_device_match, nvmem);
888 
889 	WARN_ON(ret);
890 }
891 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
892 
893 /**
 * nvmem_device_put() - put an already-obtained nvmem device
895  *
896  * @nvmem: pointer to nvmem device that needs to be released.
897  */
898 void nvmem_device_put(struct nvmem_device *nvmem)
899 {
900 	__nvmem_device_put(nvmem);
901 }
902 EXPORT_SYMBOL_GPL(nvmem_device_put);
903 
904 /**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * requesting device is freed.
913  */
914 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
915 {
916 	struct nvmem_device **ptr, *nvmem;
917 
918 	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
919 	if (!ptr)
920 		return ERR_PTR(-ENOMEM);
921 
922 	nvmem = nvmem_device_get(dev, id);
923 	if (!IS_ERR(nvmem)) {
924 		*ptr = nvmem;
925 		devres_add(dev, ptr);
926 	} else {
927 		devres_free(ptr);
928 	}
929 
930 	return nvmem;
931 }
932 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
933 
934 static struct nvmem_cell *
935 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
936 {
937 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
938 	struct nvmem_cell_lookup *lookup;
939 	struct nvmem_device *nvmem;
940 	const char *dev_id;
941 
942 	if (!dev)
943 		return ERR_PTR(-EINVAL);
944 
945 	dev_id = dev_name(dev);
946 
947 	mutex_lock(&nvmem_lookup_mutex);
948 
949 	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
950 		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
951 		    (strcmp(lookup->con_id, con_id) == 0)) {
952 			/* This is the right entry. */
953 			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			/*
			 * __nvmem_device_get() only ever returns a valid
			 * pointer or an ERR_PTR(), never NULL, so test with
			 * IS_ERR() and propagate the encoded error.
			 */
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
968 
969 out:
970 	mutex_unlock(&nvmem_lookup_mutex);
971 	return cell;
972 }
973 
974 #if IS_ENABLED(CONFIG_OF)
975 /**
976  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
977  *
978  * @np: Device tree node that uses the nvmem cell.
979  * @id: nvmem cell name from nvmem-cell-names property, or NULL
980  *      for the cell at index 0 (the lone cell with no accompanying
981  *      nvmem-cell-names property).
982  *
983  * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
986  */
987 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
988 {
989 	struct device_node *cell_np, *nvmem_np;
990 	struct nvmem_device *nvmem;
991 	struct nvmem_cell *cell;
992 	int index = 0;
993 
994 	/* if cell name exists, find index to the name */
995 	if (id)
996 		index = of_property_match_string(np, "nvmem-cell-names", id);
997 
998 	cell_np = of_parse_phandle(np, "nvmem-cells", index);
999 	if (!cell_np)
1000 		return ERR_PTR(-EINVAL);
1001 
1002 	nvmem_np = of_get_next_parent(cell_np);
1003 	if (!nvmem_np)
1004 		return ERR_PTR(-EINVAL);
1005 
1006 	nvmem = __nvmem_device_get(nvmem_np, NULL);
1007 	of_node_put(nvmem_np);
1008 	if (IS_ERR(nvmem))
1009 		return ERR_CAST(nvmem);
1010 
1011 	cell = nvmem_find_cell_by_index(nvmem, index);
1012 	if (!cell) {
1013 		__nvmem_device_put(nvmem);
1014 		return ERR_PTR(-ENOENT);
1015 	}
1016 
1017 	return cell;
1018 }
1019 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1020 #endif
1021 
1022 /**
 * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
1024  *
1025  * @dev: Device that requests the nvmem cell.
1026  * @id: nvmem cell name to get (this corresponds with the name from the
1027  *      nvmem-cell-names property for DT systems and with the con_id from
1028  *      the lookup entry for non-DT systems).
1029  *
1030  * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
1033  */
1034 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1035 {
1036 	struct nvmem_cell *cell;
1037 
1038 	if (dev->of_node) { /* try dt first */
1039 		cell = of_nvmem_cell_get(dev->of_node, id);
1040 		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1041 			return cell;
1042 	}
1043 
1044 	/* NULL cell id only allowed for device tree; invalid otherwise */
1045 	if (!id)
1046 		return ERR_PTR(-EINVAL);
1047 
1048 	return nvmem_cell_get_from_lookup(dev, id);
1049 }
1050 EXPORT_SYMBOL_GPL(nvmem_cell_get);
1051 
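/*
 * Editor's sketch (assumption): the usual consumer sequence.  The cell
 * name "mac-address" is illustrative.
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "mac-address");
 *	size_t len;
 *	u8 *mac;
 *
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);	// nvmem_cell_read() returns a kzalloc'd buffer
 */
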
1052 static void devm_nvmem_cell_release(struct device *dev, void *res)
1053 {
1054 	nvmem_cell_put(*(struct nvmem_cell **)res);
1055 }
1056 
1057 /**
 * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
1059  *
1060  * @dev: Device that requests the nvmem cell.
1061  * @id: nvmem cell name id to get.
1062  *
1063  * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
1066  */
1067 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1068 {
1069 	struct nvmem_cell **ptr, *cell;
1070 
1071 	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1072 	if (!ptr)
1073 		return ERR_PTR(-ENOMEM);
1074 
1075 	cell = nvmem_cell_get(dev, id);
1076 	if (!IS_ERR(cell)) {
1077 		*ptr = cell;
1078 		devres_add(dev, ptr);
1079 	} else {
1080 		devres_free(ptr);
1081 	}
1082 
1083 	return cell;
1084 }
1085 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1086 
1087 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
1088 {
1089 	struct nvmem_cell **c = res;
1090 
1091 	if (WARN_ON(!c || !*c))
1092 		return 0;
1093 
1094 	return *c == data;
1095 }
1096 
1097 /**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * obtained with devm_nvmem_cell_get().
1100  *
1101  * @dev: Device that requests the nvmem cell.
1102  * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1103  */
1104 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1105 {
1106 	int ret;
1107 
1108 	ret = devres_release(dev, devm_nvmem_cell_release,
1109 				devm_nvmem_cell_match, cell);
1110 
1111 	WARN_ON(ret);
1112 }
1113 EXPORT_SYMBOL(devm_nvmem_cell_put);
1114 
1115 /**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
1117  *
1118  * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1119  */
1120 void nvmem_cell_put(struct nvmem_cell *cell)
1121 {
1122 	struct nvmem_device *nvmem = cell->nvmem;
1123 
1124 	__nvmem_device_put(nvmem);
1125 }
1126 EXPORT_SYMBOL_GPL(nvmem_cell_put);
1127 
1128 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
1129 {
1130 	u8 *p, *b;
1131 	int i, bit_offset = cell->bit_offset;
1132 
1133 	p = b = buf;
1134 	if (bit_offset) {
1135 		/* First shift */
1136 		*b++ >>= bit_offset;
1137 
1138 		/* setup rest of the bytes if any */
1139 		for (i = 1; i < cell->bytes; i++) {
1140 			/* Get bits from next byte and shift them towards msb */
1141 			*p |= *b << (BITS_PER_BYTE - bit_offset);
1142 
1143 			p = b;
1144 			*b++ >>= bit_offset;
1145 		}
1146 
		/* result fits in fewer bytes */
1148 		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
1149 			*p-- = 0;
1150 	}
	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
1153 }
1154 
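/*
 * Editor's worked example (not in the original source): for a cell with
 * bit_offset = 2 and nbits = 10 (so bytes = 2), a raw read of {0xab, 0xcd}
 * is shifted in place as follows:
 *
 *	buf[0] = (0xab >> 2) | (0xcd << 6)	-> 0x6a
 *	buf[1] =  0xcd >> 2			-> 0x33
 *	buf[1] &= GENMASK(1, 0)			-> 0x03  (10 % 8 = 2 live bits)
 *
 * leaving the 10-bit value 0x36a in little-endian byte order.
 */
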
1155 static int __nvmem_cell_read(struct nvmem_device *nvmem,
1156 		      struct nvmem_cell *cell,
1157 		      void *buf, size_t *len)
1158 {
1159 	int rc;
1160 
1161 	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
1162 
1163 	if (rc)
1164 		return rc;
1165 
1166 	/* shift bits in-place */
1167 	if (cell->bit_offset || cell->nbits)
1168 		nvmem_shift_read_buffer_in_place(cell, buf);
1169 
1170 	if (len)
1171 		*len = cell->bytes;
1172 
1173 	return 0;
1174 }
1175 
1176 /**
1177  * nvmem_cell_read() - Read a given nvmem cell
1178  *
1179  * @cell: nvmem cell to be read.
1180  * @len: pointer to length of cell which will be populated on successful read;
1181  *	 can be NULL.
1182  *
1183  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1184  * buffer should be freed by the consumer with a kfree().
1185  */
1186 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1187 {
1188 	struct nvmem_device *nvmem = cell->nvmem;
1189 	u8 *buf;
1190 	int rc;
1191 
1192 	if (!nvmem)
1193 		return ERR_PTR(-EINVAL);
1194 
1195 	buf = kzalloc(cell->bytes, GFP_KERNEL);
1196 	if (!buf)
1197 		return ERR_PTR(-ENOMEM);
1198 
1199 	rc = __nvmem_cell_read(nvmem, cell, buf, len);
1200 	if (rc) {
1201 		kfree(buf);
1202 		return ERR_PTR(rc);
1203 	}
1204 
1205 	return buf;
1206 }
1207 EXPORT_SYMBOL_GPL(nvmem_cell_read);
1208 
1209 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
1210 					     u8 *_buf, int len)
1211 {
1212 	struct nvmem_device *nvmem = cell->nvmem;
1213 	int i, rc, nbits, bit_offset = cell->bit_offset;
1214 	u8 v, *p, *buf, *b, pbyte, pbits;
1215 
1216 	nbits = cell->nbits;
1217 	buf = kzalloc(cell->bytes, GFP_KERNEL);
1218 	if (!buf)
1219 		return ERR_PTR(-ENOMEM);
1220 
1221 	memcpy(buf, _buf, len);
1222 	p = b = buf;
1223 
1224 	if (bit_offset) {
1225 		pbyte = *b;
1226 		*b <<= bit_offset;
1227 
1228 		/* setup the first byte with lsb bits from nvmem */
1229 		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1230 		if (rc)
1231 			goto err;
1232 		*b++ |= GENMASK(bit_offset - 1, 0) & v;
1233 
		/* setup rest of the bytes if any */
1235 		for (i = 1; i < cell->bytes; i++) {
1236 			/* Get last byte bits and shift them towards lsb */
1237 			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1238 			pbyte = *b;
1239 			p = b;
1240 			*b <<= bit_offset;
1241 			*b++ |= pbits;
1242 		}
1243 	}
1244 
	/* if the value doesn't end on a byte boundary */
1246 	if ((nbits + bit_offset) % BITS_PER_BYTE) {
1247 		/* setup the last byte with msb bits from nvmem */
1248 		rc = nvmem_reg_read(nvmem,
1249 				    cell->offset + cell->bytes - 1, &v, 1);
1250 		if (rc)
1251 			goto err;
1252 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1253 
1254 	}
1255 
1256 	return buf;
1257 err:
1258 	kfree(buf);
1259 	return ERR_PTR(rc);
1260 }
1261 
1262 /**
1263  * nvmem_cell_write() - Write to a given nvmem cell
1264  *
1265  * @cell: nvmem cell to be written.
1266  * @buf: Buffer to be written.
1267  * @len: length of buffer to be written to nvmem cell.
1268  *
1269  * Return: length of bytes written or negative on failure.
1270  */
1271 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1272 {
1273 	struct nvmem_device *nvmem = cell->nvmem;
1274 	int rc;
1275 
1276 	if (!nvmem || nvmem->read_only ||
1277 	    (cell->bit_offset == 0 && len != cell->bytes))
1278 		return -EINVAL;
1279 
1280 	if (cell->bit_offset || cell->nbits) {
1281 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1282 		if (IS_ERR(buf))
1283 			return PTR_ERR(buf);
1284 	}
1285 
1286 	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1287 
1288 	/* free the tmp buffer */
1289 	if (cell->bit_offset || cell->nbits)
1290 		kfree(buf);
1291 
1292 	if (rc)
1293 		return rc;
1294 
1295 	return len;
1296 }
1297 EXPORT_SYMBOL_GPL(nvmem_cell_write);
1298 
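/*
 * Editor's sketch (assumption): writing a cell; the cell name and value
 * are illustrative.  Note that len must equal cell->bytes for
 * byte-aligned cells.
 *
 *	struct nvmem_cell *cell = nvmem_cell_get(dev, "board-rev");
 *	u8 rev = 0x02;
 *
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *	nvmem_cell_write(cell, &rev, sizeof(rev));
 *	nvmem_cell_put(cell);
 */
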
1299 /**
 * nvmem_cell_read_u32() - Read a cell value as a u32
1301  *
1302  * @dev: Device that requests the nvmem cell.
1303  * @cell_id: Name of nvmem cell to read.
1304  * @val: pointer to output value.
1305  *
1306  * Return: 0 on success or negative errno.
1307  */
1308 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1309 {
1310 	struct nvmem_cell *cell;
1311 	void *buf;
1312 	size_t len;
1313 
1314 	cell = nvmem_cell_get(dev, cell_id);
1315 	if (IS_ERR(cell))
1316 		return PTR_ERR(cell);
1317 
1318 	buf = nvmem_cell_read(cell, &len);
1319 	if (IS_ERR(buf)) {
1320 		nvmem_cell_put(cell);
1321 		return PTR_ERR(buf);
1322 	}
1323 	if (len != sizeof(*val)) {
1324 		kfree(buf);
1325 		nvmem_cell_put(cell);
1326 		return -EINVAL;
1327 	}
1328 	memcpy(val, buf, sizeof(*val));
1329 
1330 	kfree(buf);
1331 	nvmem_cell_put(cell);
1332 	return 0;
1333 }
1334 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1335 
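/*
 * Editor's note: the helper above collapses the whole get/read/put dance
 * for the common 4-byte case; "calibration" is a hypothetical cell name.
 *
 *	u32 calib;
 *	int ret = nvmem_cell_read_u32(dev, "calibration", &calib);
 *
 * The cell must be exactly sizeof(u32) bytes or -EINVAL is returned.
 */
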
1336 /**
1337  * nvmem_device_cell_read() - Read a given nvmem device and cell
1338  *
1339  * @nvmem: nvmem device to read from.
1340  * @info: nvmem cell info to be read.
1341  * @buf: buffer pointer which will be populated on successful read.
1342  *
 * Return: number of bytes read on success and a negative
1344  * error code on error.
1345  */
1346 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1347 			   struct nvmem_cell_info *info, void *buf)
1348 {
1349 	struct nvmem_cell cell;
1350 	int rc;
1351 	ssize_t len;
1352 
1353 	if (!nvmem)
1354 		return -EINVAL;
1355 
1356 	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1357 	if (rc)
1358 		return rc;
1359 
1360 	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
1361 	if (rc)
1362 		return rc;
1363 
1364 	return len;
1365 }
1366 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1367 
1368 /**
1369  * nvmem_device_cell_write() - Write cell to a given nvmem device
1370  *
1371  * @nvmem: nvmem device to be written to.
1372  * @info: nvmem cell info to be written.
1373  * @buf: buffer to be written to cell.
1374  *
1375  * Return: length of bytes written or negative error code on failure.
1376  */
1377 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1378 			    struct nvmem_cell_info *info, void *buf)
1379 {
1380 	struct nvmem_cell cell;
1381 	int rc;
1382 
1383 	if (!nvmem)
1384 		return -EINVAL;
1385 
1386 	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
1387 	if (rc)
1388 		return rc;
1389 
1390 	return nvmem_cell_write(&cell, buf, cell.bytes);
1391 }
1392 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1393 
1394 /**
1395  * nvmem_device_read() - Read from a given nvmem device
1396  *
1397  * @nvmem: nvmem device to read from.
1398  * @offset: offset in nvmem device.
1399  * @bytes: number of bytes to read.
1400  * @buf: buffer pointer which will be populated on successful read.
1401  *
 * Return: number of bytes read on success and a negative
1403  * error code on error.
1404  */
1405 int nvmem_device_read(struct nvmem_device *nvmem,
1406 		      unsigned int offset,
1407 		      size_t bytes, void *buf)
1408 {
1409 	int rc;
1410 
1411 	if (!nvmem)
1412 		return -EINVAL;
1413 
1414 	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1415 
1416 	if (rc)
1417 		return rc;
1418 
1419 	return bytes;
1420 }
1421 EXPORT_SYMBOL_GPL(nvmem_device_read);
1422 
1423 /**
1424  * nvmem_device_write() - Write cell to a given nvmem device
1425  *
1426  * @nvmem: nvmem device to be written to.
1427  * @offset: offset in nvmem device.
1428  * @bytes: number of bytes to write.
1429  * @buf: buffer to be written.
1430  *
1431  * Return: length of bytes written or negative error code on failure.
1432  */
1433 int nvmem_device_write(struct nvmem_device *nvmem,
1434 		       unsigned int offset,
1435 		       size_t bytes, void *buf)
1436 {
1437 	int rc;
1438 
1439 	if (!nvmem)
1440 		return -EINVAL;
1441 
1442 	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1443 
1444 	if (rc)
1445 		return rc;
1446 
1448 	return bytes;
1449 }
1450 EXPORT_SYMBOL_GPL(nvmem_device_write);
1451 
1452 /**
1453  * nvmem_add_cell_table() - register a table of cell info entries
1454  *
1455  * @table: table of cell info entries
1456  */
1457 void nvmem_add_cell_table(struct nvmem_cell_table *table)
1458 {
1459 	mutex_lock(&nvmem_cell_mutex);
1460 	list_add_tail(&table->node, &nvmem_cell_tables);
1461 	mutex_unlock(&nvmem_cell_mutex);
1462 }
1463 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1464 
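/*
 * Editor's sketch (assumption): board code can attach cells to a provider
 * by name, before or after it probes; names and offsets are illustrative.
 *
 *	static struct nvmem_cell_info foo_board_cells[] = {
 *		{ .name = "serial", .offset = 0x0, .bytes = 8 },
 *	};
 *
 *	static struct nvmem_cell_table foo_board_cell_table = {
 *		.nvmem_name	= "foo0",
 *		.cells		= foo_board_cells,
 *		.ncells		= ARRAY_SIZE(foo_board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_board_cell_table);
 */
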
1465 /**
1466  * nvmem_del_cell_table() - remove a previously registered cell info table
1467  *
1468  * @table: table of cell info entries
1469  */
1470 void nvmem_del_cell_table(struct nvmem_cell_table *table)
1471 {
1472 	mutex_lock(&nvmem_cell_mutex);
1473 	list_del(&table->node);
1474 	mutex_unlock(&nvmem_cell_mutex);
1475 }
1476 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1477 
1478 /**
1479  * nvmem_add_cell_lookups() - register a list of cell lookup entries
1480  *
1481  * @entries: array of cell lookup entries
1482  * @nentries: number of cell lookup entries in the array
1483  */
1484 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1485 {
1486 	int i;
1487 
1488 	mutex_lock(&nvmem_lookup_mutex);
1489 	for (i = 0; i < nentries; i++)
1490 		list_add_tail(&entries[i].node, &nvmem_lookup_list);
1491 	mutex_unlock(&nvmem_lookup_mutex);
1492 }
1493 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1494 
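/*
 * Editor's sketch (assumption): lookup entries connect a consumer device
 * and con_id to a provider cell on non-DT systems, matching the search in
 * nvmem_cell_get_from_lookup() above.  All names are illustrative.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "bar.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */
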
1495 /**
1496  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1497  *                            entries
1498  *
1499  * @entries: array of cell lookup entries
1500  * @nentries: number of cell lookup entries in the array
1501  */
1502 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1503 {
1504 	int i;
1505 
1506 	mutex_lock(&nvmem_lookup_mutex);
1507 	for (i = 0; i < nentries; i++)
1508 		list_del(&entries[i].node);
1509 	mutex_unlock(&nvmem_lookup_mutex);
1510 }
1511 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1512 
1513 /**
1514  * nvmem_dev_name() - Get the name of a given nvmem device.
1515  *
1516  * @nvmem: nvmem device.
1517  *
1518  * Return: name of the nvmem device.
1519  */
1520 const char *nvmem_dev_name(struct nvmem_device *nvmem)
1521 {
1522 	return dev_name(&nvmem->dev);
1523 }
1524 EXPORT_SYMBOL_GPL(nvmem_dev_name);
1525 
1526 static int __init nvmem_init(void)
1527 {
1528 	return bus_register(&nvmem_bus_type);
1529 }
1530 
1531 static void __exit nvmem_exit(void)
1532 {
1533 	bus_unregister(&nvmem_bus_type);
1534 }
1535 
1536 subsys_initcall(nvmem_init);
1537 module_exit(nvmem_exit);
1538 
1539 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1540 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1541 MODULE_DESCRIPTION("nvmem Driver Core");
1542 MODULE_LICENSE("GPL v2");
1543