xref: /linux/drivers/nvmem/core.c (revision e56af94b9b5487a71f8c705c83ac5f7bc28ae1a2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * nvmem framework core.
4  *
5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/of.h>
20 #include <linux/slab.h>
21 
22 #include "internals.h"
23 
24 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
25 
26 #define FLAG_COMPAT		BIT(0)
27 struct nvmem_cell_entry {
28 	const char		*name;
29 	int			offset;
30 	size_t			raw_len;
31 	int			bytes;
32 	int			bit_offset;
33 	int			nbits;
34 	nvmem_cell_post_process_t read_post_process;
35 	void			*priv;
36 	struct device_node	*np;
37 	struct nvmem_device	*nvmem;
38 	struct list_head	node;
39 };
40 
41 struct nvmem_cell {
42 	struct nvmem_cell_entry *entry;
43 	const char		*id;
44 	int			index;
45 };
46 
47 static DEFINE_MUTEX(nvmem_mutex);
48 static DEFINE_IDA(nvmem_ida);
49 
50 static DEFINE_MUTEX(nvmem_cell_mutex);
51 static LIST_HEAD(nvmem_cell_tables);
52 
53 static DEFINE_MUTEX(nvmem_lookup_mutex);
54 static LIST_HEAD(nvmem_lookup_list);
55 
56 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
57 
58 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
59 			    void *val, size_t bytes)
60 {
61 	if (nvmem->reg_read)
62 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
63 
64 	return -EINVAL;
65 }
66 
67 static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
68 			     void *val, size_t bytes)
69 {
70 	int ret;
71 
72 	if (nvmem->reg_write) {
73 		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
74 		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
75 		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
76 		return ret;
77 	}
78 
79 	return -EINVAL;
80 }
81 
82 static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
83 				      unsigned int offset, void *val,
84 				      size_t bytes, int write)
85 {
87 	unsigned int end = offset + bytes;
88 	unsigned int kend, ksize;
89 	const struct nvmem_keepout *keepout = nvmem->keepout;
90 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
91 	int rc;
92 
93 	/*
94 	 * Skip all keepouts before the range being accessed.
95 	 * Keepouts are sorted.
96 	 */
97 	while ((keepout < keepoutend) && (keepout->end <= offset))
98 		keepout++;
99 
100 	while ((offset < end) && (keepout < keepoutend)) {
101 		/* Access the valid portion before the keepout. */
102 		if (offset < keepout->start) {
103 			kend = min(end, keepout->start);
104 			ksize = kend - offset;
105 			if (write)
106 				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
107 			else
108 				rc = __nvmem_reg_read(nvmem, offset, val, ksize);
109 
110 			if (rc)
111 				return rc;
112 
113 			offset += ksize;
114 			val += ksize;
115 		}
116 
117 		/*
118 		 * Now we're aligned to the start of this keepout zone. Go
119 		 * through it.
120 		 */
121 		kend = min(end, keepout->end);
122 		ksize = kend - offset;
123 		if (!write)
124 			memset(val, keepout->value, ksize);
125 
126 		val += ksize;
127 		offset += ksize;
128 		keepout++;
129 	}
130 
131 	/*
132 	 * If we ran out of keepouts but there's still stuff to do, send it
133 	 * down directly
134 	 */
135 	if (offset < end) {
136 		ksize = end - offset;
137 		if (write)
138 			return __nvmem_reg_write(nvmem, offset, val, ksize);
139 		else
140 			return __nvmem_reg_read(nvmem, offset, val, ksize);
141 	}
142 
143 	return 0;
144 }
145 
146 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
147 			  void *val, size_t bytes)
148 {
149 	if (!nvmem->nkeepout)
150 		return __nvmem_reg_read(nvmem, offset, val, bytes);
151 
152 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
153 }
154 
155 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
156 			   void *val, size_t bytes)
157 {
158 	if (!nvmem->nkeepout)
159 		return __nvmem_reg_write(nvmem, offset, val, bytes);
160 
161 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
162 }
163 
164 #ifdef CONFIG_NVMEM_SYSFS
165 static const char * const nvmem_type_str[] = {
166 	[NVMEM_TYPE_UNKNOWN] = "Unknown",
167 	[NVMEM_TYPE_EEPROM] = "EEPROM",
168 	[NVMEM_TYPE_OTP] = "OTP",
169 	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
170 	[NVMEM_TYPE_FRAM] = "FRAM",
171 };
172 
173 #ifdef CONFIG_DEBUG_LOCK_ALLOC
174 static struct lock_class_key eeprom_lock_key;
175 #endif
176 
177 static ssize_t type_show(struct device *dev,
178 			 struct device_attribute *attr, char *buf)
179 {
180 	struct nvmem_device *nvmem = to_nvmem_device(dev);
181 
182 	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
183 }
184 
185 static DEVICE_ATTR_RO(type);
186 
187 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
188 			     char *buf)
189 {
190 	struct nvmem_device *nvmem = to_nvmem_device(dev);
191 
192 	return sysfs_emit(buf, "%d\n", nvmem->read_only);
193 }
194 
195 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
196 			      const char *buf, size_t count)
197 {
198 	struct nvmem_device *nvmem = to_nvmem_device(dev);
199 	int ret = kstrtobool(buf, &nvmem->read_only);
200 
201 	if (ret < 0)
202 		return ret;
203 
204 	return count;
205 }
206 
207 static DEVICE_ATTR_RW(force_ro);
208 
209 static struct attribute *nvmem_attrs[] = {
210 	&dev_attr_force_ro.attr,
211 	&dev_attr_type.attr,
212 	NULL,
213 };
214 
215 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
216 				   struct bin_attribute *attr, char *buf,
217 				   loff_t pos, size_t count)
218 {
219 	struct device *dev;
220 	struct nvmem_device *nvmem;
221 	int rc;
222 
223 	if (attr->private)
224 		dev = attr->private;
225 	else
226 		dev = kobj_to_dev(kobj);
227 	nvmem = to_nvmem_device(dev);
228 
229 	if (!IS_ALIGNED(pos, nvmem->stride))
230 		return -EINVAL;
231 
232 	if (count < nvmem->word_size)
233 		return -EINVAL;
234 
235 	count = round_down(count, nvmem->word_size);
236 
237 	if (!nvmem->reg_read)
238 		return -EPERM;
239 
240 	rc = nvmem_reg_read(nvmem, pos, buf, count);
241 
242 	if (rc)
243 		return rc;
244 
245 	return count;
246 }
247 
248 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
249 				    struct bin_attribute *attr, char *buf,
250 				    loff_t pos, size_t count)
251 {
252 	struct device *dev;
253 	struct nvmem_device *nvmem;
254 	int rc;
255 
256 	if (attr->private)
257 		dev = attr->private;
258 	else
259 		dev = kobj_to_dev(kobj);
260 	nvmem = to_nvmem_device(dev);
261 
262 	if (!IS_ALIGNED(pos, nvmem->stride))
263 		return -EINVAL;
264 
265 	if (count < nvmem->word_size)
266 		return -EINVAL;
267 
268 	count = round_down(count, nvmem->word_size);
269 
270 	if (!nvmem->reg_write)
271 		return -EPERM;
272 
273 	rc = nvmem_reg_write(nvmem, pos, buf, count);
274 
275 	if (rc)
276 		return rc;
277 
278 	return count;
279 }
280 
281 static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
282 {
283 	umode_t mode = 0400;
284 
285 	if (!nvmem->root_only)
286 		mode |= 0044;
287 
288 	if (!nvmem->read_only)
289 		mode |= 0200;
290 
291 	if (!nvmem->reg_write)
292 		mode &= ~0200;
293 
294 	if (!nvmem->reg_read)
295 		mode &= ~0444;
296 
297 	return mode;
298 }
299 
300 static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
301 					 struct bin_attribute *attr, int i)
302 {
303 	struct device *dev = kobj_to_dev(kobj);
304 	struct nvmem_device *nvmem = to_nvmem_device(dev);
305 
306 	attr->size = nvmem->size;
307 
308 	return nvmem_bin_attr_get_umode(nvmem);
309 }
310 
311 static umode_t nvmem_attr_is_visible(struct kobject *kobj,
312 				     struct attribute *attr, int i)
313 {
314 	struct device *dev = kobj_to_dev(kobj);
315 	struct nvmem_device *nvmem = to_nvmem_device(dev);
316 
317 	/*
318 	 * If the device has no .reg_write operation, do not allow
319 	 * configuration as read-write.
320 	 * If the device is set as read-only by configuration, it
321 	 * can be forced into read-write mode using the 'force_ro'
322 	 * attribute.
323 	 */
324 	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
325 		return 0;	/* Attribute not visible */
326 
327 	return attr->mode;
328 }
329 
330 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
331 					    const char *id, int index);
332 
333 static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
334 				    struct bin_attribute *attr, char *buf,
335 				    loff_t pos, size_t count)
336 {
337 	struct nvmem_cell_entry *entry;
338 	struct nvmem_cell *cell = NULL;
339 	size_t cell_sz, read_len;
340 	void *content;
341 
342 	entry = attr->private;
343 	cell = nvmem_create_cell(entry, entry->name, 0);
344 	if (IS_ERR(cell))
345 		return PTR_ERR(cell);
346 
347 	if (!cell)
348 		return -EINVAL;
349 
350 	content = nvmem_cell_read(cell, &cell_sz);
351 	if (IS_ERR(content)) {
352 		read_len = PTR_ERR(content);
353 		goto destroy_cell;
354 	}
355 
356 	read_len = min_t(unsigned int, cell_sz - pos, count);
357 	memcpy(buf, content + pos, read_len);
358 	kfree(content);
359 
360 destroy_cell:
361 	kfree_const(cell->id);
362 	kfree(cell);
363 
364 	return read_len;
365 }
366 
367 /* default read/write permissions */
368 static struct bin_attribute bin_attr_rw_nvmem = {
369 	.attr	= {
370 		.name	= "nvmem",
371 		.mode	= 0644,
372 	},
373 	.read	= bin_attr_nvmem_read,
374 	.write	= bin_attr_nvmem_write,
375 };
376 
377 static struct bin_attribute *nvmem_bin_attributes[] = {
378 	&bin_attr_rw_nvmem,
379 	NULL,
380 };
381 
382 static const struct attribute_group nvmem_bin_group = {
383 	.bin_attrs	= nvmem_bin_attributes,
384 	.attrs		= nvmem_attrs,
385 	.is_bin_visible = nvmem_bin_attr_is_visible,
386 	.is_visible	= nvmem_attr_is_visible,
387 };
388 
389 static const struct attribute_group *nvmem_dev_groups[] = {
390 	&nvmem_bin_group,
391 	NULL,
392 };
393 
394 static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
395 	.attr	= {
396 		.name	= "eeprom",
397 	},
398 	.read	= bin_attr_nvmem_read,
399 	.write	= bin_attr_nvmem_write,
400 };
401 
402 /*
403  * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
404  * driver's sysfs directory, to be backwards compatible with the older
405  * drivers/misc/eeprom drivers.
406  */
407 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
408 				    const struct nvmem_config *config)
409 {
410 	int rval;
411 
412 	if (!config->compat)
413 		return 0;
414 
415 	if (!config->base_dev)
416 		return -EINVAL;
417 
418 	if (config->type == NVMEM_TYPE_FRAM)
419 		bin_attr_nvmem_eeprom_compat.attr.name = "fram";
420 
421 	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
422 	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
423 	nvmem->eeprom.size = nvmem->size;
424 #ifdef CONFIG_DEBUG_LOCK_ALLOC
425 	nvmem->eeprom.attr.key = &eeprom_lock_key;
426 #endif
427 	nvmem->eeprom.private = &nvmem->dev;
428 	nvmem->base_dev = config->base_dev;
429 
430 	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
431 	if (rval) {
432 		dev_err(&nvmem->dev,
433 			"Failed to create eeprom binary file %d\n", rval);
434 		return rval;
435 	}
436 
437 	nvmem->flags |= FLAG_COMPAT;
438 
439 	return 0;
440 }
441 
442 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
443 			      const struct nvmem_config *config)
444 {
445 	if (config->compat)
446 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
447 }
448 
449 static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
450 {
451 	struct attribute_group group = {
452 		.name	= "cells",
453 	};
454 	struct nvmem_cell_entry *entry;
455 	struct bin_attribute *attrs;
456 	unsigned int ncells = 0, i = 0;
457 	int ret = 0;
458 
459 	mutex_lock(&nvmem_mutex);
460 
461 	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
462 		goto unlock_mutex;
463 
464 	/* Allocate an array of attributes with a sentinel */
465 	ncells = list_count_nodes(&nvmem->cells);
466 	group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
467 				       sizeof(struct bin_attribute *), GFP_KERNEL);
468 	if (!group.bin_attrs) {
469 		ret = -ENOMEM;
470 		goto unlock_mutex;
471 	}
472 
473 	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
474 	if (!attrs) {
475 		ret = -ENOMEM;
476 		goto unlock_mutex;
477 	}
478 
479 	/* Initialize each attribute to take the name and size of the cell */
480 	list_for_each_entry(entry, &nvmem->cells, node) {
481 		sysfs_bin_attr_init(&attrs[i]);
482 		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
483 						    "%s@%x,%x", entry->name,
484 						    entry->offset,
485 						    entry->bit_offset);
486 		attrs[i].attr.mode = 0444;
487 		attrs[i].size = entry->bytes;
488 		attrs[i].read = &nvmem_cell_attr_read;
489 		attrs[i].private = entry;
490 		if (!attrs[i].attr.name) {
491 			ret = -ENOMEM;
492 			goto unlock_mutex;
493 		}
494 
495 		group.bin_attrs[i] = &attrs[i];
496 		i++;
497 	}
498 
499 	ret = device_add_group(&nvmem->dev, &group);
500 	if (ret)
501 		goto unlock_mutex;
502 
503 	nvmem->sysfs_cells_populated = true;
504 
505 unlock_mutex:
506 	mutex_unlock(&nvmem_mutex);
507 
508 	return ret;
509 }
510 
511 #else /* CONFIG_NVMEM_SYSFS */
512 
513 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
514 				    const struct nvmem_config *config)
515 {
516 	return -ENOSYS;
517 }
518 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
519 				      const struct nvmem_config *config)
520 {
521 }
522 
523 #endif /* CONFIG_NVMEM_SYSFS */
524 
525 static void nvmem_release(struct device *dev)
526 {
527 	struct nvmem_device *nvmem = to_nvmem_device(dev);
528 
529 	ida_free(&nvmem_ida, nvmem->id);
530 	gpiod_put(nvmem->wp_gpio);
531 	kfree(nvmem);
532 }
533 
534 static const struct device_type nvmem_provider_type = {
535 	.release	= nvmem_release,
536 };
537 
538 static struct bus_type nvmem_bus_type = {
539 	.name		= "nvmem",
540 };
541 
542 static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
543 {
544 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
545 	mutex_lock(&nvmem_mutex);
546 	list_del(&cell->node);
547 	mutex_unlock(&nvmem_mutex);
548 	of_node_put(cell->np);
549 	kfree_const(cell->name);
550 	kfree(cell);
551 }
552 
553 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
554 {
555 	struct nvmem_cell_entry *cell, *p;
556 
557 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
558 		nvmem_cell_entry_drop(cell);
559 }
560 
561 static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
562 {
563 	mutex_lock(&nvmem_mutex);
564 	list_add_tail(&cell->node, &cell->nvmem->cells);
565 	mutex_unlock(&nvmem_mutex);
566 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
567 }
568 
569 static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
570 						     const struct nvmem_cell_info *info,
571 						     struct nvmem_cell_entry *cell)
572 {
573 	cell->nvmem = nvmem;
574 	cell->offset = info->offset;
575 	cell->raw_len = info->raw_len ?: info->bytes;
576 	cell->bytes = info->bytes;
577 	cell->name = info->name;
578 	cell->read_post_process = info->read_post_process;
579 	cell->priv = info->priv;
580 
581 	cell->bit_offset = info->bit_offset;
582 	cell->nbits = info->nbits;
583 	cell->np = info->np;
584 
585 	if (cell->nbits)
586 		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
587 					   BITS_PER_BYTE);
588 
589 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
590 		dev_err(&nvmem->dev,
591 			"cell %s unaligned to nvmem stride %d\n",
592 			cell->name ?: "<unknown>", nvmem->stride);
593 		return -EINVAL;
594 	}
595 
596 	return 0;
597 }
598 
599 static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
600 					       const struct nvmem_cell_info *info,
601 					       struct nvmem_cell_entry *cell)
602 {
603 	int err;
604 
605 	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
606 	if (err)
607 		return err;
608 
609 	cell->name = kstrdup_const(info->name, GFP_KERNEL);
610 	if (!cell->name)
611 		return -ENOMEM;
612 
613 	return 0;
614 }
615 
616 /**
617  * nvmem_add_one_cell() - Add one cell information to an nvmem device
618  *
619  * @nvmem: nvmem device to add cells to.
620  * @info: nvmem cell info to add to the device
621  *
622  * Return: 0 or negative error code on failure.
623  */
624 int nvmem_add_one_cell(struct nvmem_device *nvmem,
625 		       const struct nvmem_cell_info *info)
626 {
627 	struct nvmem_cell_entry *cell;
628 	int rval;
629 
630 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
631 	if (!cell)
632 		return -ENOMEM;
633 
634 	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
635 	if (rval) {
636 		kfree(cell);
637 		return rval;
638 	}
639 
640 	nvmem_cell_entry_add(cell);
641 
642 	return 0;
643 }
644 EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
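
/*
 * Editorial example (not part of this file): a minimal sketch of how a
 * provider might describe and add a single cell after registering its
 * device. The cell name, offset and size below are hypothetical.
 */
#if 0	/* illustration only */
static int example_add_mac_cell(struct nvmem_device *nvmem)
{
	struct nvmem_cell_info info = {
		.name	= "mac-address",	/* hypothetical cell name */
		.offset	= 0x40,			/* must be aligned to the nvmem stride */
		.bytes	= 6,
	};

	return nvmem_add_one_cell(nvmem, &info);
}
#endif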
645 
646 /**
647  * nvmem_add_cells() - Add cell information to an nvmem device
648  *
649  * @nvmem: nvmem device to add cells to.
650  * @info: nvmem cell info to add to the device
651  * @ncells: number of cells in info
652  *
653  * Return: 0 or negative error code on failure.
654  */
655 static int nvmem_add_cells(struct nvmem_device *nvmem,
656 		    const struct nvmem_cell_info *info,
657 		    int ncells)
658 {
659 	int i, rval;
660 
661 	for (i = 0; i < ncells; i++) {
662 		rval = nvmem_add_one_cell(nvmem, &info[i]);
663 		if (rval)
664 			return rval;
665 	}
666 
667 	return 0;
668 }
669 
670 /**
671  * nvmem_register_notifier() - Register a notifier block for nvmem events.
672  *
673  * @nb: notifier block to be called on nvmem events.
674  *
675  * Return: 0 on success, negative error number on failure.
676  */
677 int nvmem_register_notifier(struct notifier_block *nb)
678 {
679 	return blocking_notifier_chain_register(&nvmem_notifier, nb);
680 }
681 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
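
/*
 * Editorial example (not part of this file): a sketch of a notifier that
 * logs device additions. The chain delivers NVMEM_ADD/NVMEM_REMOVE with a
 * struct nvmem_device pointer and NVMEM_CELL_ADD/NVMEM_CELL_REMOVE with a
 * cell entry. The callback and block names are hypothetical.
 */
#if 0	/* illustration only */
static int example_nvmem_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	if (event == NVMEM_ADD)
		pr_info("nvmem device %s added\n",
			nvmem_dev_name((struct nvmem_device *)data));

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_nvmem_event,
};

/* nvmem_register_notifier(&example_nb) on init, and
 * nvmem_unregister_notifier(&example_nb) on exit. */
#endif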
682 
683 /**
684  * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
685  *
686  * @nb: notifier block to be unregistered.
687  *
688  * Return: 0 on success, negative error number on failure.
689  */
690 int nvmem_unregister_notifier(struct notifier_block *nb)
691 {
692 	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
693 }
694 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
695 
696 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
697 {
698 	const struct nvmem_cell_info *info;
699 	struct nvmem_cell_table *table;
700 	struct nvmem_cell_entry *cell;
701 	int rval = 0, i;
702 
703 	mutex_lock(&nvmem_cell_mutex);
704 	list_for_each_entry(table, &nvmem_cell_tables, node) {
705 		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
706 			for (i = 0; i < table->ncells; i++) {
707 				info = &table->cells[i];
708 
709 				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
710 				if (!cell) {
711 					rval = -ENOMEM;
712 					goto out;
713 				}
714 
715 				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
716 				if (rval) {
717 					kfree(cell);
718 					goto out;
719 				}
720 
721 				nvmem_cell_entry_add(cell);
722 			}
723 		}
724 	}
725 
726 out:
727 	mutex_unlock(&nvmem_cell_mutex);
728 	return rval;
729 }
730 
731 static struct nvmem_cell_entry *
732 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
733 {
734 	struct nvmem_cell_entry *iter, *cell = NULL;
735 
736 	mutex_lock(&nvmem_mutex);
737 	list_for_each_entry(iter, &nvmem->cells, node) {
738 		if (strcmp(cell_id, iter->name) == 0) {
739 			cell = iter;
740 			break;
741 		}
742 	}
743 	mutex_unlock(&nvmem_mutex);
744 
745 	return cell;
746 }
747 
748 static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
749 {
750 	unsigned int cur = 0;
751 	const struct nvmem_keepout *keepout = nvmem->keepout;
752 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
753 
754 	while (keepout < keepoutend) {
755 		/* Ensure keepouts are sorted and don't overlap. */
756 		if (keepout->start < cur) {
757 			dev_err(&nvmem->dev,
758 				"Keepout regions aren't sorted or overlap.\n");
759 
760 			return -ERANGE;
761 		}
762 
763 		if (keepout->end < keepout->start) {
764 			dev_err(&nvmem->dev,
765 				"Invalid keepout region.\n");
766 
767 			return -EINVAL;
768 		}
769 
770 		/*
771 		 * Validate keepouts (and holes between) don't violate
772 		 * word_size constraints.
773 		 */
774 		if ((keepout->end - keepout->start < nvmem->word_size) ||
775 		    ((keepout->start != cur) &&
776 		     (keepout->start - cur < nvmem->word_size))) {
777 
778 			dev_err(&nvmem->dev,
779 				"Keepout regions violate word_size constraints.\n");
780 
781 			return -ERANGE;
782 		}
783 
784 		/* Validate keepouts don't violate stride (alignment). */
785 		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
786 		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
787 
788 			dev_err(&nvmem->dev,
789 				"Keepout regions violate stride.\n");
790 
791 			return -EINVAL;
792 		}
793 
794 		cur = keepout->end;
795 		keepout++;
796 	}
797 
798 	return 0;
799 }
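
/*
 * Editorial example (not part of this file): per the checks above, keepout
 * regions must be sorted, non-overlapping, stride-aligned, and neither a
 * region nor the hole before it may be smaller than word_size. A provider
 * could declare them like this (addresses are hypothetical):
 */
#if 0	/* illustration only */
static const struct nvmem_keepout example_keepouts[] = {
	{ .start = 0x10, .end = 0x20, .value = 0x00 },	/* reads back as 0x00 */
	{ .start = 0x40, .end = 0x50, .value = 0xff },	/* reads back as 0xff */
};

/* In the nvmem_config:
 *	config.keepout  = example_keepouts;
 *	config.nkeepout = ARRAY_SIZE(example_keepouts);
 */
#endif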
800 
801 static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
802 {
803 	struct device *dev = &nvmem->dev;
804 	struct device_node *child;
805 	const __be32 *addr;
806 	int len, ret;
807 
808 	for_each_child_of_node(np, child) {
809 		struct nvmem_cell_info info = {0};
810 
811 		addr = of_get_property(child, "reg", &len);
812 		if (!addr)
813 			continue;
814 		if (len < 2 * sizeof(u32)) {
815 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
816 			of_node_put(child);
817 			return -EINVAL;
818 		}
819 
820 		info.offset = be32_to_cpup(addr++);
821 		info.bytes = be32_to_cpup(addr);
822 		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
823 
824 		addr = of_get_property(child, "bits", &len);
825 		if (addr && len == (2 * sizeof(u32))) {
826 			info.bit_offset = be32_to_cpup(addr++);
827 			info.nbits = be32_to_cpup(addr);
828 			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
829 				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
830 				of_node_put(child);
831 				return -EINVAL;
832 			}
833 		}
834 
835 		info.np = of_node_get(child);
836 
837 		if (nvmem->fixup_dt_cell_info)
838 			nvmem->fixup_dt_cell_info(nvmem, &info);
839 
840 		ret = nvmem_add_one_cell(nvmem, &info);
841 		kfree(info.name);
842 		if (ret) {
843 			of_node_put(child);
844 			return ret;
845 		}
846 	}
847 
848 	return 0;
849 }
850 
851 static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
852 {
853 	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
854 }
855 
856 static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
857 {
858 	struct device_node *layout_np;
859 	int err = 0;
860 
861 	layout_np = of_nvmem_layout_get_container(nvmem);
862 	if (!layout_np)
863 		return 0;
864 
865 	if (of_device_is_compatible(layout_np, "fixed-layout"))
866 		err = nvmem_add_cells_from_dt(nvmem, layout_np);
867 
868 	of_node_put(layout_np);
869 
870 	return err;
871 }
872 
873 int nvmem_layout_register(struct nvmem_layout *layout)
874 {
875 	int ret;
876 
877 	if (!layout->add_cells)
878 		return -EINVAL;
879 
880 	/* Populate the cells */
881 	ret = layout->add_cells(layout);
882 	if (ret)
883 		return ret;
884 
885 #ifdef CONFIG_NVMEM_SYSFS
886 	ret = nvmem_populate_sysfs_cells(layout->nvmem);
887 	if (ret) {
888 		nvmem_device_remove_all_cells(layout->nvmem);
889 		return ret;
890 	}
891 #endif
892 
893 	return 0;
894 }
895 EXPORT_SYMBOL_GPL(nvmem_layout_register);
896 
897 void nvmem_layout_unregister(struct nvmem_layout *layout)
898 {
899 	/* Keep the API even with an empty stub in case we need it later */
900 }
901 EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
902 
903 /**
904  * nvmem_register() - Register a nvmem device for the given nvmem_config.
905  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
906  *
907  * @config: nvmem device configuration with which the nvmem device is created.
908  *
909  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
910  * on success.
911  */
913 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
914 {
915 	struct nvmem_device *nvmem;
916 	int rval;
917 
918 	if (!config->dev)
919 		return ERR_PTR(-EINVAL);
920 
921 	if (!config->reg_read && !config->reg_write)
922 		return ERR_PTR(-EINVAL);
923 
924 	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
925 	if (!nvmem)
926 		return ERR_PTR(-ENOMEM);
927 
928 	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
929 	if (rval < 0) {
930 		kfree(nvmem);
931 		return ERR_PTR(rval);
932 	}
933 
934 	nvmem->id = rval;
935 
936 	nvmem->dev.type = &nvmem_provider_type;
937 	nvmem->dev.bus = &nvmem_bus_type;
938 	nvmem->dev.parent = config->dev;
939 
940 	device_initialize(&nvmem->dev);
941 
942 	if (!config->ignore_wp)
943 		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
944 						    GPIOD_OUT_HIGH);
945 	if (IS_ERR(nvmem->wp_gpio)) {
946 		rval = PTR_ERR(nvmem->wp_gpio);
947 		nvmem->wp_gpio = NULL;
948 		goto err_put_device;
949 	}
950 
951 	kref_init(&nvmem->refcnt);
952 	INIT_LIST_HEAD(&nvmem->cells);
953 	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;
954 
955 	nvmem->owner = config->owner;
956 	if (!nvmem->owner && config->dev->driver)
957 		nvmem->owner = config->dev->driver->owner;
958 	nvmem->stride = config->stride ?: 1;
959 	nvmem->word_size = config->word_size ?: 1;
960 	nvmem->size = config->size;
961 	nvmem->root_only = config->root_only;
962 	nvmem->priv = config->priv;
963 	nvmem->type = config->type;
964 	nvmem->reg_read = config->reg_read;
965 	nvmem->reg_write = config->reg_write;
966 	nvmem->keepout = config->keepout;
967 	nvmem->nkeepout = config->nkeepout;
968 	if (config->of_node)
969 		nvmem->dev.of_node = config->of_node;
970 	else
971 		nvmem->dev.of_node = config->dev->of_node;
972 
973 	switch (config->id) {
974 	case NVMEM_DEVID_NONE:
975 		rval = dev_set_name(&nvmem->dev, "%s", config->name);
976 		break;
977 	case NVMEM_DEVID_AUTO:
978 		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
979 		break;
980 	default:
981 		rval = dev_set_name(&nvmem->dev, "%s%d",
982 			     config->name ? : "nvmem",
983 			     config->name ? config->id : nvmem->id);
984 		break;
985 	}
986 
987 	if (rval)
988 		goto err_put_device;
989 
990 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
991 			   config->read_only || !nvmem->reg_write;
992 
993 #ifdef CONFIG_NVMEM_SYSFS
994 	nvmem->dev.groups = nvmem_dev_groups;
995 #endif
996 
997 	if (nvmem->nkeepout) {
998 		rval = nvmem_validate_keepouts(nvmem);
999 		if (rval)
1000 			goto err_put_device;
1001 	}
1002 
1003 	if (config->compat) {
1004 		rval = nvmem_sysfs_setup_compat(nvmem, config);
1005 		if (rval)
1006 			goto err_put_device;
1007 	}
1008 
1009 	if (config->cells) {
1010 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
1011 		if (rval)
1012 			goto err_remove_cells;
1013 	}
1014 
1015 	rval = nvmem_add_cells_from_table(nvmem);
1016 	if (rval)
1017 		goto err_remove_cells;
1018 
1019 	if (config->add_legacy_fixed_of_cells) {
1020 		rval = nvmem_add_cells_from_legacy_of(nvmem);
1021 		if (rval)
1022 			goto err_remove_cells;
1023 	}
1024 
1025 	rval = nvmem_add_cells_from_fixed_layout(nvmem);
1026 	if (rval)
1027 		goto err_remove_cells;
1028 
1029 	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
1030 
1031 	rval = device_add(&nvmem->dev);
1032 	if (rval)
1033 		goto err_remove_cells;
1034 
1035 	rval = nvmem_populate_layout(nvmem);
1036 	if (rval)
1037 		goto err_remove_dev;
1038 
1039 #ifdef CONFIG_NVMEM_SYSFS
1040 	rval = nvmem_populate_sysfs_cells(nvmem);
1041 	if (rval)
1042 		goto err_destroy_layout;
1043 #endif
1044 
1045 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
1046 
1047 	return nvmem;
1048 
1049 #ifdef CONFIG_NVMEM_SYSFS
1050 err_destroy_layout:
1051 	nvmem_destroy_layout(nvmem);
1052 #endif
1053 err_remove_dev:
1054 	device_del(&nvmem->dev);
1055 err_remove_cells:
1056 	nvmem_device_remove_all_cells(nvmem);
1057 	if (config->compat)
1058 		nvmem_sysfs_remove_compat(nvmem, config);
1059 err_put_device:
1060 	put_device(&nvmem->dev);
1061 
1062 	return ERR_PTR(rval);
1063 }
1064 EXPORT_SYMBOL_GPL(nvmem_register);
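
/*
 * Editorial example (not part of this file): a minimal read-only provider
 * sketch backed by an in-memory blob. The device name, size and helper
 * names are hypothetical; at least one of reg_read/reg_write must be set,
 * as checked above.
 */
#if 0	/* illustration only */
static int example_reg_read(void *priv, unsigned int offset,
			    void *val, size_t bytes)
{
	u8 *blob = priv;	/* hypothetical in-memory backing store */

	memcpy(val, blob + offset, bytes);
	return 0;
}

static int example_probe(struct device *dev, u8 *blob)
{
	struct nvmem_config config = {
		.dev		= dev,
		.name		= "example-otp",	/* hypothetical name */
		.id		= NVMEM_DEVID_AUTO,
		.type		= NVMEM_TYPE_OTP,
		.read_only	= true,
		.size		= 256,
		.word_size	= 1,
		.stride		= 1,
		.reg_read	= example_reg_read,
		.priv		= blob,
	};
	struct nvmem_device *nvmem = devm_nvmem_register(dev, &config);

	return PTR_ERR_OR_ZERO(nvmem);
}
#endif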
1065 
1066 static void nvmem_device_release(struct kref *kref)
1067 {
1068 	struct nvmem_device *nvmem;
1069 
1070 	nvmem = container_of(kref, struct nvmem_device, refcnt);
1071 
1072 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
1073 
1074 	if (nvmem->flags & FLAG_COMPAT)
1075 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
1076 
1077 	nvmem_device_remove_all_cells(nvmem);
1078 	nvmem_destroy_layout(nvmem);
1079 	device_unregister(&nvmem->dev);
1080 }
1081 
1082 /**
1083  * nvmem_unregister() - Unregister previously registered nvmem device
1084  *
1085  * @nvmem: Pointer to previously registered nvmem device.
1086  */
1087 void nvmem_unregister(struct nvmem_device *nvmem)
1088 {
1089 	if (nvmem)
1090 		kref_put(&nvmem->refcnt, nvmem_device_release);
1091 }
1092 EXPORT_SYMBOL_GPL(nvmem_unregister);
1093 
1094 static void devm_nvmem_unregister(void *nvmem)
1095 {
1096 	nvmem_unregister(nvmem);
1097 }
1098 
1099 /**
1100  * devm_nvmem_register() - Register a managed nvmem device for the given
1101  * nvmem_config.
1102  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
1103  *
1104  * @dev: Device that uses the nvmem device.
1105  * @config: nvmem device configuration with which the nvmem device is created.
1106  *
1107  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
1108  * on success.
1109  */
1110 struct nvmem_device *devm_nvmem_register(struct device *dev,
1111 					 const struct nvmem_config *config)
1112 {
1113 	struct nvmem_device *nvmem;
1114 	int ret;
1115 
1116 	nvmem = nvmem_register(config);
1117 	if (IS_ERR(nvmem))
1118 		return nvmem;
1119 
1120 	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
1121 	if (ret)
1122 		return ERR_PTR(ret);
1123 
1124 	return nvmem;
1125 }
1126 EXPORT_SYMBOL_GPL(devm_nvmem_register);
1127 
1128 static struct nvmem_device *__nvmem_device_get(void *data,
1129 			int (*match)(struct device *dev, const void *data))
1130 {
1131 	struct nvmem_device *nvmem = NULL;
1132 	struct device *dev;
1133 
1134 	mutex_lock(&nvmem_mutex);
1135 	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
1136 	if (dev)
1137 		nvmem = to_nvmem_device(dev);
1138 	mutex_unlock(&nvmem_mutex);
1139 	if (!nvmem)
1140 		return ERR_PTR(-EPROBE_DEFER);
1141 
1142 	if (!try_module_get(nvmem->owner)) {
1143 		dev_err(&nvmem->dev,
1144 			"could not increase module refcount for cell %s\n",
1145 			nvmem_dev_name(nvmem));
1146 
1147 		put_device(&nvmem->dev);
1148 		return ERR_PTR(-EINVAL);
1149 	}
1150 
1151 	kref_get(&nvmem->refcnt);
1152 
1153 	return nvmem;
1154 }
1155 
1156 static void __nvmem_device_put(struct nvmem_device *nvmem)
1157 {
1158 	put_device(&nvmem->dev);
1159 	module_put(nvmem->owner);
1160 	kref_put(&nvmem->refcnt, nvmem_device_release);
1161 }
1162 
1163 #if IS_ENABLED(CONFIG_OF)
1164 /**
1165  * of_nvmem_device_get() - Get nvmem device from a given id
1166  *
1167  * @np: Device tree node that uses the nvmem device.
1168  * @id: nvmem name from nvmem-names property.
1169  *
1170  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1171  * on success.
1172  */
1173 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
1174 {
1176 	struct device_node *nvmem_np;
1177 	struct nvmem_device *nvmem;
1178 	int index = 0;
1179 
1180 	if (id)
1181 		index = of_property_match_string(np, "nvmem-names", id);
1182 
1183 	nvmem_np = of_parse_phandle(np, "nvmem", index);
1184 	if (!nvmem_np)
1185 		return ERR_PTR(-ENOENT);
1186 
1187 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1188 	of_node_put(nvmem_np);
1189 	return nvmem;
1190 }
1191 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
1192 #endif
1193 
1194 /**
1195  * nvmem_device_get() - Get nvmem device from a given id
1196  *
1197  * @dev: Device that uses the nvmem device.
1198  * @dev_name: name of the requested nvmem device.
1199  *
1200  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1201  * on success.
1202  */
1203 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
1204 {
1205 	if (dev->of_node) { /* try dt first */
1206 		struct nvmem_device *nvmem;
1207 
1208 		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
1209 
1210 		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
1211 			return nvmem;
1212 
1213 	}
1214 
1215 	return __nvmem_device_get((void *)dev_name, device_match_name);
1216 }
1217 EXPORT_SYMBOL_GPL(nvmem_device_get);
1218 
1219 /**
1220  * nvmem_device_find() - Find nvmem device with matching function
1221  *
1222  * @data: Data to pass to match function
1223  * @match: Callback function to check device
1224  *
1225  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1226  * on success.
1227  */
1228 struct nvmem_device *nvmem_device_find(void *data,
1229 			int (*match)(struct device *dev, const void *data))
1230 {
1231 	return __nvmem_device_get(data, match);
1232 }
1233 EXPORT_SYMBOL_GPL(nvmem_device_find);
1234 
1235 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
1236 {
1237 	struct nvmem_device **nvmem = res;
1238 
1239 	if (WARN_ON(!nvmem || !*nvmem))
1240 		return 0;
1241 
1242 	return *nvmem == data;
1243 }
1244 
1245 static void devm_nvmem_device_release(struct device *dev, void *res)
1246 {
1247 	nvmem_device_put(*(struct nvmem_device **)res);
1248 }
1249 
1250 /**
1251  * devm_nvmem_device_put() - put an already-obtained nvmem device
1252  *
1253  * @dev: Device that uses the nvmem device.
1254  * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
1255  * that needs to be released.
1256  */
1257 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
1258 {
1259 	int ret;
1260 
1261 	ret = devres_release(dev, devm_nvmem_device_release,
1262 			     devm_nvmem_device_match, nvmem);
1263 
1264 	WARN_ON(ret);
1265 }
1266 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
1267 
1268 /**
1269  * nvmem_device_put() - put an already-obtained nvmem device
1270  *
1271  * @nvmem: pointer to nvmem device that needs to be released.
1272  */
1273 void nvmem_device_put(struct nvmem_device *nvmem)
1274 {
1275 	__nvmem_device_put(nvmem);
1276 }
1277 EXPORT_SYMBOL_GPL(nvmem_device_put);
1278 
1279 /**
1280  * devm_nvmem_device_get() - Get nvmem device from a given id
1281  *
1282  * @dev: Device that requests the nvmem device.
1283  * @id: name id for the requested nvmem device.
1284  *
1285  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1286  * on success.  The nvmem_device will be released automatically once the
1287  * requesting device is freed.
1288  */
1289 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
1290 {
1291 	struct nvmem_device **ptr, *nvmem;
1292 
1293 	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
1294 	if (!ptr)
1295 		return ERR_PTR(-ENOMEM);
1296 
1297 	nvmem = nvmem_device_get(dev, id);
1298 	if (!IS_ERR(nvmem)) {
1299 		*ptr = nvmem;
1300 		devres_add(dev, ptr);
1301 	} else {
1302 		devres_free(ptr);
1303 	}
1304 
1305 	return nvmem;
1306 }
1307 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
1308 
1309 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
1310 					    const char *id, int index)
1311 {
1312 	struct nvmem_cell *cell;
1313 	const char *name = NULL;
1314 
1315 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
1316 	if (!cell)
1317 		return ERR_PTR(-ENOMEM);
1318 
1319 	if (id) {
1320 		name = kstrdup_const(id, GFP_KERNEL);
1321 		if (!name) {
1322 			kfree(cell);
1323 			return ERR_PTR(-ENOMEM);
1324 		}
1325 	}
1326 
1327 	cell->id = name;
1328 	cell->entry = entry;
1329 	cell->index = index;
1330 
1331 	return cell;
1332 }
1333 
1334 static struct nvmem_cell *
1335 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
1336 {
1337 	struct nvmem_cell_entry *cell_entry;
1338 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
1339 	struct nvmem_cell_lookup *lookup;
1340 	struct nvmem_device *nvmem;
1341 	const char *dev_id;
1342 
1343 	if (!dev)
1344 		return ERR_PTR(-EINVAL);
1345 
1346 	dev_id = dev_name(dev);
1347 
1348 	mutex_lock(&nvmem_lookup_mutex);
1349 
1350 	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
1351 		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
1352 		    (strcmp(lookup->con_id, con_id) == 0)) {
1353 			/* This is the right entry. */
1354 			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
1355 						   device_match_name);
1356 			if (IS_ERR(nvmem)) {
1357 				/* Provider may not be registered yet. */
1358 				cell = ERR_CAST(nvmem);
1359 				break;
1360 			}
1361 
1362 			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
1363 								   lookup->cell_name);
1364 			if (!cell_entry) {
1365 				__nvmem_device_put(nvmem);
1366 				cell = ERR_PTR(-ENOENT);
1367 			} else {
1368 				cell = nvmem_create_cell(cell_entry, con_id, 0);
1369 				if (IS_ERR(cell))
1370 					__nvmem_device_put(nvmem);
1371 			}
1372 			break;
1373 		}
1374 	}
1375 
1376 	mutex_unlock(&nvmem_lookup_mutex);
1377 	return cell;
1378 }
1379 
1380 static void nvmem_layout_module_put(struct nvmem_device *nvmem)
1381 {
1382 	if (nvmem->layout && nvmem->layout->dev.driver)
1383 		module_put(nvmem->layout->dev.driver->owner);
1384 }
1385 
1386 #if IS_ENABLED(CONFIG_OF)
1387 static struct nvmem_cell_entry *
1388 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
1389 {
1390 	struct nvmem_cell_entry *iter, *cell = NULL;
1391 
1392 	mutex_lock(&nvmem_mutex);
1393 	list_for_each_entry(iter, &nvmem->cells, node) {
1394 		if (np == iter->np) {
1395 			cell = iter;
1396 			break;
1397 		}
1398 	}
1399 	mutex_unlock(&nvmem_mutex);
1400 
1401 	return cell;
1402 }
1403 
1404 static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
1405 {
1406 	if (!nvmem->layout)
1407 		return 0;
1408 
1409 	if (!nvmem->layout->dev.driver ||
1410 	    !try_module_get(nvmem->layout->dev.driver->owner))
1411 		return -EPROBE_DEFER;
1412 
1413 	return 0;
1414 }
1415 
1416 /**
1417  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1418  *
1419  * @np: Device tree node that uses the nvmem cell.
1420  * @id: nvmem cell name from nvmem-cell-names property, or NULL
1421  *      for the cell at index 0 (the lone cell with no accompanying
1422  *      nvmem-cell-names property).
1423  *
1424  * Return: Will be an ERR_PTR() on error or a valid pointer
1425  * to a struct nvmem_cell.  The nvmem_cell should be released with
1426  * nvmem_cell_put().
1427  */
1428 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1429 {
1430 	struct device_node *cell_np, *nvmem_np;
1431 	struct nvmem_device *nvmem;
1432 	struct nvmem_cell_entry *cell_entry;
1433 	struct nvmem_cell *cell;
1434 	struct of_phandle_args cell_spec;
1435 	int index = 0;
1436 	int cell_index = 0;
1437 	int ret;
1438 
1439 	/* if cell name exists, find index to the name */
1440 	if (id)
1441 		index = of_property_match_string(np, "nvmem-cell-names", id);
1442 
1443 	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
1444 						  "#nvmem-cell-cells",
1445 						  index, &cell_spec);
1446 	if (ret)
1447 		return ERR_PTR(-ENOENT);
1448 
1449 	if (cell_spec.args_count > 1)
1450 		return ERR_PTR(-EINVAL);
1451 
1452 	cell_np = cell_spec.np;
1453 	if (cell_spec.args_count)
1454 		cell_index = cell_spec.args[0];
1455 
1456 	nvmem_np = of_get_parent(cell_np);
1457 	if (!nvmem_np) {
1458 		of_node_put(cell_np);
1459 		return ERR_PTR(-EINVAL);
1460 	}
1461 
1462 	/* nvmem layouts produce cells within the nvmem-layout container */
1463 	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
1464 		nvmem_np = of_get_next_parent(nvmem_np);
1465 		if (!nvmem_np) {
1466 			of_node_put(cell_np);
1467 			return ERR_PTR(-EINVAL);
1468 		}
1469 	}
1470 
1471 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1472 	of_node_put(nvmem_np);
1473 	if (IS_ERR(nvmem)) {
1474 		of_node_put(cell_np);
1475 		return ERR_CAST(nvmem);
1476 	}
1477 
1478 	ret = nvmem_layout_module_get_optional(nvmem);
1479 	if (ret) {
1480 		of_node_put(cell_np);
1481 		__nvmem_device_put(nvmem);
1482 		return ERR_PTR(ret);
1483 	}
1484 
1485 	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
1486 	of_node_put(cell_np);
1487 	if (!cell_entry) {
1488 		__nvmem_device_put(nvmem);
1489 		nvmem_layout_module_put(nvmem);
1490 		if (nvmem->layout)
1491 			return ERR_PTR(-EPROBE_DEFER);
1492 		else
1493 			return ERR_PTR(-ENOENT);
1494 	}
1495 
1496 	cell = nvmem_create_cell(cell_entry, id, cell_index);
1497 	if (IS_ERR(cell)) {
1498 		__nvmem_device_put(nvmem);
1499 		nvmem_layout_module_put(nvmem);
1500 	}
1501 
1502 	return cell;
1503 }
1504 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
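
/*
 * Editorial example (not part of this file): a device-tree fragment that
 * of_nvmem_cell_get() can resolve (node names and property values are
 * hypothetical; "atmel,24c32" is just a familiar EEPROM compatible):
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calibration: calib@10 {
 *			reg = <0x10 0x8>;
 *		};
 *	};
 *
 *	consumer {
 *		nvmem-cells = <&calibration>;
 *		nvmem-cell-names = "calibration";
 *	};
 */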
1505 #endif
1506 
1507 /**
1508  * nvmem_cell_get() - Get nvmem cell of device from a given cell name
1509  *
1510  * @dev: Device that requests the nvmem cell.
1511  * @id: nvmem cell name to get (this corresponds with the name from the
1512  *      nvmem-cell-names property for DT systems and with the con_id from
1513  *      the lookup entry for non-DT systems).
1514  *
1515  * Return: Will be an ERR_PTR() on error or a valid pointer
1516  * to a struct nvmem_cell.  The nvmem_cell should be released with
1517  * nvmem_cell_put().
1518  */
1519 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1520 {
1521 	struct nvmem_cell *cell;
1522 
1523 	if (dev->of_node) { /* try dt first */
1524 		cell = of_nvmem_cell_get(dev->of_node, id);
1525 		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1526 			return cell;
1527 	}
1528 
1529 	/* NULL cell id only allowed for device tree; invalid otherwise */
1530 	if (!id)
1531 		return ERR_PTR(-EINVAL);
1532 
1533 	return nvmem_cell_get_from_lookup(dev, id);
1534 }
1535 EXPORT_SYMBOL_GPL(nvmem_cell_get);
1536 
1537 static void devm_nvmem_cell_release(struct device *dev, void *res)
1538 {
1539 	nvmem_cell_put(*(struct nvmem_cell **)res);
1540 }
1541 
1542 /**
1543  * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
1544  *
1545  * @dev: Device that requests the nvmem cell.
1546  * @id: nvmem cell name id to get.
1547  *
1548  * Return: Will be an ERR_PTR() on error or a valid pointer
1549  * to a struct nvmem_cell.  The nvmem_cell will be released
1550  * automatically once the device is freed.
1551  */
1552 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1553 {
1554 	struct nvmem_cell **ptr, *cell;
1555 
1556 	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1557 	if (!ptr)
1558 		return ERR_PTR(-ENOMEM);
1559 
1560 	cell = nvmem_cell_get(dev, id);
1561 	if (!IS_ERR(cell)) {
1562 		*ptr = cell;
1563 		devres_add(dev, ptr);
1564 	} else {
1565 		devres_free(ptr);
1566 	}
1567 
1568 	return cell;
1569 }
1570 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1571 
1572 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
1573 {
1574 	struct nvmem_cell **c = res;
1575 
1576 	if (WARN_ON(!c || !*c))
1577 		return 0;
1578 
1579 	return *c == data;
1580 }
1581 
1582 /**
1583  * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1584  * from devm_nvmem_cell_get.
1585  *
1586  * @dev: Device that requests the nvmem cell.
1587  * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1588  */
1589 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1590 {
1591 	int ret;
1592 
1593 	ret = devres_release(dev, devm_nvmem_cell_release,
1594 				devm_nvmem_cell_match, cell);
1595 
1596 	WARN_ON(ret);
1597 }
1598 EXPORT_SYMBOL(devm_nvmem_cell_put);
1599 
1600 /**
1601  * nvmem_cell_put() - Release previously allocated nvmem cell.
1602  *
1603  * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1604  */
1605 void nvmem_cell_put(struct nvmem_cell *cell)
1606 {
1607 	struct nvmem_device *nvmem = cell->entry->nvmem;
1608 
1609 	if (cell->id)
1610 		kfree_const(cell->id);
1611 
1612 	kfree(cell);
1613 	__nvmem_device_put(nvmem);
1614 	nvmem_layout_module_put(nvmem);
1615 }
1616 EXPORT_SYMBOL_GPL(nvmem_cell_put);
1617 
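/*
 * Shift the raw bytes read from the device so that a cell described by
 * bit_offset/nbits starts at bit 0 of buf, zeroing any trailing bytes the
 * result no longer needs and masking leftover bits in the last byte.
 */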
1618 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
1619 {
1620 	u8 *p, *b;
1621 	int i, extra, bit_offset = cell->bit_offset;
1622 
1623 	p = b = buf;
1624 	if (bit_offset) {
1625 		/* First shift */
1626 		*b++ >>= bit_offset;
1627 
1628 		/* setup rest of the bytes if any */
1629 		for (i = 1; i < cell->bytes; i++) {
1630 			/* Get bits from next byte and shift them towards msb */
1631 			*p |= *b << (BITS_PER_BYTE - bit_offset);
1632 
1633 			p = b;
1634 			*b++ >>= bit_offset;
1635 		}
1636 	} else {
1637 		/* point to the msb */
1638 		p += cell->bytes - 1;
1639 	}
1640 
1641 	/* result fits in fewer bytes */
1642 	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1643 	while (--extra >= 0)
1644 		*p-- = 0;
1645 
1646 	/* clear msb bits if any leftover in the last byte */
1647 	if (cell->nbits % BITS_PER_BYTE)
1648 		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
1649 }
1650 
1651 static int __nvmem_cell_read(struct nvmem_device *nvmem,
1652 			     struct nvmem_cell_entry *cell,
1653 			     void *buf, size_t *len, const char *id, int index)
1654 {
1655 	int rc;
1656 
1657 	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
1658 
1659 	if (rc)
1660 		return rc;
1661 
1662 	/* shift bits in-place */
1663 	if (cell->bit_offset || cell->nbits)
1664 		nvmem_shift_read_buffer_in_place(cell, buf);
1665 
1666 	if (cell->read_post_process) {
1667 		rc = cell->read_post_process(cell->priv, id, index,
1668 					     cell->offset, buf, cell->raw_len);
1669 		if (rc)
1670 			return rc;
1671 	}
1672 
1673 	if (len)
1674 		*len = cell->bytes;
1675 
1676 	return 0;
1677 }
1678 
1679 /**
1680  * nvmem_cell_read() - Read a given nvmem cell
1681  *
1682  * @cell: nvmem cell to be read.
1683  * @len: pointer to length of cell which will be populated on successful read;
1684  *	 can be NULL.
1685  *
1686  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1687  * buffer should be freed by the consumer with a kfree().
1688  */
1689 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1690 {
1691 	struct nvmem_cell_entry *entry = cell->entry;
1692 	struct nvmem_device *nvmem = entry->nvmem;
1693 	u8 *buf;
1694 	int rc;
1695 
1696 	if (!nvmem)
1697 		return ERR_PTR(-EINVAL);
1698 
1699 	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
1700 	if (!buf)
1701 		return ERR_PTR(-ENOMEM);
1702 
1703 	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
1704 	if (rc) {
1705 		kfree(buf);
1706 		return ERR_PTR(rc);
1707 	}
1708 
1709 	return buf;
1710 }
1711 EXPORT_SYMBOL_GPL(nvmem_cell_read);
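
/*
 * Editorial example (not part of this file): the typical consumer flow
 * around nvmem_cell_get()/nvmem_cell_read()/nvmem_cell_put(). The cell
 * name "calibration" is hypothetical.
 */
#if 0	/* illustration only */
static int example_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *data;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);	/* may be -EPROBE_DEFER */

	data = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* ... consume 'len' bytes of 'data' ... */

	kfree(data);	/* the consumer owns the returned buffer */
	return 0;
}
#endif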
1712 
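/*
 * Inverse of the read-side shift: build a scratch buffer in which the new
 * cell payload is shifted up by bit_offset, preserving the surrounding
 * bits by reading them back from the device first.
 */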
1713 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
1714 					     u8 *_buf, int len)
1715 {
1716 	struct nvmem_device *nvmem = cell->nvmem;
1717 	int i, rc, nbits, bit_offset = cell->bit_offset;
1718 	u8 v, *p, *buf, *b, pbyte, pbits;
1719 
1720 	nbits = cell->nbits;
1721 	buf = kzalloc(cell->bytes, GFP_KERNEL);
1722 	if (!buf)
1723 		return ERR_PTR(-ENOMEM);
1724 
1725 	memcpy(buf, _buf, len);
1726 	p = b = buf;
1727 
1728 	if (bit_offset) {
1729 		pbyte = *b;
1730 		*b <<= bit_offset;
1731 
1732 		/* setup the first byte with lsb bits from nvmem */
1733 		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1734 		if (rc)
1735 			goto err;
1736 		*b++ |= GENMASK(bit_offset - 1, 0) & v;
1737 
1738 		/* setup the rest of the bytes if any */
1739 		for (i = 1; i < cell->bytes; i++) {
1740 			/* Get last byte bits and shift them towards lsb */
1741 			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1742 			pbyte = *b;
1743 			p = b;
1744 			*b <<= bit_offset;
1745 			*b++ |= pbits;
1746 		}
1747 	}
1748 
1749 	/* if it's not end on byte boundary */
1750 	if ((nbits + bit_offset) % BITS_PER_BYTE) {
1751 		/* setup the last byte with msb bits from nvmem */
1752 		rc = nvmem_reg_read(nvmem,
1753 				    cell->offset + cell->bytes - 1, &v, 1);
1754 		if (rc)
1755 			goto err;
1756 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1757 
1758 	}
1759 
1760 	return buf;
1761 err:
1762 	kfree(buf);
1763 	return ERR_PTR(rc);
1764 }
1765 
1766 static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
1767 {
1768 	struct nvmem_device *nvmem = cell->nvmem;
1769 	int rc;
1770 
1771 	if (!nvmem || nvmem->read_only ||
1772 	    (cell->bit_offset == 0 && len != cell->bytes))
1773 		return -EINVAL;
1774 
1775 	/*
1776 	 * Any cells which have a read_post_process hook are read-only because
1777 	 * we cannot reverse the operation and it might affect other cells,
1778 	 * too.
1779 	 */
1780 	if (cell->read_post_process)
1781 		return -EINVAL;
1782 
1783 	if (cell->bit_offset || cell->nbits) {
1784 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1785 		if (IS_ERR(buf))
1786 			return PTR_ERR(buf);
1787 	}
1788 
1789 	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1790 
1791 	/* free the tmp buffer */
1792 	if (cell->bit_offset || cell->nbits)
1793 		kfree(buf);
1794 
1795 	if (rc)
1796 		return rc;
1797 
1798 	return len;
1799 }
1800 
1801 /**
1802  * nvmem_cell_write() - Write to a given nvmem cell
1803  *
1804  * @cell: nvmem cell to be written.
1805  * @buf: Buffer to be written.
1806  * @len: length of buffer to be written to nvmem cell.
1807  *
1808  * Return: length of bytes written or negative on failure.
1809  */
1810 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1811 {
1812 	return __nvmem_cell_entry_write(cell->entry, buf, len);
1813 }
1815 EXPORT_SYMBOL_GPL(nvmem_cell_write);
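
/*
 * Editorial example (not part of this file): writing a whole cell. For a
 * cell without a bit_offset, len must equal the cell size, as checked in
 * __nvmem_cell_entry_write(). The cell name is hypothetical.
 */
#if 0	/* illustration only */
static int example_write_serial(struct device *dev, u8 serial[8])
{
	struct nvmem_cell *cell;
	int ret;

	cell = nvmem_cell_get(dev, "serial-number");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	ret = nvmem_cell_write(cell, serial, 8);
	nvmem_cell_put(cell);

	return ret < 0 ? ret : 0;	/* nvmem_cell_write() returns a length */
}
#endif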
1816 
1817 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1818 				  void *val, size_t count)
1819 {
1820 	struct nvmem_cell *cell;
1821 	void *buf;
1822 	size_t len;
1823 
1824 	cell = nvmem_cell_get(dev, cell_id);
1825 	if (IS_ERR(cell))
1826 		return PTR_ERR(cell);
1827 
1828 	buf = nvmem_cell_read(cell, &len);
1829 	if (IS_ERR(buf)) {
1830 		nvmem_cell_put(cell);
1831 		return PTR_ERR(buf);
1832 	}
1833 	if (len != count) {
1834 		kfree(buf);
1835 		nvmem_cell_put(cell);
1836 		return -EINVAL;
1837 	}
1838 	memcpy(val, buf, count);
1839 	kfree(buf);
1840 	nvmem_cell_put(cell);
1841 
1842 	return 0;
1843 }
1844 
1845 /**
1846  * nvmem_cell_read_u8() - Read a cell value as a u8
1847  *
1848  * @dev: Device that requests the nvmem cell.
1849  * @cell_id: Name of nvmem cell to read.
1850  * @val: pointer to output value.
1851  *
1852  * Return: 0 on success or negative errno.
1853  */
1854 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
1855 {
1856 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1857 }
1858 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
1859 
1860 /**
1861  * nvmem_cell_read_u16() - Read a cell value as a u16
1862  *
1863  * @dev: Device that requests the nvmem cell.
1864  * @cell_id: Name of nvmem cell to read.
1865  * @val: pointer to output value.
1866  *
1867  * Return: 0 on success or negative errno.
1868  */
1869 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1870 {
1871 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1872 }
1873 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1874 
1875 /**
1876  * nvmem_cell_read_u32() - Read a cell value as a u32
1877  *
1878  * @dev: Device that requests the nvmem cell.
1879  * @cell_id: Name of nvmem cell to read.
1880  * @val: pointer to output value.
1881  *
1882  * Return: 0 on success or negative errno.
1883  */
1884 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1885 {
1886 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1887 }
1888 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
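
/*
 * Editorial example (not part of this file): the typed helpers wrap
 * get/read/put and fail with -EINVAL unless the cell is exactly the
 * requested width. The cell name "trim" is hypothetical.
 */
#if 0	/* illustration only */
static int example_read_trim(struct device *dev, u32 *trim)
{
	return nvmem_cell_read_u32(dev, "trim", trim);
}
#endif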
1889 
1890 /**
1891  * nvmem_cell_read_u64() - Read a cell value as a u64
1892  *
1893  * @dev: Device that requests the nvmem cell.
1894  * @cell_id: Name of nvmem cell to read.
1895  * @val: pointer to output value.
1896  *
1897  * Return: 0 on success or negative errno.
1898  */
1899 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
1900 {
1901 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1902 }
1903 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1904 
1905 static const void *nvmem_cell_read_variable_common(struct device *dev,
1906 						   const char *cell_id,
1907 						   size_t max_len, size_t *len)
1908 {
1909 	struct nvmem_cell *cell;
1910 	int nbits;
1911 	void *buf;
1912 
1913 	cell = nvmem_cell_get(dev, cell_id);
1914 	if (IS_ERR(cell))
1915 		return cell;
1916 
1917 	nbits = cell->entry->nbits;
1918 	buf = nvmem_cell_read(cell, len);
1919 	nvmem_cell_put(cell);
1920 	if (IS_ERR(buf))
1921 		return buf;
1922 
1923 	/*
1924 	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
1925 	 * the length of the real data. Throw away the extra junk.
1926 	 */
1927 	if (nbits)
1928 		*len = DIV_ROUND_UP(nbits, 8);
1929 
1930 	if (*len > max_len) {
1931 		kfree(buf);
1932 		return ERR_PTR(-ERANGE);
1933 	}
1934 
1935 	return buf;
1936 }
1937 
1938 /**
1939  * nvmem_cell_read_variable_le_u32() - Read up to 32 bits of data as a little-endian number.
1940  *
1941  * @dev: Device that requests the nvmem cell.
1942  * @cell_id: Name of nvmem cell to read.
1943  * @val: pointer to output value.
1944  *
1945  * Return: 0 on success or negative errno.
1946  */
1947 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
1948 				    u32 *val)
1949 {
1950 	size_t len;
1951 	const u8 *buf;
1952 	int i;
1953 
1954 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1955 	if (IS_ERR(buf))
1956 		return PTR_ERR(buf);
1957 
1958 	/* Copy w/ implicit endian conversion */
1959 	*val = 0;
1960 	for (i = 0; i < len; i++)
1961 		*val |= buf[i] << (8 * i);
1962 
1963 	kfree(buf);
1964 
1965 	return 0;
1966 }
1967 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
1968 
1969 /**
1970  * nvmem_cell_read_variable_le_u64() - Read up to 64 bits of data as a little-endian number.
1971  *
1972  * @dev: Device that requests the nvmem cell.
1973  * @cell_id: Name of nvmem cell to read.
1974  * @val: pointer to output value.
1975  *
1976  * Return: 0 on success or negative errno.
1977  */
1978 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
1979 				    u64 *val)
1980 {
1981 	size_t len;
1982 	const u8 *buf;
1983 	int i;
1984 
1985 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1986 	if (IS_ERR(buf))
1987 		return PTR_ERR(buf);
1988 
1989 	/* Copy w/ implicit endian conversion */
1990 	*val = 0;
1991 	for (i = 0; i < len; i++)
1992 		*val |= (uint64_t)buf[i] << (8 * i);
1993 
1994 	kfree(buf);
1995 
1996 	return 0;
1997 }
1998 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
1999 
2000 /**
2001  * nvmem_device_cell_read() - Read a given nvmem device and cell
2002  *
2003  * @nvmem: nvmem device to read from.
2004  * @info: nvmem cell info to be read.
2005  * @buf: buffer pointer which will be populated on successful read.
2006  *
2007  * Return: length of successful bytes read on success and negative
2008  * error code on error.
2009  */
2010 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
2011 			   struct nvmem_cell_info *info, void *buf)
2012 {
2013 	struct nvmem_cell_entry cell;
2014 	int rc;
2015 	ssize_t len;
2016 
2017 	if (!nvmem)
2018 		return -EINVAL;
2019 
2020 	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
2021 	if (rc)
2022 		return rc;
2023 
2024 	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
2025 	if (rc)
2026 		return rc;
2027 
2028 	return len;
2029 }
2030 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
2031 
2032 /**
2033  * nvmem_device_cell_write() - Write cell to a given nvmem device
2034  *
2035  * @nvmem: nvmem device to be written to.
2036  * @info: nvmem cell info to be written.
2037  * @buf: buffer to be written to cell.
2038  *
2039  * Return: length of bytes written or negative error code on failure.
2040  */
2041 int nvmem_device_cell_write(struct nvmem_device *nvmem,
2042 			    struct nvmem_cell_info *info, void *buf)
2043 {
2044 	struct nvmem_cell_entry cell;
2045 	int rc;
2046 
2047 	if (!nvmem)
2048 		return -EINVAL;
2049 
2050 	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
2051 	if (rc)
2052 		return rc;
2053 
2054 	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
2055 }
2056 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
2057 
2058 /**
2059  * nvmem_device_read() - Read from a given nvmem device
2060  *
2061  * @nvmem: nvmem device to read from.
2062  * @offset: offset in nvmem device.
2063  * @bytes: number of bytes to read.
2064  * @buf: buffer pointer which will be populated on successful read.
2065  *
2066  * Return: length of successful bytes read on success and negative
2067  * error code on error.
2068  */
2069 int nvmem_device_read(struct nvmem_device *nvmem,
2070 		      unsigned int offset,
2071 		      size_t bytes, void *buf)
2072 {
2073 	int rc;
2074 
2075 	if (!nvmem)
2076 		return -EINVAL;
2077 
2078 	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
2079 
2080 	if (rc)
2081 		return rc;
2082 
2083 	return bytes;
2084 }
2085 EXPORT_SYMBOL_GPL(nvmem_device_read);
2086 
2087 /**
2088  * nvmem_device_write() - Write cell to a given nvmem device
2089  *
2090  * @nvmem: nvmem device to be written to.
2091  * @offset: offset in nvmem device.
2092  * @bytes: number of bytes to write.
2093  * @buf: buffer to be written.
2094  *
2095  * Return: length of bytes written or negative error code on failure.
2096  */
2097 int nvmem_device_write(struct nvmem_device *nvmem,
2098 		       unsigned int offset,
2099 		       size_t bytes, void *buf)
2100 {
2101 	int rc;
2102 
2103 	if (!nvmem)
2104 		return -EINVAL;
2105 
2106 	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
2107 
2108 	if (rc)
2109 		return rc;
2110 
2112 	return bytes;
2113 }
2114 EXPORT_SYMBOL_GPL(nvmem_device_write);
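
/*
 * Editorial example (not part of this file): raw device-level access that
 * bypasses the cell abstraction. The offsets are hypothetical and must
 * respect the device's stride; both calls return a byte count on success.
 */
#if 0	/* illustration only */
static int example_raw_copy(struct nvmem_device *nvmem)
{
	u8 buf[16];
	int ret;

	ret = nvmem_device_read(nvmem, 0x0, sizeof(buf), buf);
	if (ret < 0)
		return ret;

	return nvmem_device_write(nvmem, 0x100, sizeof(buf), buf);
}
#endif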
2115 
2116 /**
2117  * nvmem_add_cell_table() - register a table of cell info entries
2118  *
2119  * @table: table of cell info entries
2120  */
2121 void nvmem_add_cell_table(struct nvmem_cell_table *table)
2122 {
2123 	mutex_lock(&nvmem_cell_mutex);
2124 	list_add_tail(&table->node, &nvmem_cell_tables);
2125 	mutex_unlock(&nvmem_cell_mutex);
2126 }
2127 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
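
/*
 * Editorial example (not part of this file): a board file can attach cells
 * to a provider by name; the table is matched against nvmem_dev_name() in
 * nvmem_add_cells_from_table() during nvmem_register(), so it must be
 * registered before the provider is. All names and offsets are hypothetical.
 */
#if 0	/* illustration only */
static struct nvmem_cell_info example_cells[] = {
	{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
};

static struct nvmem_cell_table example_table = {
	.nvmem_name	= "example-otp0",	/* must match the nvmem device name */
	.cells		= example_cells,
	.ncells		= ARRAY_SIZE(example_cells),
};

/* nvmem_add_cell_table(&example_table); */
#endif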
2128 
2129 /**
2130  * nvmem_del_cell_table() - remove a previously registered cell info table
2131  *
2132  * @table: table of cell info entries
2133  */
2134 void nvmem_del_cell_table(struct nvmem_cell_table *table)
2135 {
2136 	mutex_lock(&nvmem_cell_mutex);
2137 	list_del(&table->node);
2138 	mutex_unlock(&nvmem_cell_mutex);
2139 }
2140 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
2141 
2142 /**
2143  * nvmem_add_cell_lookups() - register a list of cell lookup entries
2144  *
2145  * @entries: array of cell lookup entries
2146  * @nentries: number of cell lookup entries in the array
2147  */
2148 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
2149 {
2150 	int i;
2151 
2152 	mutex_lock(&nvmem_lookup_mutex);
2153 	for (i = 0; i < nentries; i++)
2154 		list_add_tail(&entries[i].node, &nvmem_lookup_list);
2155 	mutex_unlock(&nvmem_lookup_mutex);
2156 }
2157 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
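
/*
 * Editorial example (not part of this file): on non-DT systems a lookup
 * entry connects a consumer (dev_id + con_id) to a named cell on a named
 * provider; it is resolved in nvmem_cell_get_from_lookup(). All names are
 * hypothetical.
 */
#if 0	/* illustration only */
static struct nvmem_cell_lookup example_lookups[] = {
	{
		.nvmem_name	= "example-otp0",
		.cell_name	= "mac-address",
		.dev_id		= "example-eth.0",	/* consumer dev_name() */
		.con_id		= "mac-address",	/* id passed to nvmem_cell_get() */
	},
};

/* nvmem_add_cell_lookups(example_lookups, ARRAY_SIZE(example_lookups)); */
#endif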
2158 
2159 /**
2160  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
2161  *                            entries
2162  *
2163  * @entries: array of cell lookup entries
2164  * @nentries: number of cell lookup entries in the array
2165  */
2166 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
2167 {
2168 	int i;
2169 
2170 	mutex_lock(&nvmem_lookup_mutex);
2171 	for (i = 0; i < nentries; i++)
2172 		list_del(&entries[i].node);
2173 	mutex_unlock(&nvmem_lookup_mutex);
2174 }
2175 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
2176 
2177 /**
2178  * nvmem_dev_name() - Get the name of a given nvmem device.
2179  *
2180  * @nvmem: nvmem device.
2181  *
2182  * Return: name of the nvmem device.
2183  */
2184 const char *nvmem_dev_name(struct nvmem_device *nvmem)
2185 {
2186 	return dev_name(&nvmem->dev);
2187 }
2188 EXPORT_SYMBOL_GPL(nvmem_dev_name);
2189 
2190 /**
2191  * nvmem_dev_size() - Get the size of a given nvmem device.
2192  *
2193  * @nvmem: nvmem device.
2194  *
2195  * Return: size of the nvmem device.
2196  */
2197 size_t nvmem_dev_size(struct nvmem_device *nvmem)
2198 {
2199 	return nvmem->size;
2200 }
2201 EXPORT_SYMBOL_GPL(nvmem_dev_size);
2202 
2203 static int __init nvmem_init(void)
2204 {
2205 	int ret;
2206 
2207 	ret = bus_register(&nvmem_bus_type);
2208 	if (ret)
2209 		return ret;
2210 
2211 	ret = nvmem_layout_bus_register();
2212 	if (ret)
2213 		bus_unregister(&nvmem_bus_type);
2214 
2215 	return ret;
2216 }
2217 
2218 static void __exit nvmem_exit(void)
2219 {
2220 	nvmem_layout_bus_unregister();
2221 	bus_unregister(&nvmem_bus_type);
2222 }
2223 
2224 subsys_initcall(nvmem_init);
2225 module_exit(nvmem_exit);
2226 
2227 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
2228 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
2229 MODULE_DESCRIPTION("nvmem Driver Core");
2230