// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
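
/*
 * Example (editor's illustration, not from the original source): with a
 * single keepout { .start = 8, .end = 16, .value = 0xff }, a 24-byte read
 * from offset 0 is split into three steps:
 *
 *	__nvmem_reg_read(nvmem, 0, val, 8);		// bytes 0-7
 *	memset(val + 8, 0xff, 8);			// keepout filler
 *	__nvmem_reg_read(nvmem, 16, val + 16, 8);	// bytes 16-23
 *
 * A write over the same range simply skips bytes 8-15 instead of
 * filling them.
 */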

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

/* Cell attributes will be dynamically allocated */
static struct attribute_group nvmem_cells_group = {
	.name		= "cells",
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static const struct attribute_group *nvmem_cells_groups[] = {
	&nvmem_cells_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct bin_attribute **cells_attrs, *attrs;
	struct nvmem_cell_entry *entry;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
		nvmem_cells_group.bin_attrs = NULL;
		goto unlock_mutex;
	}

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
				   sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!cells_attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		cells_attrs[i] = &attrs[i];
		i++;
	}

	nvmem_cells_group.bin_attrs = cells_attrs;

	ret = device_add_groups(&nvmem->dev, nvmem_cells_groups);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add information for one cell to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
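
/*
 * Example (editor's sketch, hypothetical values): a provider can describe
 * a cell at runtime, e.g. a 6-byte MAC address at offset 0x40:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *	int err = nvmem_add_one_cell(nvmem, &info);
 *
 * The name is duplicated with kstrdup_const(), so info may live on the
 * caller's stack.
 */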

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
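
/*
 * Example (editor's sketch, hypothetical driver names): minimal provider
 * registration from a platform driver's probe(), assuming foo_reg_read()
 * and foo_reg_write() match the nvmem_reg_read_t/nvmem_reg_write_t
 * signatures:
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "foo-otp",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.owner		= THIS_MODULE,
 *		.type		= NVMEM_TYPE_OTP,
 *		.size		= 1024,
 *		.word_size	= 4,
 *		.stride		= 4,
 *		.priv		= foo,
 *		.reg_read	= foo_reg_read,
 *		.reg_write	= foo_reg_write,
 *	};
 *	struct nvmem_device *nvmem = nvmem_register(&config);
 *
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 * Most drivers use devm_nvmem_register() below instead, which unregisters
 * the device automatically.
 */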

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * requesting device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
1406  * to a struct nvmem_cell.  The nvmem_cell will be freed by the
1407  * nvmem_cell_put().
1408  */
1409 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1410 {
1411 	struct device_node *cell_np, *nvmem_np;
1412 	struct nvmem_device *nvmem;
1413 	struct nvmem_cell_entry *cell_entry;
1414 	struct nvmem_cell *cell;
1415 	struct of_phandle_args cell_spec;
1416 	int index = 0;
1417 	int cell_index = 0;
1418 	int ret;
1419 
1420 	/* if cell name exists, find index to the name */
1421 	if (id)
1422 		index = of_property_match_string(np, "nvmem-cell-names", id);
1423 
1424 	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
1425 						  "#nvmem-cell-cells",
1426 						  index, &cell_spec);
1427 	if (ret)
1428 		return ERR_PTR(-ENOENT);
1429 
1430 	if (cell_spec.args_count > 1)
1431 		return ERR_PTR(-EINVAL);
1432 
1433 	cell_np = cell_spec.np;
1434 	if (cell_spec.args_count)
1435 		cell_index = cell_spec.args[0];
1436 
1437 	nvmem_np = of_get_parent(cell_np);
1438 	if (!nvmem_np) {
1439 		of_node_put(cell_np);
1440 		return ERR_PTR(-EINVAL);
1441 	}
1442 
1443 	/* nvmem layouts produce cells within the nvmem-layout container */
1444 	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
1445 		nvmem_np = of_get_next_parent(nvmem_np);
1446 		if (!nvmem_np) {
1447 			of_node_put(cell_np);
1448 			return ERR_PTR(-EINVAL);
1449 		}
1450 	}
1451 
1452 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1453 	of_node_put(nvmem_np);
1454 	if (IS_ERR(nvmem)) {
1455 		of_node_put(cell_np);
1456 		return ERR_CAST(nvmem);
1457 	}
1458 
1459 	ret = nvmem_layout_module_get_optional(nvmem);
1460 	if (ret) {
1461 		of_node_put(cell_np);
1462 		__nvmem_device_put(nvmem);
1463 		return ERR_PTR(ret);
1464 	}
1465 
1466 	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
1467 	of_node_put(cell_np);
1468 	if (!cell_entry) {
1469 		__nvmem_device_put(nvmem);
1470 		nvmem_layout_module_put(nvmem);
1471 		if (nvmem->layout)
1472 			return ERR_PTR(-EPROBE_DEFER);
1473 		else
1474 			return ERR_PTR(-ENOENT);
1475 	}
1476 
1477 	cell = nvmem_create_cell(cell_entry, id, cell_index);
1478 	if (IS_ERR(cell)) {
1479 		__nvmem_device_put(nvmem);
1480 		nvmem_layout_module_put(nvmem);
1481 	}
1482 
1483 	return cell;
1484 }
1485 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
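
/*
 * Example (editor's sketch): the device tree wiring this resolves. A
 * consumer node references a cell defined under a provider node:
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calib: calibration@10 {
 *			reg = <0x10 0x4>;
 *		};
 *	};
 *
 *	sensor {
 *		nvmem-cells = <&calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 *
 * of_nvmem_cell_get(np, "calibration") then looks up &calib among the
 * cells created by nvmem_add_cells_from_dt().
 */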
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
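
/*
 * Example (editor's sketch): a typical consumer flow, matching the DT or
 * lookup-table wiring described above:
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	void *buf;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);	// may be -EPROBE_DEFER
 *
 *	buf = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ... use len bytes at buf ...
 *	kfree(buf);
 */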

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* the result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
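
/*
 * Example (editor's illustration): for a cell with bit_offset = 2 and
 * nbits = 10, cell->bytes = DIV_ROUND_UP(10 + 2, 8) = 2. Showing each
 * byte msb-first, with c9..c0 the cell bits and x don't-care bits:
 *
 *	raw:	buf[0] = c5 c4 c3 c2 c1 c0  x  x
 *		buf[1] =  x  x  x  x c9 c8 c7 c6
 *
 *	after:	buf[0] = c7 c6 c5 c4 c3 c2 c1 c0
 *		buf[1] =  0  0  0  0  0  0 c9 c8
 */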

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

1720 		for (i = 1; i < cell->bytes; i++) {
1721 			/* Get last byte bits and shift them towards lsb */
1722 			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1723 			pbyte = *b;
1724 			p = b;
1725 			*b <<= bit_offset;
1726 			*b++ |= pbits;
1727 		}
1728 	}
1729 
	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
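
/*
 * Example (editor's sketch, hypothetical cell name): the fixed-size
 * helpers wrap nvmem_cell_get()/nvmem_cell_read()/nvmem_cell_put() and
 * return -EINVAL unless the cell is exactly sizeof(*val) bytes:
 *
 *	u32 speed_bin;
 *	int err = nvmem_cell_read_u32(dev, "speed-bin", &speed_bin);
 *
 *	if (err)
 *		return err;
 */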

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
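
/*
 * Example (editor's sketch): raw device-level access for consumers that
 * hold a whole nvmem device rather than a single cell:
 *
 *	u8 serial[16];
 *	struct nvmem_device *nvmem;
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, NULL);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x100, sizeof(serial), serial);
 *	if (ret < 0)
 *		return ret;
 */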

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
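
/*
 * Example (editor's sketch, hypothetical names): a board file can attach
 * cells to a provider by device name before that provider registers;
 * nvmem_add_cells_from_table() picks the table up in nvmem_register():
 *
 *	static struct nvmem_cell_info board_cells[] = {
 *		{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
 *	};
 *
 *	static struct nvmem_cell_table board_cell_table = {
 *		.nvmem_name	= "foo-otp0",
 *		.cells		= board_cells,
 *		.ncells		= ARRAY_SIZE(board_cells),
 *	};
 *
 *	nvmem_add_cell_table(&board_cell_table);
 */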

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
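
/*
 * Example (editor's sketch, hypothetical names): connecting a consumer
 * device to a named cell on non-DT platforms, resolved later by
 * nvmem_cell_get_from_lookup():
 *
 *	static struct nvmem_cell_lookup board_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-otp0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-eth.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 */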

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");