xref: /linux/drivers/base/regmap/regmap.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*
2  * Register map access API
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/mutex.h>
17 #include <linux/err.h>
18 #include <linux/rbtree.h>
19 
20 #define CREATE_TRACE_POINTS
21 #include <trace/events/regmap.h>
22 
23 #include "internal.h"
24 
25 /*
26  * Sometimes, for failures during very early init, the trace
27  * infrastructure isn't available early enough to be used.  For this
28  * sort of problem, defining LOG_DEVICE will add printks for basic
29  * register I/O on a specific device.
30  */
31 #undef LOG_DEVICE
32 
33 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 			       unsigned int mask, unsigned int val,
35 			       bool *change);
36 
37 bool regmap_reg_in_ranges(unsigned int reg,
38 			  const struct regmap_range *ranges,
39 			  unsigned int nranges)
40 {
41 	const struct regmap_range *r;
42 	int i;
43 
44 	for (i = 0, r = ranges; i < nranges; i++, r++)
45 		if (regmap_reg_in_range(reg, r))
46 			return true;
47 	return false;
48 }
49 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
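
/*
 * Illustrative sketch (not part of this file): a driver can describe its
 * writeable registers with a regmap_access_table built from regmap_range
 * entries and point regmap_config.wr_table at it.  The register numbers
 * and the foo_* names below are made up.
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		{ .range_min = 0x00, .range_max = 0x1f },
 *		{ .range_min = 0x40, .range_max = 0x4f },
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * A single register can also be tested directly with
 * regmap_reg_in_ranges(reg, foo_wr_ranges, ARRAY_SIZE(foo_wr_ranges)).
 */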
50 
51 static bool _regmap_check_range_table(struct regmap *map,
52 				      unsigned int reg,
53 				      const struct regmap_access_table *table)
54 {
55 	/* Check "no ranges" first */
56 	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
57 		return false;
58 
59 	/* In case zero "yes ranges" are supplied, any reg is OK */
60 	if (!table->n_yes_ranges)
61 		return true;
62 
63 	return regmap_reg_in_ranges(reg, table->yes_ranges,
64 				    table->n_yes_ranges);
65 }
66 
67 bool regmap_writeable(struct regmap *map, unsigned int reg)
68 {
69 	if (map->max_register && reg > map->max_register)
70 		return false;
71 
72 	if (map->writeable_reg)
73 		return map->writeable_reg(map->dev, reg);
74 
75 	if (map->wr_table)
76 		return _regmap_check_range_table(map, reg, map->wr_table);
77 
78 	return true;
79 }
80 
81 bool regmap_readable(struct regmap *map, unsigned int reg)
82 {
83 	if (map->max_register && reg > map->max_register)
84 		return false;
85 
86 	if (map->format.format_write)
87 		return false;
88 
89 	if (map->readable_reg)
90 		return map->readable_reg(map->dev, reg);
91 
92 	if (map->rd_table)
93 		return _regmap_check_range_table(map, reg, map->rd_table);
94 
95 	return true;
96 }
97 
98 bool regmap_volatile(struct regmap *map, unsigned int reg)
99 {
100 	if (!regmap_readable(map, reg))
101 		return false;
102 
103 	if (map->volatile_reg)
104 		return map->volatile_reg(map->dev, reg);
105 
106 	if (map->volatile_table)
107 		return _regmap_check_range_table(map, reg, map->volatile_table);
108 
109 	return true;
110 }
111 
112 bool regmap_precious(struct regmap *map, unsigned int reg)
113 {
114 	if (!regmap_readable(map, reg))
115 		return false;
116 
117 	if (map->precious_reg)
118 		return map->precious_reg(map->dev, reg);
119 
120 	if (map->precious_table)
121 		return _regmap_check_range_table(map, reg, map->precious_table);
122 
123 	return false;
124 }
125 
126 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
127 	size_t num)
128 {
129 	unsigned int i;
130 
131 	for (i = 0; i < num; i++)
132 		if (!regmap_volatile(map, reg + i))
133 			return false;
134 
135 	return true;
136 }
137 
138 static void regmap_format_2_6_write(struct regmap *map,
139 				     unsigned int reg, unsigned int val)
140 {
141 	u8 *out = map->work_buf;
142 
143 	*out = (reg << 6) | val;
144 }
145 
146 static void regmap_format_4_12_write(struct regmap *map,
147 				     unsigned int reg, unsigned int val)
148 {
149 	__be16 *out = map->work_buf;
150 	*out = cpu_to_be16((reg << 12) | val);
151 }
152 
153 static void regmap_format_7_9_write(struct regmap *map,
154 				    unsigned int reg, unsigned int val)
155 {
156 	__be16 *out = map->work_buf;
157 	*out = cpu_to_be16((reg << 9) | val);
158 }
159 
160 static void regmap_format_10_14_write(struct regmap *map,
161 				    unsigned int reg, unsigned int val)
162 {
163 	u8 *out = map->work_buf;
164 
165 	out[2] = val;
166 	out[1] = (val >> 8) | (reg << 6);
167 	out[0] = reg >> 2;
168 }
169 
170 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
171 {
172 	u8 *b = buf;
173 
174 	b[0] = val << shift;
175 }
176 
177 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
178 {
179 	__be16 *b = buf;
180 
181 	b[0] = cpu_to_be16(val << shift);
182 }
183 
184 static void regmap_format_16_native(void *buf, unsigned int val,
185 				    unsigned int shift)
186 {
187 	*(u16 *)buf = val << shift;
188 }
189 
190 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
191 {
192 	u8 *b = buf;
193 
194 	val <<= shift;
195 
196 	b[0] = val >> 16;
197 	b[1] = val >> 8;
198 	b[2] = val;
199 }
200 
201 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
202 {
203 	__be32 *b = buf;
204 
205 	b[0] = cpu_to_be32(val << shift);
206 }
207 
208 static void regmap_format_32_native(void *buf, unsigned int val,
209 				    unsigned int shift)
210 {
211 	*(u32 *)buf = val << shift;
212 }
213 
214 static unsigned int regmap_parse_8(void *buf)
215 {
216 	u8 *b = buf;
217 
218 	return b[0];
219 }
220 
221 static unsigned int regmap_parse_16_be(void *buf)
222 {
223 	__be16 *b = buf;
224 
225 	b[0] = be16_to_cpu(b[0]);
226 
227 	return b[0];
228 }
229 
230 static unsigned int regmap_parse_16_native(void *buf)
231 {
232 	return *(u16 *)buf;
233 }
234 
235 static unsigned int regmap_parse_24(void *buf)
236 {
237 	u8 *b = buf;
238 	unsigned int ret = b[2];
239 	ret |= ((unsigned int)b[1]) << 8;
240 	ret |= ((unsigned int)b[0]) << 16;
241 
242 	return ret;
243 }
244 
245 static unsigned int regmap_parse_32_be(void *buf)
246 {
247 	__be32 *b = buf;
248 
249 	b[0] = be32_to_cpu(b[0]);
250 
251 	return b[0];
252 }
253 
254 static unsigned int regmap_parse_32_native(void *buf)
255 {
256 	return *(u32 *)buf;
257 }
258 
259 static void regmap_lock_mutex(void *__map)
260 {
261 	struct regmap *map = __map;
262 	mutex_lock(&map->mutex);
263 }
264 
265 static void regmap_unlock_mutex(void *__map)
266 {
267 	struct regmap *map = __map;
268 	mutex_unlock(&map->mutex);
269 }
270 
271 static void regmap_lock_spinlock(void *__map)
272 {
273 	struct regmap *map = __map;
274 	spin_lock(&map->spinlock);
275 }
276 
277 static void regmap_unlock_spinlock(void *__map)
278 {
279 	struct regmap *map = __map;
280 	spin_unlock(&map->spinlock);
281 }
282 
283 static void dev_get_regmap_release(struct device *dev, void *res)
284 {
285 	/*
286 	 * We don't actually have anything to do here; the goal here
287 	 * is not to manage the regmap but to provide a simple way to
288 	 * get the regmap back given a struct device.
289 	 */
290 }
291 
292 static bool _regmap_range_add(struct regmap *map,
293 			      struct regmap_range_node *data)
294 {
295 	struct rb_root *root = &map->range_tree;
296 	struct rb_node **new = &(root->rb_node), *parent = NULL;
297 
298 	while (*new) {
299 		struct regmap_range_node *this =
300 			container_of(*new, struct regmap_range_node, node);
301 
302 		parent = *new;
303 		if (data->range_max < this->range_min)
304 			new = &((*new)->rb_left);
305 		else if (data->range_min > this->range_max)
306 			new = &((*new)->rb_right);
307 		else
308 			return false;
309 	}
310 
311 	rb_link_node(&data->node, parent, new);
312 	rb_insert_color(&data->node, root);
313 
314 	return true;
315 }
316 
317 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
318 						      unsigned int reg)
319 {
320 	struct rb_node *node = map->range_tree.rb_node;
321 
322 	while (node) {
323 		struct regmap_range_node *this =
324 			container_of(node, struct regmap_range_node, node);
325 
326 		if (reg < this->range_min)
327 			node = node->rb_left;
328 		else if (reg > this->range_max)
329 			node = node->rb_right;
330 		else
331 			return this;
332 	}
333 
334 	return NULL;
335 }
336 
337 static void regmap_range_exit(struct regmap *map)
338 {
339 	struct rb_node *next;
340 	struct regmap_range_node *range_node;
341 
342 	next = rb_first(&map->range_tree);
343 	while (next) {
344 		range_node = rb_entry(next, struct regmap_range_node, node);
345 		next = rb_next(&range_node->node);
346 		rb_erase(&range_node->node, &map->range_tree);
347 		kfree(range_node);
348 	}
349 
350 	kfree(map->selector_work_buf);
351 }
352 
353 /**
354  * regmap_init(): Initialise register map
355  *
356  * @dev: Device that will be interacted with
357  * @bus: Bus-specific callbacks to use with device
358  * @bus_context: Data passed to bus-specific callbacks
359  * @config: Configuration for register map
360  *
361  * The return value will be an ERR_PTR() on error or a valid pointer to
362  * a struct regmap.  This function should generally not be called
363  * directly, it should be called by bus-specific init functions.
364  */
365 struct regmap *regmap_init(struct device *dev,
366 			   const struct regmap_bus *bus,
367 			   void *bus_context,
368 			   const struct regmap_config *config)
369 {
370 	struct regmap *map, **m;
371 	int ret = -EINVAL;
372 	enum regmap_endian reg_endian, val_endian;
373 	int i, j;
374 
375 	if (!bus || !config)
376 		goto err;
377 
378 	map = kzalloc(sizeof(*map), GFP_KERNEL);
379 	if (map == NULL) {
380 		ret = -ENOMEM;
381 		goto err;
382 	}
383 
384 	if (config->lock && config->unlock) {
385 		map->lock = config->lock;
386 		map->unlock = config->unlock;
387 		map->lock_arg = config->lock_arg;
388 	} else {
389 		if (bus->fast_io) {
390 			spin_lock_init(&map->spinlock);
391 			map->lock = regmap_lock_spinlock;
392 			map->unlock = regmap_unlock_spinlock;
393 		} else {
394 			mutex_init(&map->mutex);
395 			map->lock = regmap_lock_mutex;
396 			map->unlock = regmap_unlock_mutex;
397 		}
398 		map->lock_arg = map;
399 	}
400 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
401 	map->format.pad_bytes = config->pad_bits / 8;
402 	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
403 	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
404 			config->val_bits + config->pad_bits, 8);
405 	map->reg_shift = config->pad_bits % 8;
406 	if (config->reg_stride)
407 		map->reg_stride = config->reg_stride;
408 	else
409 		map->reg_stride = 1;
410 	map->use_single_rw = config->use_single_rw;
411 	map->dev = dev;
412 	map->bus = bus;
413 	map->bus_context = bus_context;
414 	map->max_register = config->max_register;
415 	map->wr_table = config->wr_table;
416 	map->rd_table = config->rd_table;
417 	map->volatile_table = config->volatile_table;
418 	map->precious_table = config->precious_table;
419 	map->writeable_reg = config->writeable_reg;
420 	map->readable_reg = config->readable_reg;
421 	map->volatile_reg = config->volatile_reg;
422 	map->precious_reg = config->precious_reg;
423 	map->cache_type = config->cache_type;
424 	map->name = config->name;
425 
426 	if (config->read_flag_mask || config->write_flag_mask) {
427 		map->read_flag_mask = config->read_flag_mask;
428 		map->write_flag_mask = config->write_flag_mask;
429 	} else {
430 		map->read_flag_mask = bus->read_flag_mask;
431 	}
432 
433 	reg_endian = config->reg_format_endian;
434 	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
435 		reg_endian = bus->reg_format_endian_default;
436 	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
437 		reg_endian = REGMAP_ENDIAN_BIG;
438 
439 	val_endian = config->val_format_endian;
440 	if (val_endian == REGMAP_ENDIAN_DEFAULT)
441 		val_endian = bus->val_format_endian_default;
442 	if (val_endian == REGMAP_ENDIAN_DEFAULT)
443 		val_endian = REGMAP_ENDIAN_BIG;
444 
445 	switch (config->reg_bits + map->reg_shift) {
446 	case 2:
447 		switch (config->val_bits) {
448 		case 6:
449 			map->format.format_write = regmap_format_2_6_write;
450 			break;
451 		default:
452 			goto err_map;
453 		}
454 		break;
455 
456 	case 4:
457 		switch (config->val_bits) {
458 		case 12:
459 			map->format.format_write = regmap_format_4_12_write;
460 			break;
461 		default:
462 			goto err_map;
463 		}
464 		break;
465 
466 	case 7:
467 		switch (config->val_bits) {
468 		case 9:
469 			map->format.format_write = regmap_format_7_9_write;
470 			break;
471 		default:
472 			goto err_map;
473 		}
474 		break;
475 
476 	case 10:
477 		switch (config->val_bits) {
478 		case 14:
479 			map->format.format_write = regmap_format_10_14_write;
480 			break;
481 		default:
482 			goto err_map;
483 		}
484 		break;
485 
486 	case 8:
487 		map->format.format_reg = regmap_format_8;
488 		break;
489 
490 	case 16:
491 		switch (reg_endian) {
492 		case REGMAP_ENDIAN_BIG:
493 			map->format.format_reg = regmap_format_16_be;
494 			break;
495 		case REGMAP_ENDIAN_NATIVE:
496 			map->format.format_reg = regmap_format_16_native;
497 			break;
498 		default:
499 			goto err_map;
500 		}
501 		break;
502 
503 	case 32:
504 		switch (reg_endian) {
505 		case REGMAP_ENDIAN_BIG:
506 			map->format.format_reg = regmap_format_32_be;
507 			break;
508 		case REGMAP_ENDIAN_NATIVE:
509 			map->format.format_reg = regmap_format_32_native;
510 			break;
511 		default:
512 			goto err_map;
513 		}
514 		break;
515 
516 	default:
517 		goto err_map;
518 	}
519 
520 	switch (config->val_bits) {
521 	case 8:
522 		map->format.format_val = regmap_format_8;
523 		map->format.parse_val = regmap_parse_8;
524 		break;
525 	case 16:
526 		switch (val_endian) {
527 		case REGMAP_ENDIAN_BIG:
528 			map->format.format_val = regmap_format_16_be;
529 			map->format.parse_val = regmap_parse_16_be;
530 			break;
531 		case REGMAP_ENDIAN_NATIVE:
532 			map->format.format_val = regmap_format_16_native;
533 			map->format.parse_val = regmap_parse_16_native;
534 			break;
535 		default:
536 			goto err_map;
537 		}
538 		break;
539 	case 24:
540 		if (val_endian != REGMAP_ENDIAN_BIG)
541 			goto err_map;
542 		map->format.format_val = regmap_format_24;
543 		map->format.parse_val = regmap_parse_24;
544 		break;
545 	case 32:
546 		switch (val_endian) {
547 		case REGMAP_ENDIAN_BIG:
548 			map->format.format_val = regmap_format_32_be;
549 			map->format.parse_val = regmap_parse_32_be;
550 			break;
551 		case REGMAP_ENDIAN_NATIVE:
552 			map->format.format_val = regmap_format_32_native;
553 			map->format.parse_val = regmap_parse_32_native;
554 			break;
555 		default:
556 			goto err_map;
557 		}
558 		break;
559 	}
560 
561 	if (map->format.format_write) {
562 		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
563 		    (val_endian != REGMAP_ENDIAN_BIG))
564 			goto err_map;
565 		map->use_single_rw = true;
566 	}
567 
568 	if (!map->format.format_write &&
569 	    !(map->format.format_reg && map->format.format_val))
570 		goto err_map;
571 
572 	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
573 	if (map->work_buf == NULL) {
574 		ret = -ENOMEM;
575 		goto err_map;
576 	}
577 
578 	map->range_tree = RB_ROOT;
579 	for (i = 0; i < config->num_ranges; i++) {
580 		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
581 		struct regmap_range_node *new;
582 
583 		/* Sanity check */
584 		if (range_cfg->range_max < range_cfg->range_min) {
585 			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
586 				range_cfg->range_max, range_cfg->range_min);
587 			goto err_range;
588 		}
589 
590 		if (range_cfg->range_max > map->max_register) {
591 			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
592 				range_cfg->range_max, map->max_register);
593 			goto err_range;
594 		}
595 
596 		if (range_cfg->selector_reg > map->max_register) {
597 			dev_err(map->dev,
598 				"Invalid range %d: selector out of map\n", i);
599 			goto err_range;
600 		}
601 
602 		if (range_cfg->window_len == 0) {
603 			dev_err(map->dev, "Invalid range %d: window_len 0\n",
604 				i);
605 			goto err_range;
606 		}
607 
608 		/* Make sure that this register range has no selector
609 		   or data window within its boundary */
610 		for (j = 0; j < config->num_ranges; j++) {
611 			unsigned sel_reg = config->ranges[j].selector_reg;
612 			unsigned win_min = config->ranges[j].window_start;
613 			unsigned win_max = win_min +
614 					   config->ranges[j].window_len - 1;
615 
616 			if (range_cfg->range_min <= sel_reg &&
617 			    sel_reg <= range_cfg->range_max) {
618 				dev_err(map->dev,
619 					"Range %d: selector for %d in window\n",
620 					i, j);
621 				goto err_range;
622 			}
623 
624 			if (!(win_max < range_cfg->range_min ||
625 			      win_min > range_cfg->range_max)) {
626 				dev_err(map->dev,
627 					"Range %d: window for %d in window\n",
628 					i, j);
629 				goto err_range;
630 			}
631 		}
632 
633 		new = kzalloc(sizeof(*new), GFP_KERNEL);
634 		if (new == NULL) {
635 			ret = -ENOMEM;
636 			goto err_range;
637 		}
638 
639 		new->map = map;
640 		new->name = range_cfg->name;
641 		new->range_min = range_cfg->range_min;
642 		new->range_max = range_cfg->range_max;
643 		new->selector_reg = range_cfg->selector_reg;
644 		new->selector_mask = range_cfg->selector_mask;
645 		new->selector_shift = range_cfg->selector_shift;
646 		new->window_start = range_cfg->window_start;
647 		new->window_len = range_cfg->window_len;
648 
649 		if (_regmap_range_add(map, new) == false) {
650 			dev_err(map->dev, "Failed to add range %d\n", i);
651 			kfree(new);
652 			goto err_range;
653 		}
654 
655 		if (map->selector_work_buf == NULL) {
656 			map->selector_work_buf =
657 				kzalloc(map->format.buf_size, GFP_KERNEL);
658 			if (map->selector_work_buf == NULL) {
659 				ret = -ENOMEM;
660 				goto err_range;
661 			}
662 		}
663 	}
664 
665 	ret = regcache_init(map, config);
666 	if (ret != 0)
667 		goto err_range;
668 
669 	regmap_debugfs_init(map, config->name);
670 
671 	/* Add a devres resource for dev_get_regmap() */
672 	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
673 	if (!m) {
674 		ret = -ENOMEM;
675 		goto err_debugfs;
676 	}
677 	*m = map;
678 	devres_add(dev, m);
679 
680 	return map;
681 
682 err_debugfs:
683 	regmap_debugfs_exit(map);
684 	regcache_exit(map);
685 err_range:
686 	regmap_range_exit(map);
687 	kfree(map->work_buf);
688 err_map:
689 	kfree(map);
690 err:
691 	return ERR_PTR(ret);
692 }
693 EXPORT_SYMBOL_GPL(regmap_init);
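
/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * regmap_init() through a bus-specific wrapper such as regmap_init_i2c().
 * The foo_* names and register sizes below are made up.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x7f,
 *	};
 *
 *	map = regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */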
694 
695 static void devm_regmap_release(struct device *dev, void *res)
696 {
697 	regmap_exit(*(struct regmap **)res);
698 }
699 
700 /**
701  * devm_regmap_init(): Initialise managed register map
702  *
703  * @dev: Device that will be interacted with
704  * @bus: Bus-specific callbacks to use with device
705  * @bus_context: Data passed to bus-specific callbacks
706  * @config: Configuration for register map
707  *
708  * The return value will be an ERR_PTR() on error or a valid pointer
709  * to a struct regmap.  This function should generally not be called
710  * directly, it should be called by bus-specific init functions.  The
711  * map will be automatically freed by the device management code.
712  */
713 struct regmap *devm_regmap_init(struct device *dev,
714 				const struct regmap_bus *bus,
715 				void *bus_context,
716 				const struct regmap_config *config)
717 {
718 	struct regmap **ptr, *regmap;
719 
720 	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
721 	if (!ptr)
722 		return ERR_PTR(-ENOMEM);
723 
724 	regmap = regmap_init(dev, bus, bus_context, config);
725 	if (!IS_ERR(regmap)) {
726 		*ptr = regmap;
727 		devres_add(dev, ptr);
728 	} else {
729 		devres_free(ptr);
730 	}
731 
732 	return regmap;
733 }
734 EXPORT_SYMBOL_GPL(devm_regmap_init);
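
/*
 * Illustrative sketch (not part of this file): the managed variant is
 * usually called from probe() via a bus wrapper such as
 * devm_regmap_init_i2c(); the map is then released automatically when
 * the device is unbound, so no matching regmap_exit() call is needed.
 * foo_regmap_config is the made-up configuration from the earlier sketch.
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */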
735 
736 /**
737  * regmap_reinit_cache(): Reinitialise the current register cache
738  *
739  * @map: Register map to operate on.
740  * @config: New configuration.  Only the cache data will be used.
741  *
742  * Discard any existing register cache for the map and initialize a
743  * new cache.  This can be used to restore the cache to defaults or to
744  * update the cache configuration to reflect runtime discovery of the
745  * hardware.
746  *
747  * No explicit locking is done here; the user needs to ensure that
748  * this function will not race with other calls to regmap.
749  */
750 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
751 {
752 	regcache_exit(map);
753 	regmap_debugfs_exit(map);
754 
755 	map->max_register = config->max_register;
756 	map->writeable_reg = config->writeable_reg;
757 	map->readable_reg = config->readable_reg;
758 	map->volatile_reg = config->volatile_reg;
759 	map->precious_reg = config->precious_reg;
760 	map->cache_type = config->cache_type;
761 
762 	regmap_debugfs_init(map, config->name);
763 
764 	map->cache_bypass = false;
765 	map->cache_only = false;
766 
767 	return regcache_init(map, config);
768 }
769 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
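
/*
 * Illustrative sketch (not part of this file): once the exact device
 * variant has been identified at runtime, the cache can be switched to a
 * variant-specific configuration.  The foo_variant_b_regmap_config name
 * below is made up.
 *
 *	ret = regmap_reinit_cache(map, &foo_variant_b_regmap_config);
 *	if (ret != 0)
 *		return ret;
 */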
770 
771 /**
772  * regmap_exit(): Free a previously allocated register map
773  */
774 void regmap_exit(struct regmap *map)
775 {
776 	regcache_exit(map);
777 	regmap_debugfs_exit(map);
778 	regmap_range_exit(map);
779 	if (map->bus->free_context)
780 		map->bus->free_context(map->bus_context);
781 	kfree(map->work_buf);
782 	kfree(map);
783 }
784 EXPORT_SYMBOL_GPL(regmap_exit);
785 
786 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
787 {
788 	struct regmap **r = res;
789 	if (!r || !*r) {
790 		WARN_ON(!r || !*r);
791 		return 0;
792 	}
793 
794 	/* If the user didn't specify a name, match any */
795 	if (data)
796 		return (*r)->name == data;
797 	else
798 		return 1;
799 }
800 
801 /**
802  * dev_get_regmap(): Obtain the regmap (if any) for a device
803  *
804  * @dev: Device to retrieve the map for
805  * @name: Optional name for the register map, usually NULL.
806  *
807  * Returns the regmap for the device if one is present, or NULL.  If
808  * name is specified then it must match the name specified when
809  * registering the device; if it is NULL then the first regmap found
810  * will be used.  Devices with multiple register maps are very rare;
811  * generic code should normally not need to specify a name.
812  */
813 struct regmap *dev_get_regmap(struct device *dev, const char *name)
814 {
815 	struct regmap **r = devres_find(dev, dev_get_regmap_release,
816 					dev_get_regmap_match, (void *)name);
817 
818 	if (!r)
819 		return NULL;
820 	return *r;
821 }
822 EXPORT_SYMBOL_GPL(dev_get_regmap);
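
/*
 * Illustrative sketch (not part of this file): a child driver such as an
 * MFD cell can look up the regmap registered by its parent device; dev
 * here is the child's own struct device.
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */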
823 
824 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
825 			       struct regmap_range_node *range,
826 			       unsigned int val_num)
827 {
828 	void *orig_work_buf;
829 	unsigned int win_offset;
830 	unsigned int win_page;
831 	bool page_chg;
832 	int ret;
833 
834 	win_offset = (*reg - range->range_min) % range->window_len;
835 	win_page = (*reg - range->range_min) / range->window_len;
836 
837 	if (val_num > 1) {
838 		/* Bulk write shouldn't cross range boundary */
839 		if (*reg + val_num - 1 > range->range_max)
840 			return -EINVAL;
841 
842 		/* ... or single page boundary */
843 		if (val_num > range->window_len - win_offset)
844 			return -EINVAL;
845 	}
846 
847 	/* It is possible to have the selector register inside the data
848 	   window.  In that case the selector register is located on every
849 	   page and needs no page switching when accessed alone. */
850 	if (val_num > 1 ||
851 	    range->window_start + win_offset != range->selector_reg) {
852 		/* Use separate work_buf during page switching */
853 		orig_work_buf = map->work_buf;
854 		map->work_buf = map->selector_work_buf;
855 
856 		ret = _regmap_update_bits(map, range->selector_reg,
857 					  range->selector_mask,
858 					  win_page << range->selector_shift,
859 					  &page_chg);
860 
861 		map->work_buf = orig_work_buf;
862 
863 		if (ret != 0)
864 			return ret;
865 	}
866 
867 	*reg = range->window_start + win_offset;
868 
869 	return 0;
870 }
871 
872 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
873 			     const void *val, size_t val_len)
874 {
875 	struct regmap_range_node *range;
876 	u8 *u8 = map->work_buf;
877 	void *buf;
878 	int ret = -ENOTSUPP;
879 	size_t len;
880 	int i;
881 
882 	/* Check for unwritable registers before we start */
883 	if (map->writeable_reg)
884 		for (i = 0; i < val_len / map->format.val_bytes; i++)
885 			if (!map->writeable_reg(map->dev,
886 						reg + (i * map->reg_stride)))
887 				return -EINVAL;
888 
889 	if (!map->cache_bypass && map->format.parse_val) {
890 		unsigned int ival;
891 		int val_bytes = map->format.val_bytes;
892 		for (i = 0; i < val_len / val_bytes; i++) {
893 			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
894 			ival = map->format.parse_val(map->work_buf);
895 			ret = regcache_write(map, reg + (i * map->reg_stride),
896 					     ival);
897 			if (ret) {
898 				dev_err(map->dev,
899 					"Error in caching of register: %x ret: %d\n",
900 					reg + (i * map->reg_stride), ret);
901 				return ret;
902 			}
903 		}
904 		if (map->cache_only) {
905 			map->cache_dirty = true;
906 			return 0;
907 		}
908 	}
909 
910 	range = _regmap_range_lookup(map, reg);
911 	if (range) {
912 		int val_num = val_len / map->format.val_bytes;
913 		int win_offset = (reg - range->range_min) % range->window_len;
914 		int win_residue = range->window_len - win_offset;
915 
916 		/* If the write goes beyond the end of the window, split it */
917 		while (val_num > win_residue) {
918 			dev_dbg(map->dev, "Writing window %d/%zu\n",
919 				win_residue, val_len / map->format.val_bytes);
920 			ret = _regmap_raw_write(map, reg, val, win_residue *
921 						map->format.val_bytes);
922 			if (ret != 0)
923 				return ret;
924 
925 			reg += win_residue;
926 			val_num -= win_residue;
927 			val += win_residue * map->format.val_bytes;
928 			val_len -= win_residue * map->format.val_bytes;
929 
930 			win_offset = (reg - range->range_min) %
931 				range->window_len;
932 			win_residue = range->window_len - win_offset;
933 		}
934 
935 		ret = _regmap_select_page(map, &reg, range, val_num);
936 		if (ret != 0)
937 			return ret;
938 	}
939 
940 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
941 
942 	u8[0] |= map->write_flag_mask;
943 
944 	trace_regmap_hw_write_start(map->dev, reg,
945 				    val_len / map->format.val_bytes);
946 
947 	/* If we're doing a single register write we can probably just
948 	 * send the work_buf directly; otherwise try to do a gather
949 	 * write.
950 	 */
951 	if (val == (map->work_buf + map->format.pad_bytes +
952 		    map->format.reg_bytes))
953 		ret = map->bus->write(map->bus_context, map->work_buf,
954 				      map->format.reg_bytes +
955 				      map->format.pad_bytes +
956 				      val_len);
957 	else if (map->bus->gather_write)
958 		ret = map->bus->gather_write(map->bus_context, map->work_buf,
959 					     map->format.reg_bytes +
960 					     map->format.pad_bytes,
961 					     val, val_len);
962 
963 	/* If that didn't work, fall back on linearising by hand. */
964 	if (ret == -ENOTSUPP) {
965 		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
966 		buf = kzalloc(len, GFP_KERNEL);
967 		if (!buf)
968 			return -ENOMEM;
969 
970 		memcpy(buf, map->work_buf, map->format.reg_bytes);
971 		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
972 		       val, val_len);
973 		ret = map->bus->write(map->bus_context, buf, len);
974 
975 		kfree(buf);
976 	}
977 
978 	trace_regmap_hw_write_done(map->dev, reg,
979 				   val_len / map->format.val_bytes);
980 
981 	return ret;
982 }
983 
984 int _regmap_write(struct regmap *map, unsigned int reg,
985 		  unsigned int val)
986 {
987 	struct regmap_range_node *range;
988 	int ret;
989 	BUG_ON(!map->format.format_write && !map->format.format_val);
990 
991 	if (!map->cache_bypass && map->format.format_write) {
992 		ret = regcache_write(map, reg, val);
993 		if (ret != 0)
994 			return ret;
995 		if (map->cache_only) {
996 			map->cache_dirty = true;
997 			return 0;
998 		}
999 	}
1000 
1001 #ifdef LOG_DEVICE
1002 	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1003 		dev_info(map->dev, "%x <= %x\n", reg, val);
1004 #endif
1005 
1006 	trace_regmap_reg_write(map->dev, reg, val);
1007 
1008 	if (map->format.format_write) {
1009 		range = _regmap_range_lookup(map, reg);
1010 		if (range) {
1011 			ret = _regmap_select_page(map, &reg, range, 1);
1012 			if (ret != 0)
1013 				return ret;
1014 		}
1015 
1016 		map->format.format_write(map, reg, val);
1017 
1018 		trace_regmap_hw_write_start(map->dev, reg, 1);
1019 
1020 		ret = map->bus->write(map->bus_context, map->work_buf,
1021 				      map->format.buf_size);
1022 
1023 		trace_regmap_hw_write_done(map->dev, reg, 1);
1024 
1025 		return ret;
1026 	} else {
1027 		map->format.format_val(map->work_buf + map->format.reg_bytes
1028 				       + map->format.pad_bytes, val, 0);
1029 		return _regmap_raw_write(map, reg,
1030 					 map->work_buf +
1031 					 map->format.reg_bytes +
1032 					 map->format.pad_bytes,
1033 					 map->format.val_bytes);
1034 	}
1035 }
1036 
1037 /**
1038  * regmap_write(): Write a value to a single register
1039  *
1040  * @map: Register map to write to
1041  * @reg: Register to write to
1042  * @val: Value to be written
1043  *
1044  * A value of zero will be returned on success, a negative errno will
1045  * be returned in error cases.
1046  */
1047 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1048 {
1049 	int ret;
1050 
1051 	if (reg % map->reg_stride)
1052 		return -EINVAL;
1053 
1054 	map->lock(map->lock_arg);
1055 
1056 	ret = _regmap_write(map, reg, val);
1057 
1058 	map->unlock(map->lock_arg);
1059 
1060 	return ret;
1061 }
1062 EXPORT_SYMBOL_GPL(regmap_write);
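
/*
 * Illustrative sketch (not part of this file): writing a single register.
 * The FOO_CTRL address and the value below are made up.
 *
 *	ret = regmap_write(map, FOO_CTRL, 0x01);
 *	if (ret != 0)
 *		dev_err(dev, "Failed to write FOO_CTRL: %d\n", ret);
 */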
1063 
1064 /**
1065  * regmap_raw_write(): Write raw values to one or more registers
1066  *
1067  * @map: Register map to write to
1068  * @reg: Initial register to write to
1069  * @val: Block of data to be written, laid out for direct transmission to the
1070  *       device
1071  * @val_len: Length of data pointed to by val.
1072  *
1073  * This function is intended to be used for things like firmware
1074  * download where a large block of data needs to be transferred to the
1075  * device.  No formatting will be done on the data provided.
1076  *
1077  * A value of zero will be returned on success, a negative errno will
1078  * be returned in error cases.
1079  */
1080 int regmap_raw_write(struct regmap *map, unsigned int reg,
1081 		     const void *val, size_t val_len)
1082 {
1083 	int ret;
1084 
1085 	if (val_len % map->format.val_bytes)
1086 		return -EINVAL;
1087 	if (reg % map->reg_stride)
1088 		return -EINVAL;
1089 
1090 	map->lock(map->lock_arg);
1091 
1092 	ret = _regmap_raw_write(map, reg, val, val_len);
1093 
1094 	map->unlock(map->lock_arg);
1095 
1096 	return ret;
1097 }
1098 EXPORT_SYMBOL_GPL(regmap_raw_write);
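
/*
 * Illustrative sketch (not part of this file): the raw interface takes
 * data already laid out in the device's own format, for example a
 * firmware blob.  FOO_DSP_BASE is made up; val_len must be a multiple of
 * the value size and the register must be aligned to the stride.
 *
 *	ret = regmap_raw_write(map, FOO_DSP_BASE, fw->data, fw->size);
 *	if (ret != 0)
 *		dev_err(dev, "Failed to download firmware: %d\n", ret);
 */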
1099 
1100 /**
1101  * regmap_bulk_write(): Write multiple registers to the device
1102  *
1103  * @map: Register map to write to
1104  * @reg: First register to write to
1105  * @val: Block of data to be written, in native register size for device
1106  * @val_count: Number of registers to write
1107  *
1108  * This function is intended to be used for writing a large block of
1109  * data to the device in either a single transfer or multiple transfers.
1110  *
1111  * A value of zero will be returned on success, a negative errno will
1112  * be returned in error cases.
1113  */
1114 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1115 		     size_t val_count)
1116 {
1117 	int ret = 0, i;
1118 	size_t val_bytes = map->format.val_bytes;
1119 	void *wval;
1120 
1121 	if (!map->format.parse_val)
1122 		return -EINVAL;
1123 	if (reg % map->reg_stride)
1124 		return -EINVAL;
1125 
1126 	map->lock(map->lock_arg);
1127 
1128 	/* No formatting is required if val_bytes is 1 */
1129 	if (val_bytes == 1) {
1130 		wval = (void *)val;
1131 	} else {
1132 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
1133 		if (!wval) {
1134 			ret = -ENOMEM;
1135 			dev_err(map->dev, "Error in memory allocation\n");
1136 			goto out;
1137 		}
1138 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
1139 			map->format.parse_val(wval + i);
1140 	}
1141 	/*
1142 	 * Some devices do not support bulk write; for them we issue a
1143 	 * series of single register writes via the unlocked helper.
1144 	 */
1145 	if (map->use_single_rw) {
1146 		for (i = 0; i < val_count; i++) {
1147 			ret = _regmap_raw_write(map,
1148 						reg + (i * map->reg_stride),
1149 						wval + (i * val_bytes),
1150 						val_bytes);
1151 			if (ret != 0)
1152 				break;
1153 		}
1154 	} else {
1155 		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
1156 	}
1157 
1158 	if (val_bytes != 1)
1159 		kfree(wval);
1160 
1161 out:
1162 	map->unlock(map->lock_arg);
1163 	return ret;
1164 }
1165 EXPORT_SYMBOL_GPL(regmap_bulk_write);
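
/*
 * Illustrative sketch (not part of this file): bulk writes take values in
 * native CPU representation and the core formats them for the bus.  A
 * 16-bit value format is assumed and FOO_COEFF_BASE is made up.
 *
 *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(map, FOO_COEFF_BASE, coeffs,
 *				ARRAY_SIZE(coeffs));
 */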
1166 
1167 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1168 			    unsigned int val_len)
1169 {
1170 	struct regmap_range_node *range;
1171 	u8 *u8 = map->work_buf;
1172 	int ret;
1173 
1174 	range = _regmap_range_lookup(map, reg);
1175 	if (range) {
1176 		ret = _regmap_select_page(map, &reg, range,
1177 					  val_len / map->format.val_bytes);
1178 		if (ret != 0)
1179 			return ret;
1180 	}
1181 
1182 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
1183 
1184 	/*
1185 	 * Some buses or devices flag reads by setting the high bits in the
1186 	 * register address; since it's always the high bits for all
1187 	 * current formats we can do this here rather than in
1188 	 * formatting.  This may break if we get interesting formats.
1189 	 */
1190 	u8[0] |= map->read_flag_mask;
1191 
1192 	trace_regmap_hw_read_start(map->dev, reg,
1193 				   val_len / map->format.val_bytes);
1194 
1195 	ret = map->bus->read(map->bus_context, map->work_buf,
1196 			     map->format.reg_bytes + map->format.pad_bytes,
1197 			     val, val_len);
1198 
1199 	trace_regmap_hw_read_done(map->dev, reg,
1200 				  val_len / map->format.val_bytes);
1201 
1202 	return ret;
1203 }
1204 
1205 static int _regmap_read(struct regmap *map, unsigned int reg,
1206 			unsigned int *val)
1207 {
1208 	int ret;
1209 
1210 	if (!map->cache_bypass) {
1211 		ret = regcache_read(map, reg, val);
1212 		if (ret == 0)
1213 			return 0;
1214 	}
1215 
1216 	if (!map->format.parse_val)
1217 		return -EINVAL;
1218 
1219 	if (map->cache_only)
1220 		return -EBUSY;
1221 
1222 	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
1223 	if (ret == 0) {
1224 		*val = map->format.parse_val(map->work_buf);
1225 
1226 #ifdef LOG_DEVICE
1227 		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1228 			dev_info(map->dev, "%x => %x\n", reg, *val);
1229 #endif
1230 
1231 		trace_regmap_reg_read(map->dev, reg, *val);
1232 	}
1233 
1234 	if (ret == 0 && !map->cache_bypass)
1235 		regcache_write(map, reg, *val);
1236 
1237 	return ret;
1238 }
1239 
1240 /**
1241  * regmap_read(): Read a value from a single register
1242  *
1243  * @map: Register map to read from
1244  * @reg: Register to be read from
1245  * @val: Pointer to store read value
1246  *
1247  * A value of zero will be returned on success, a negative errno will
1248  * be returned in error cases.
1249  */
1250 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
1251 {
1252 	int ret;
1253 
1254 	if (reg % map->reg_stride)
1255 		return -EINVAL;
1256 
1257 	map->lock(map->lock_arg);
1258 
1259 	ret = _regmap_read(map, reg, val);
1260 
1261 	map->unlock(map->lock_arg);
1262 
1263 	return ret;
1264 }
1265 EXPORT_SYMBOL_GPL(regmap_read);
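
/*
 * Illustrative sketch (not part of this file): reading a single register.
 * The FOO_STATUS address is made up.
 *
 *	unsigned int val;
 *
 *	ret = regmap_read(map, FOO_STATUS, &val);
 *	if (ret == 0)
 *		dev_dbg(dev, "status: %x\n", val);
 */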
1266 
1267 /**
1268  * regmap_raw_read(): Read raw data from the device
1269  *
1270  * @map: Register map to read from
1271  * @reg: First register to be read from
1272  * @val: Pointer to store read value
1273  * @val_len: Size of data to read
1274  *
1275  * A value of zero will be returned on success, a negative errno will
1276  * be returned in error cases.
1277  */
1278 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1279 		    size_t val_len)
1280 {
1281 	size_t val_bytes = map->format.val_bytes;
1282 	size_t val_count = val_len / val_bytes;
1283 	unsigned int v;
1284 	int ret, i;
1285 
1286 	if (val_len % map->format.val_bytes)
1287 		return -EINVAL;
1288 	if (reg % map->reg_stride)
1289 		return -EINVAL;
1290 
1291 	map->lock(map->lock_arg);
1292 
1293 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
1294 	    map->cache_type == REGCACHE_NONE) {
1295 		/* Physical block read if there's no cache involved */
1296 		ret = _regmap_raw_read(map, reg, val, val_len);
1297 
1298 	} else {
1299 		/* Otherwise go word by word for the cache; should be low
1300 		 * cost as we expect to hit the cache.
1301 		 */
1302 		for (i = 0; i < val_count; i++) {
1303 			ret = _regmap_read(map, reg + (i * map->reg_stride),
1304 					   &v);
1305 			if (ret != 0)
1306 				goto out;
1307 
1308 			map->format.format_val(val + (i * val_bytes), v, 0);
1309 		}
1310 	}
1311 
1312  out:
1313 	map->unlock(map->lock_arg);
1314 
1315 	return ret;
1316 }
1317 EXPORT_SYMBOL_GPL(regmap_raw_read);
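
/*
 * Illustrative sketch (not part of this file): raw reads return data in
 * the device's own format, so the caller converts it if needed.  A 16-bit
 * big-endian value format is assumed and FOO_SAMPLE_BASE is made up.
 *
 *	__be16 raw[8];
 *	u16 first;
 *
 *	ret = regmap_raw_read(map, FOO_SAMPLE_BASE, raw, sizeof(raw));
 *	if (ret == 0)
 *		first = be16_to_cpu(raw[0]);
 */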
1318 
1319 /**
1320  * regmap_bulk_read(): Read multiple registers from the device
1321  *
1322  * @map: Register map to read from
1323  * @reg: First register to be read from
1324  * @val: Pointer to store read value, in native register size for device
1325  * @val_count: Number of registers to read
1326  *
1327  * A value of zero will be returned on success, a negative errno will
1328  * be returned in error cases.
1329  */
1330 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
1331 		     size_t val_count)
1332 {
1333 	int ret, i;
1334 	size_t val_bytes = map->format.val_bytes;
1335 	bool vol = regmap_volatile_range(map, reg, val_count);
1336 
1337 	if (!map->format.parse_val)
1338 		return -EINVAL;
1339 	if (reg % map->reg_stride)
1340 		return -EINVAL;
1341 
1342 	if (vol || map->cache_type == REGCACHE_NONE) {
1343 		/*
1344 		 * Some devices do not support bulk read; for
1345 		 * them we issue a series of single read operations.
1346 		 */
1347 		if (map->use_single_rw) {
1348 			for (i = 0; i < val_count; i++) {
1349 				ret = regmap_raw_read(map,
1350 						reg + (i * map->reg_stride),
1351 						val + (i * val_bytes),
1352 						val_bytes);
1353 				if (ret != 0)
1354 					return ret;
1355 			}
1356 		} else {
1357 			ret = regmap_raw_read(map, reg, val,
1358 					      val_bytes * val_count);
1359 			if (ret != 0)
1360 				return ret;
1361 		}
1362 
1363 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
1364 			map->format.parse_val(val + i);
1365 	} else {
1366 		for (i = 0; i < val_count; i++) {
1367 			unsigned int ival;
1368 			ret = regmap_read(map, reg + (i * map->reg_stride),
1369 					  &ival);
1370 			if (ret != 0)
1371 				return ret;
1372 			memcpy(val + (i * val_bytes), &ival, val_bytes);
1373 		}
1374 	}
1375 
1376 	return 0;
1377 }
1378 EXPORT_SYMBOL_GPL(regmap_bulk_read);
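
/*
 * Illustrative sketch (not part of this file): bulk reads return values
 * already converted to native representation.  A 16-bit value format is
 * assumed and FOO_SAMPLE_BASE is made up.
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(map, FOO_SAMPLE_BASE, samples,
 *			       ARRAY_SIZE(samples));
 */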
1379 
1380 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
1381 			       unsigned int mask, unsigned int val,
1382 			       bool *change)
1383 {
1384 	int ret;
1385 	unsigned int tmp, orig;
1386 
1387 	ret = _regmap_read(map, reg, &orig);
1388 	if (ret != 0)
1389 		return ret;
1390 
1391 	tmp = orig & ~mask;
1392 	tmp |= val & mask;
1393 
1394 	if (tmp != orig) {
1395 		ret = _regmap_write(map, reg, tmp);
1396 		*change = true;
1397 	} else {
1398 		*change = false;
1399 	}
1400 
1401 	return ret;
1402 }
1403 
1404 /**
1405  * regmap_update_bits(): Perform a read/modify/write cycle on the register map
1406  *
1407  * @map: Register map to update
1408  * @reg: Register to update
1409  * @mask: Bitmask to change
1410  * @val: New value for bitmask
1411  *
1412  * Returns zero for success, a negative number on error.
1413  */
1414 int regmap_update_bits(struct regmap *map, unsigned int reg,
1415 		       unsigned int mask, unsigned int val)
1416 {
1417 	bool change;
1418 	int ret;
1419 
1420 	map->lock(map->lock_arg);
1421 	ret = _regmap_update_bits(map, reg, mask, val, &change);
1422 	map->unlock(map->lock_arg);
1423 
1424 	return ret;
1425 }
1426 EXPORT_SYMBOL_GPL(regmap_update_bits);
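
/*
 * Illustrative sketch (not part of this file): setting and clearing a
 * single bit with a read/modify/write.  FOO_CTRL and FOO_CTRL_EN are
 * made up.
 *
 *	ret = regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);
 *
 * and later, to clear the same bit:
 *
 *	ret = regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, 0);
 */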
1427 
1428 /**
1429  * regmap_update_bits_check(): Perform a read/modify/write cycle on the
1430  *                           register map and report if updated
1431  *
1432  * @map: Register map to update
1433  * @reg: Register to update
1434  * @mask: Bitmask to change
1435  * @val: New value for bitmask
1436  * @change: Boolean indicating if a write was done
1437  *
1438  * Returns zero for success, a negative number on error.
1439  */
1440 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
1441 			     unsigned int mask, unsigned int val,
1442 			     bool *change)
1443 {
1444 	int ret;
1445 
1446 	map->lock(map->lock_arg);
1447 	ret = _regmap_update_bits(map, reg, mask, val, change);
1448 	map->unlock(map->lock_arg);
1449 	return ret;
1450 }
1451 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
1452 
1453 /**
1454  * regmap_register_patch(): Register and apply register updates to be
1455  *                          applied on device initialisation
1456  *
1457  * @map: Register map to apply updates to.
1458  * @regs: Values to update.
1459  * @num_regs: Number of entries in regs.
1460  *
1461  * Register a set of register updates to be applied to the device
1462  * whenever the device registers are synchronised with the cache and
1463  * apply them immediately.  Typically this is used to apply
1464  * corrections to the device defaults on startup, such as the
1465  * updates some vendors provide for undocumented registers.
1466  */
1467 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
1468 			  int num_regs)
1469 {
1470 	int i, ret;
1471 	bool bypass;
1472 
1473 	/* If needed the implementation can be extended to support this */
1474 	/* If needed, the implementation can be extended to support this */
1475 		return -EBUSY;
1476 
1477 	map->lock(map->lock_arg);
1478 
1479 	bypass = map->cache_bypass;
1480 
1481 	map->cache_bypass = true;
1482 
1483 	/* Write out first; it's useful to apply even if we fail later. */
1484 	for (i = 0; i < num_regs; i++) {
1485 		ret = _regmap_write(map, regs[i].reg, regs[i].def);
1486 		if (ret != 0) {
1487 			dev_err(map->dev, "Failed to write %x = %x: %d\n",
1488 				regs[i].reg, regs[i].def, ret);
1489 			goto out;
1490 		}
1491 	}
1492 
1493 	map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
1494 	if (map->patch != NULL) {
1495 		memcpy(map->patch, regs,
1496 		       num_regs * sizeof(struct reg_default));
1497 		map->patch_regs = num_regs;
1498 	} else {
1499 		ret = -ENOMEM;
1500 	}
1501 
1502 out:
1503 	map->cache_bypass = bypass;
1504 
1505 	map->unlock(map->lock_arg);
1506 
1507 	return ret;
1508 }
1509 EXPORT_SYMBOL_GPL(regmap_register_patch);
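
/*
 * Illustrative sketch (not part of this file): registering a vendor
 * errata patch so that it is applied now and again after every cache
 * sync.  The register/value pairs below are made up.
 *
 *	static const struct reg_default foo_patch[] = {
 *		{ 0x77, 0x0123 },
 *		{ 0x78, 0x4500 },
 *	};
 *
 *	ret = regmap_register_patch(map, foo_patch, ARRAY_SIZE(foo_patch));
 */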
1510 
1511 /**
1512  * regmap_get_val_bytes(): Report the size of a register value
1513  *
1514  * Report the size of a register value, mainly intended for use by
1515  * generic infrastructure built on top of regmap.
1516  */
1517 int regmap_get_val_bytes(struct regmap *map)
1518 {
1519 	if (map->format.format_write)
1520 		return -EINVAL;
1521 
1522 	return map->format.val_bytes;
1523 }
1524 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
1525 
1526 static int __init regmap_initcall(void)
1527 {
1528 	regmap_debugfs_initcall();
1529 
1530 	return 0;
1531 }
1532 postcore_initcall(regmap_initcall);
1533