// xref: /linux/drivers/base/regmap/regmap.c (revision dc65b1ed4bb34ab6235ff2cc6a917b9295c04c2c)
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <linux/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes, for failures during very early init, the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem, defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
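/*
 * For example (a hypothetical device name; any dev_name() string for
 * the device of interest can be used here):
 *
 * #define LOG_DEVICE "0-001a"
 */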
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
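
/*
 * Example: a minimal sketch (not part of this file) of how a driver
 * might describe its readable registers with an access table and have
 * it consulted through the helpers above.  The register addresses and
 * the "foo_" names are hypothetical.
 *
 *	static const struct regmap_range foo_rd_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *		regmap_reg_range(0x20, 0x2f),
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges = foo_rd_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_rd_ranges),
 *	};
 *
 * With .rd_table = &foo_rd_table in the regmap_config, a register such
 * as 0x10 falls outside every "yes" range, so regmap_check_range_table()
 * (and therefore regmap_readable()) returns false for it.
 */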

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
	size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}
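
/*
 * Worked example for the 12/20 format above (values are illustrative):
 * with reg = 0xabc and val = 0x12345 the four bytes become
 * out[0] = 0xab, out[1] = 0xc1, out[2] = 0x23, out[3] = 0x45,
 * i.e. the 12-bit register and the 20-bit value are packed contiguously
 * into 32 bits, most significant byte first.
 */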


static void regmap_format_2_6_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_7_17_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = val >> 8;
	out[0] = (val >> 16) | (reg << 1);
}

static void regmap_format_10_14_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be24(val << shift, buf);
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24_be(const void *buf)
{
	return get_unaligned_be24(buf);
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;
	unsigned long flags = 0;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	raw_spin_lock_irqsave(&map->raw_spinlock, flags);
	map->raw_spinlock_flags = flags;
}

static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
	struct regmap *map = __map;
	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static int dev_get_regmap_match(struct device *dev, void *res, void *data);

static int regmap_detach_dev(struct device *dev, struct regmap *map)
{
	if (!dev)
		return 0;

	return devres_release(dev, dev_get_regmap_release,
			      dev_get_regmap_match, (void *)map->name);
}

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
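
/*
 * Example: a devicetree node (illustrative, not from this file) that
 * would make regmap_get_val_endian() return REGMAP_ENDIAN_LITTLE for
 * the device it describes:
 *
 *	codec@1a {
 *		compatible = "vendor,foo-codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 */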

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL; /* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			if (config->use_raw_spinlock) {
				raw_spin_lock_init(&map->raw_spinlock);
				map->lock = regmap_lock_raw_spinlock;
				map->unlock = regmap_unlock_raw_spinlock;
				lockdep_set_class_and_name(&map->raw_spinlock,
							   lock_key, lock_name);
			} else {
				spin_lock_init(&map->spinlock);
				map->lock = regmap_lock_spinlock;
				map->unlock = regmap_unlock_spinlock;
				lockdep_set_class_and_name(&map->spinlock,
							   lock_key, lock_name);
			}
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
		map->lock_key = lock_key;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->reg_base = config->reg_base;
	map->reg_shift = config->pad_bits % 8;

	map->format.pad_bytes = config->pad_bits / 8;
	map->format.reg_shift = config->reg_shift;
	map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
	map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
	map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
	map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
	map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	} else if (config->max_raw_read && config->max_raw_write) {
		map->max_raw_read = config->max_raw_read;
		map->max_raw_write = config->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->reg_default_cb = config->reg_default_cb;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (config->read && config->write) {
		map->reg_read  = _regmap_bus_read;
		if (config->reg_update_bits)
			map->reg_update_bits = config->reg_update_bits;

		/* Bulk read/write */
		map->read = config->read;
		map->write = config->write;

		reg_endian = REGMAP_ENDIAN_NATIVE;
		val_endian = REGMAP_ENDIAN_NATIVE;
	} else if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;
		map->reg_update_bits = config->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
		/* Bulk read/write */
		map->read = bus->read;
		map->write = bus->write;

		reg_endian = regmap_get_reg_endian(bus, config);
		val_endian = regmap_get_val_endian(dev, bus, config);
	}

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		case 17:
			map->format.format_write = regmap_format_7_17_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_24_be;
			map->format.parse_val = regmap_parse_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned int sel_reg = config->ranges[j].selector_reg;
			unsigned int win_min = config->ranges[j].window_start;
			unsigned int win_max = win_min +
					       config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	if (bus && bus->free_on_exit)
		kfree(bus);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
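
/*
 * Example: a minimal sketch (not part of this file) of a typical
 * configuration reaching this code.  Drivers normally get here through
 * bus-specific wrappers such as devm_regmap_init_i2c(); the "foo_"
 * names, register width and cache choice below are hypothetical.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_MAPLE,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */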

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);

	WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");

	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
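
/*
 * Example: a minimal sketch (not part of this file) of describing and
 * using a field.  REG_FIELD() comes from <linux/regmap.h>; the register
 * address and bit positions below are hypothetical.
 *
 *	static const struct reg_field foo_en_field = REG_FIELD(0x30, 3, 3);
 *	struct regmap_field *en;
 *
 *	en = devm_regmap_field_alloc(dev, map, foo_en_field);
 *	if (IS_ERR(en))
 *		return PTR_ERR(en);
 *
 *	ret = regmap_field_write(en, 1);   // touches only bit 3 of 0x30
 */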


/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register fields.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    const struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple
 * register fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 const struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 *                       regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free register fields allocated using
 *                            devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->reg_default_cb = config->reg_default_cb;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
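
/*
 * Example: a minimal sketch (not part of this file) of the runtime
 * discovery case mentioned above.  A hypothetical driver reads the
 * device revision and then switches to a larger register map; the
 * FOO_* and foo_* names are illustrative.
 *
 *	ret = regmap_read(map, FOO_REG_REV, &rev);
 *	if (ret)
 *		return ret;
 *
 *	if (rev >= 2) {
 *		ret = regmap_reinit_cache(map, &foo_v2_regmap_config);
 *		if (ret)
 *			return ret;
 *	}
 */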

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regmap_detach_dev(map->dev, map);
	regcache_exit(map);

	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name, match any */
	if (data)
		return (*r)->name && !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
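
/*
 * Example: a minimal sketch (not part of this file) of the common MFD
 * pattern, where a child driver looks up the regmap registered by its
 * parent device:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */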

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	   window.  In that case the selector register is located on every
	   page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
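
/*
 * Example: a minimal sketch (not part of this file) of a paged range
 * that this function serves.  Virtual registers 0x100-0x1ff are reached
 * through a 16-register window at 0x10, with the page chosen via a
 * selector register; all numbers are hypothetical.
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.name = "foo-pages",
 *			.range_min = 0x100,
 *			.range_max = 0x1ff,
 *			.selector_reg = 0x0f,
 *			.selector_mask = 0x0f,
 *			.selector_shift = 0,
 *			.window_start = 0x10,
 *			.window_len = 0x10,
 *		},
 *	};
 *
 * With .ranges = foo_ranges and .num_ranges = ARRAY_SIZE(foo_ranges) in
 * the regmap_config, an access to virtual register 0x123 writes page 2
 * to the selector and then accesses window register 0x13.
 */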

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
{
	reg += map->reg_base;

	if (map->format.reg_shift > 0)
		reg >>= map->format.reg_shift;
	else if (map->format.reg_shift < 0)
		reg <<= -(map->format.reg_shift);

	return reg;
}
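
/*
 * Worked example for regmap_reg_addr() (values are illustrative): with
 * .reg_base = 0x100 and .reg_shift = REGMAP_DOWNSHIFT(2) in the config,
 * register 0x20 becomes (0x20 + 0x100) >> 2 = 0x48 on the bus.
 */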

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
				regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival, offset;
		int val_bytes = map->format.val_bytes;

		/* Cache the last written value for noinc writes */
		i = noinc ? val_len - val_bytes : 0;
		for (; i < val_len; i += val_bytes) {
			ival = map->format.parse_val(val + i);
			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
			ret = regcache_write(map, reg + offset, ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + offset, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->write(map->bus_context, map->work_buf,
				 map->format.reg_bytes +
				 map->format.pad_bytes +
				 val_len);
	else if (map->bus && map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work, fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
1842  *
1843  * @map: Map to check.
1844  */
1845 size_t regmap_get_raw_write_max(struct regmap *map)
1846 {
1847 	return map->max_raw_write;
1848 }
1849 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1850 
1851 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1852 				       unsigned int val)
1853 {
1854 	int ret;
1855 	struct regmap_range_node *range;
1856 	struct regmap *map = context;
1857 
1858 	WARN_ON(!map->format.format_write);
1859 
1860 	range = _regmap_range_lookup(map, reg);
1861 	if (range) {
1862 		ret = _regmap_select_page(map, &reg, range, 1);
1863 		if (ret != 0)
1864 			return ret;
1865 	}
1866 
1867 	reg = regmap_reg_addr(map, reg);
1868 	map->format.format_write(map, reg, val);
1869 
1870 	trace_regmap_hw_write_start(map, reg, 1);
1871 
1872 	ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
1873 
1874 	trace_regmap_hw_write_done(map, reg, 1);
1875 
1876 	return ret;
1877 }
1878 
1879 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1880 				 unsigned int val)
1881 {
1882 	struct regmap *map = context;
1883 	struct regmap_range_node *range;
1884 	int ret;
1885 
1886 	range = _regmap_range_lookup(map, reg);
1887 	if (range) {
1888 		ret = _regmap_select_page(map, &reg, range, 1);
1889 		if (ret != 0)
1890 			return ret;
1891 	}
1892 
1893 	reg = regmap_reg_addr(map, reg);
1894 	return map->bus->reg_write(map->bus_context, reg, val);
1895 }
1896 
1897 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1898 				 unsigned int val)
1899 {
1900 	struct regmap *map = context;
1901 
1902 	WARN_ON(!map->format.format_val);
1903 
1904 	map->format.format_val(map->work_buf + map->format.reg_bytes
1905 			       + map->format.pad_bytes, val, 0);
1906 	return _regmap_raw_write_impl(map, reg,
1907 				      map->work_buf +
1908 				      map->format.reg_bytes +
1909 				      map->format.pad_bytes,
1910 				      map->format.val_bytes,
1911 				      false);
1912 }
1913 
1914 static inline void *_regmap_map_get_context(struct regmap *map)
1915 {
1916 	return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
1917 }
1918 
1919 int _regmap_write(struct regmap *map, unsigned int reg,
1920 		  unsigned int val)
1921 {
1922 	int ret;
1923 	void *context = _regmap_map_get_context(map);
1924 
1925 	if (!regmap_writeable(map, reg))
1926 		return -EIO;
1927 
1928 	if (!map->cache_bypass && !map->defer_caching) {
1929 		ret = regcache_write(map, reg, val);
1930 		if (ret != 0)
1931 			return ret;
1932 		if (map->cache_only) {
1933 			map->cache_dirty = true;
1934 			return 0;
1935 		}
1936 	}
1937 
1938 	ret = map->reg_write(context, reg, val);
1939 	if (ret == 0) {
1940 		if (regmap_should_log(map))
1941 			dev_info(map->dev, "%x <= %x\n", reg, val);
1942 
1943 		trace_regmap_reg_write(map, reg, val);
1944 	}
1945 
1946 	return ret;
1947 }
1948 
1949 /**
1950  * regmap_write() - Write a value to a single register
1951  *
1952  * @map: Register map to write to
1953  * @reg: Register to write to
1954  * @val: Value to be written
1955  *
1956  * A value of zero will be returned on success, a negative errno will
1957  * be returned in error cases.
1958  */
1959 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1960 {
1961 	int ret;
1962 
1963 	if (!IS_ALIGNED(reg, map->reg_stride))
1964 		return -EINVAL;
1965 
1966 	map->lock(map->lock_arg);
1967 
1968 	ret = _regmap_write(map, reg, val);
1969 
1970 	map->unlock(map->lock_arg);
1971 
1972 	return ret;
1973 }
1974 EXPORT_SYMBOL_GPL(regmap_write);
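
/*
 * Example: a minimal usage sketch for regmap_write(), not part of this
 * file.  The foo driver, FOO_RESET register and FOO_RESET_MAGIC value
 * are hypothetical; the error code simply propagates to the caller.
 *
 *	static int foo_reset(struct foo *foo)
 *	{
 *		return regmap_write(foo->regmap, FOO_RESET, FOO_RESET_MAGIC);
 *	}
 */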
1975 
1976 /**
1977  * regmap_write_async() - Write a value to a single register asynchronously
1978  *
1979  * @map: Register map to write to
1980  * @reg: Register to write to
1981  * @val: Value to be written
1982  *
1983  * A value of zero will be returned on success, a negative errno will
1984  * be returned in error cases.
1985  */
1986 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1987 {
1988 	int ret;
1989 
1990 	if (!IS_ALIGNED(reg, map->reg_stride))
1991 		return -EINVAL;
1992 
1993 	map->lock(map->lock_arg);
1994 
1995 	map->async = true;
1996 
1997 	ret = _regmap_write(map, reg, val);
1998 
1999 	map->async = false;
2000 
2001 	map->unlock(map->lock_arg);
2002 
2003 	return ret;
2004 }
2005 EXPORT_SYMBOL_GPL(regmap_write_async);
2006 
2007 int _regmap_raw_write(struct regmap *map, unsigned int reg,
2008 		      const void *val, size_t val_len, bool noinc)
2009 {
2010 	size_t val_bytes = map->format.val_bytes;
2011 	size_t val_count = val_len / val_bytes;
2012 	size_t chunk_count, chunk_bytes;
2013 	size_t chunk_regs = val_count;
2014 	int ret, i;
2015 
2016 	if (!val_count)
2017 		return -EINVAL;
2018 
2019 	if (map->use_single_write)
2020 		chunk_regs = 1;
2021 	else if (map->max_raw_write && val_len > map->max_raw_write)
2022 		chunk_regs = map->max_raw_write / val_bytes;
2023 
2024 	chunk_count = val_count / chunk_regs;
2025 	chunk_bytes = chunk_regs * val_bytes;
2026 
2027 	/* Write as many bytes as possible with chunk_size */
2028 	for (i = 0; i < chunk_count; i++) {
2029 		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
2030 		if (ret)
2031 			return ret;
2032 
2033 		reg += regmap_get_offset(map, chunk_regs);
2034 		val += chunk_bytes;
2035 		val_len -= chunk_bytes;
2036 	}
2037 
2038 	/* Write remaining bytes */
2039 	if (val_len)
2040 		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
2041 
2042 	return ret;
2043 }
2044 
2045 /**
2046  * regmap_raw_write() - Write raw values to one or more registers
2047  *
2048  * @map: Register map to write to
2049  * @reg: Initial register to write to
2050  * @val: Block of data to be written, laid out for direct transmission to the
2051  *       device
2052  * @val_len: Length of data pointed to by val.
2053  *
2054  * This function is intended to be used for things like firmware
2055  * download where a large block of data needs to be transferred to the
2056  * device.  No formatting will be done on the data provided.
2057  *
2058  * A value of zero will be returned on success, a negative errno will
2059  * be returned in error cases.
2060  */
2061 int regmap_raw_write(struct regmap *map, unsigned int reg,
2062 		     const void *val, size_t val_len)
2063 {
2064 	int ret;
2065 
2066 	if (!regmap_can_raw_write(map))
2067 		return -EINVAL;
2068 	if (val_len % map->format.val_bytes)
2069 		return -EINVAL;
2070 
2071 	map->lock(map->lock_arg);
2072 
2073 	ret = _regmap_raw_write(map, reg, val, val_len, false);
2074 
2075 	map->unlock(map->lock_arg);
2076 
2077 	return ret;
2078 }
2079 EXPORT_SYMBOL_GPL(regmap_raw_write);
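
/*
 * Example: a raw write sketch, not part of this file.  The hypothetical
 * foo device takes a firmware blob that is already laid out in bus
 * order, so the buffer is passed through without any value formatting:
 *
 *	static int foo_download_fw(struct foo *foo, const void *fw, size_t len)
 *	{
 *		if (len % 2)		// device has 16-bit registers
 *			return -EINVAL;
 *
 *		return regmap_raw_write(foo->regmap, FOO_FW_BASE, fw, len);
 *	}
 */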
2080 
2081 static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
2082 				  void *val, unsigned int val_len, bool write)
2083 {
2084 	size_t val_bytes = map->format.val_bytes;
2085 	size_t val_count = val_len / val_bytes;
2086 	unsigned int lastval;
2087 	u8 *u8p;
2088 	u16 *u16p;
2089 	u32 *u32p;
2090 	int ret;
2091 	int i;
2092 
2093 	switch (val_bytes) {
2094 	case 1:
2095 		u8p = val;
2096 		if (write)
2097 			lastval = (unsigned int)u8p[val_count - 1];
2098 		break;
2099 	case 2:
2100 		u16p = val;
2101 		if (write)
2102 			lastval = (unsigned int)u16p[val_count - 1];
2103 		break;
2104 	case 4:
2105 		u32p = val;
2106 		if (write)
2107 			lastval = (unsigned int)u32p[val_count - 1];
2108 		break;
2109 	default:
2110 		return -EINVAL;
2111 	}
2112 
2113 	/*
2114 	 * Update the cache with the last value we write; the rest has
2115 	 * already gone down into the hardware FIFO and we can't cache
2116 	 * FIFOs. This makes sure a single read from the cache will work.
2117 	 */
2118 	if (write) {
2119 		if (!map->cache_bypass && !map->defer_caching) {
2120 			ret = regcache_write(map, reg, lastval);
2121 			if (ret != 0)
2122 				return ret;
2123 			if (map->cache_only) {
2124 				map->cache_dirty = true;
2125 				return 0;
2126 			}
2127 		}
2128 		ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
2129 	} else {
2130 		ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
2131 	}
2132 
2133 	if (!ret && regmap_should_log(map)) {
2134 		dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
2135 		for (i = 0; i < val_count; i++) {
2136 			switch (val_bytes) {
2137 			case 1:
2138 				pr_cont("%x", u8p[i]);
2139 				break;
2140 			case 2:
2141 				pr_cont("%x", u16p[i]);
2142 				break;
2143 			case 4:
2144 				pr_cont("%x", u32p[i]);
2145 				break;
2146 			default:
2147 				break;
2148 			}
2149 			if (i == (val_count - 1))
2150 				pr_cont("]\n");
2151 			else
2152 				pr_cont(",");
2153 		}
2154 	}
2155 
2156 	return ret;
2157 }
2158 
2159 /**
2160  * regmap_noinc_write() - Write data to a register without incrementing the
2161  *			register number
2162  *
2163  * @map: Register map to write to
2164  * @reg: Register to write to
2165  * @val: Pointer to data buffer
2166  * @val_len: Length of the data to be written, in bytes.
2167  *
2168  * The regmap API usually assumes that bulk bus write operations will write a
2169  * range of registers. Some devices have certain registers for which a write
2170  * operation can write to an internal FIFO.
2171  *
2172  * The target register must be volatile but registers after it can be
2173  * completely unrelated cacheable registers.
2174  *
2175  * This will attempt multiple writes as required to write val_len bytes.
2176  *
2177  * A value of zero will be returned on success, a negative errno will be
2178  * returned in error cases.
2179  */
2180 int regmap_noinc_write(struct regmap *map, unsigned int reg,
2181 		      const void *val, size_t val_len)
2182 {
2183 	size_t write_len;
2184 	int ret;
2185 
2186 	if (!map->write && !(map->bus && map->bus->reg_noinc_write))
2187 		return -EINVAL;
2188 	if (val_len % map->format.val_bytes)
2189 		return -EINVAL;
2190 	if (!IS_ALIGNED(reg, map->reg_stride))
2191 		return -EINVAL;
2192 	if (val_len == 0)
2193 		return -EINVAL;
2194 
2195 	map->lock(map->lock_arg);
2196 
2197 	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2198 		ret = -EINVAL;
2199 		goto out_unlock;
2200 	}
2201 
2202 	/*
2203 	 * Use the accelerated operation if we can. The val drops the const
2204 	 * typing in order to facilitate code reuse in regmap_noinc_readwrite().
2205 	 */
2206 	if (map->bus && map->bus->reg_noinc_write) {
2207 		ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
2208 		goto out_unlock;
2209 	}
2210 
2211 	while (val_len) {
2212 		if (map->max_raw_write && map->max_raw_write < val_len)
2213 			write_len = map->max_raw_write;
2214 		else
2215 			write_len = val_len;
2216 		ret = _regmap_raw_write(map, reg, val, write_len, true);
2217 		if (ret)
2218 			goto out_unlock;
2219 		val = ((u8 *)val) + write_len;
2220 		val_len -= write_len;
2221 	}
2222 
2223 out_unlock:
2224 	map->unlock(map->lock_arg);
2225 	return ret;
2226 }
2227 EXPORT_SYMBOL_GPL(regmap_noinc_write);
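
/*
 * Example: a FIFO write sketch, not part of this file.  FOO_TX_FIFO is
 * a hypothetical volatile register backed by an internal FIFO, so every
 * byte must land on the same register address:
 *
 *	static int foo_send(struct foo *foo, const u8 *buf, size_t len)
 *	{
 *		return regmap_noinc_write(foo->regmap, FOO_TX_FIFO, buf, len);
 *	}
 */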
2228 
2229 /**
2230  * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
2231  *                                   register field.
2232  *
2233  * @field: Register field to write to
2234  * @mask: Bitmask to change
2235  * @val: Value to be written
2236  * @change: Boolean indicating if a write was done
2237  * @async: Boolean indicating whether to write asynchronously
2238  * @force: Boolean indicating whether to force the write even if unchanged
2239  *
2240  * Perform a read/modify/write cycle on the register field with the
2241  * change, async and force options.
2242  *
2243  * A value of zero will be returned on success, a negative errno will
2244  * be returned in error cases.
2245  */
2246 int regmap_field_update_bits_base(struct regmap_field *field,
2247 				  unsigned int mask, unsigned int val,
2248 				  bool *change, bool async, bool force)
2249 {
2250 	mask = (mask << field->shift) & field->mask;
2251 
2252 	return regmap_update_bits_base(field->regmap, field->reg,
2253 				       mask, val << field->shift,
2254 				       change, async, force);
2255 }
2256 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
2257 
2258 /**
2259  * regmap_field_test_bits() - Check if all specified bits are set in a
2260  *                            register field.
2261  *
2262  * @field: Register field to operate on
2263  * @bits: Bits to test
2264  *
2265  * Returns negative errno if the underlying regmap_field_read() fails,
2266  * 0 if at least one of the tested bits is not set and 1 if all tested
2267  * bits are set.
2268  */
2269 int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
2270 {
2271 	unsigned int val;
2272 	int ret;
2273 
2274 	ret = regmap_field_read(field, &val);
2275 	if (ret)
2276 		return ret;
2277 
2278 	return (val & bits) == bits;
2279 }
2280 EXPORT_SYMBOL_GPL(regmap_field_test_bits);
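
/*
 * Example: a register field sketch, not part of this file.  The
 * REG_FIELD() layout and all FOO_* names are hypothetical.  The field
 * is allocated once at probe time; later writes through the field API
 * apply the shift and mask from the field definition automatically:
 *
 *	static const struct reg_field foo_gain = REG_FIELD(FOO_CTRL, 2, 5);
 *
 *	foo->gain = devm_regmap_field_alloc(dev, foo->regmap, foo_gain);
 *	if (IS_ERR(foo->gain))
 *		return PTR_ERR(foo->gain);
 *
 *	ret = regmap_field_write(foo->gain, 0x3);
 */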
2281 
2282 /**
2283  * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on
2284  *                                    a register field with port ID
2285  *
2286  * @field: Register field to write to
2287  * @id: port ID
2288  * @mask: Bitmask to change
2289  * @val: Value to be written
2290  * @change: Boolean indicating if a write was done
2291  * @async: Boolean indicating whether to write asynchronously
2292  * @force: Boolean indicating whether to force the write even if unchanged
2293  *
2294  * A value of zero will be returned on success, a negative errno will
2295  * be returned in error cases.
2296  */
2297 int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
2298 				   unsigned int mask, unsigned int val,
2299 				   bool *change, bool async, bool force)
2300 {
2301 	if (id >= field->id_size)
2302 		return -EINVAL;
2303 
2304 	mask = (mask << field->shift) & field->mask;
2305 
2306 	return regmap_update_bits_base(field->regmap,
2307 				       field->reg + (field->id_offset * id),
2308 				       mask, val << field->shift,
2309 				       change, async, force);
2310 }
2311 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2312 
2313 /**
2314  * regmap_bulk_write() - Write multiple registers to the device
2315  *
2316  * @map: Register map to write to
2317  * @reg: First register to write to
2318  * @val: Block of data to be written, in native register size for device
2319  * @val_count: Number of registers to write
2320  *
2321  * This function is intended to be used for writing a large block of
2322  * data to the device, either in a single transfer or multiple transfers.
2323  *
2324  * A value of zero will be returned on success, a negative errno will
2325  * be returned in error cases.
2326  */
2327 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2328 		     size_t val_count)
2329 {
2330 	int ret = 0, i;
2331 	size_t val_bytes = map->format.val_bytes;
2332 
2333 	if (!IS_ALIGNED(reg, map->reg_stride))
2334 		return -EINVAL;
2335 
2336 	/*
2337 	 * Some devices don't support bulk write; for them we issue a series of
2338 	 * single write operations.
2339 	 */
2340 	if (!map->write || !map->format.parse_inplace) {
2341 		map->lock(map->lock_arg);
2342 		for (i = 0; i < val_count; i++) {
2343 			unsigned int ival;
2344 
2345 			switch (val_bytes) {
2346 			case 1:
2347 				ival = *(u8 *)(val + (i * val_bytes));
2348 				break;
2349 			case 2:
2350 				ival = *(u16 *)(val + (i * val_bytes));
2351 				break;
2352 			case 4:
2353 				ival = *(u32 *)(val + (i * val_bytes));
2354 				break;
2355 			default:
2356 				ret = -EINVAL;
2357 				goto out;
2358 			}
2359 
2360 			ret = _regmap_write(map,
2361 					    reg + regmap_get_offset(map, i),
2362 					    ival);
2363 			if (ret != 0)
2364 				goto out;
2365 		}
2366 out:
2367 		map->unlock(map->lock_arg);
2368 	} else {
2369 		void *wval;
2370 
2371 		wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
2372 		if (!wval)
2373 			return -ENOMEM;
2374 
2375 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
2376 			map->format.parse_inplace(wval + i);
2377 
2378 		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2379 
2380 		kfree(wval);
2381 	}
2382 
2383 	if (!ret)
2384 		trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
2385 
2386 	return ret;
2387 }
2388 EXPORT_SYMBOL_GPL(regmap_bulk_write);
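
/*
 * Example: a bulk write sketch, not part of this file.  Four
 * native-sized coefficients are written to consecutive registers
 * starting at the hypothetical FOO_COEF_BASE address:
 *
 *	static const u16 coefs[] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(foo->regmap, FOO_COEF_BASE,
 *				coefs, ARRAY_SIZE(coefs));
 */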
2389 
2390 /*
2391  * _regmap_raw_multi_reg_write()
2392  *
2393  * The (register, newvalue) pairs in regs have not been formatted, but
2394  * they are all in the same page and have been changed to be page
2395  * relative. The page register has been written if that was necessary.
2396  */
2397 static int _regmap_raw_multi_reg_write(struct regmap *map,
2398 				       const struct reg_sequence *regs,
2399 				       size_t num_regs)
2400 {
2401 	int ret;
2402 	void *buf;
2403 	int i;
2404 	u8 *u8;
2405 	size_t val_bytes = map->format.val_bytes;
2406 	size_t reg_bytes = map->format.reg_bytes;
2407 	size_t pad_bytes = map->format.pad_bytes;
2408 	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2409 	size_t len = pair_size * num_regs;
2410 
2411 	if (!len)
2412 		return -EINVAL;
2413 
2414 	buf = kzalloc(len, GFP_KERNEL);
2415 	if (!buf)
2416 		return -ENOMEM;
2417 
2418 	/* We have to linearise by hand. */
2419 
2420 	u8 = buf;
2421 
2422 	for (i = 0; i < num_regs; i++) {
2423 		unsigned int reg = regs[i].reg;
2424 		unsigned int val = regs[i].def;
2425 		trace_regmap_hw_write_start(map, reg, 1);
2426 		reg = regmap_reg_addr(map, reg);
2427 		map->format.format_reg(u8, reg, map->reg_shift);
2428 		u8 += reg_bytes + pad_bytes;
2429 		map->format.format_val(u8, val, 0);
2430 		u8 += val_bytes;
2431 	}
2432 	u8 = buf;
2433 	*u8 |= map->write_flag_mask;
2434 
2435 	ret = map->write(map->bus_context, buf, len);
2436 
2437 	kfree(buf);
2438 
2439 	for (i = 0; i < num_regs; i++) {
2440 		int reg = regs[i].reg;
2441 		trace_regmap_hw_write_done(map, reg, 1);
2442 	}
2443 	return ret;
2444 }
2445 
2446 static unsigned int _regmap_register_page(struct regmap *map,
2447 					  unsigned int reg,
2448 					  struct regmap_range_node *range)
2449 {
2450 	unsigned int win_page = (reg - range->range_min) / range->window_len;
2451 
2452 	return win_page;
2453 }
2454 
2455 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
2456 					       struct reg_sequence *regs,
2457 					       size_t num_regs)
2458 {
2459 	int ret;
2460 	int i, n;
2461 	struct reg_sequence *base;
2462 	unsigned int this_page = 0;
2463 	unsigned int page_change = 0;
2464 	/*
2465 	 * The set of registers is not necessarily in order, but
2466 	 * since the write order must be preserved this algorithm
2467 	 * chops the set each time the page changes. This also applies
2468 	 * if there is a delay required at any point in the sequence.
2469 	 */
2470 	base = regs;
2471 	for (i = 0, n = 0; i < num_regs; i++, n++) {
2472 		unsigned int reg = regs[i].reg;
2473 		struct regmap_range_node *range;
2474 
2475 		range = _regmap_range_lookup(map, reg);
2476 		if (range) {
2477 			unsigned int win_page = _regmap_register_page(map, reg,
2478 								      range);
2479 
2480 			if (i == 0)
2481 				this_page = win_page;
2482 			if (win_page != this_page) {
2483 				this_page = win_page;
2484 				page_change = 1;
2485 			}
2486 		}
2487 
2488 		/* If we have both a page change and a delay, make sure to
2489 		 * write the regs and apply the delay before we change the
2490 		 * page.
2491 		 */
2492 
2493 		if (page_change || regs[i].delay_us) {
2494 
2495 			/* For situations where the first write requires
2496 			 * a delay, we need to make sure we don't call
2497 			 * raw_multi_reg_write with n=0.
2498 			 * This can't occur with page breaks, as we
2499 			 * never write on the first iteration.
2500 			 */
2501 			if (regs[i].delay_us && i == 0)
2502 				n = 1;
2503 
2504 			ret = _regmap_raw_multi_reg_write(map, base, n);
2505 			if (ret != 0)
2506 				return ret;
2507 
2508 			if (regs[i].delay_us) {
2509 				if (map->can_sleep)
2510 					fsleep(regs[i].delay_us);
2511 				else
2512 					udelay(regs[i].delay_us);
2513 			}
2514 
2515 			base += n;
2516 			n = 0;
2517 
2518 			if (page_change) {
2519 				ret = _regmap_select_page(map,
2520 							  &base[n].reg,
2521 							  range, 1);
2522 				if (ret != 0)
2523 					return ret;
2524 
2525 				page_change = 0;
2526 			}
2527 
2528 		}
2529 
2530 	}
2531 	if (n > 0)
2532 		return _regmap_raw_multi_reg_write(map, base, n);
2533 	return 0;
2534 }
2535 
2536 static int _regmap_multi_reg_write(struct regmap *map,
2537 				   const struct reg_sequence *regs,
2538 				   size_t num_regs)
2539 {
2540 	int i;
2541 	int ret;
2542 
2543 	if (!map->can_multi_write) {
2544 		for (i = 0; i < num_regs; i++) {
2545 			ret = _regmap_write(map, regs[i].reg, regs[i].def);
2546 			if (ret != 0)
2547 				return ret;
2548 
2549 			if (regs[i].delay_us) {
2550 				if (map->can_sleep)
2551 					fsleep(regs[i].delay_us);
2552 				else
2553 					udelay(regs[i].delay_us);
2554 			}
2555 		}
2556 		return 0;
2557 	}
2558 
2559 	if (!map->format.parse_inplace)
2560 		return -EINVAL;
2561 
2562 	if (map->writeable_reg)
2563 		for (i = 0; i < num_regs; i++) {
2564 			int reg = regs[i].reg;
2565 			if (!map->writeable_reg(map->dev, reg))
2566 				return -EINVAL;
2567 			if (!IS_ALIGNED(reg, map->reg_stride))
2568 				return -EINVAL;
2569 		}
2570 
2571 	if (!map->cache_bypass) {
2572 		for (i = 0; i < num_regs; i++) {
2573 			unsigned int val = regs[i].def;
2574 			unsigned int reg = regs[i].reg;
2575 			ret = regcache_write(map, reg, val);
2576 			if (ret) {
2577 				dev_err(map->dev,
2578 					"Error in caching of register: %x ret: %d\n",
2579 					reg, ret);
2580 				return ret;
2581 			}
2582 		}
2583 		if (map->cache_only) {
2584 			map->cache_dirty = true;
2585 			return 0;
2586 		}
2587 	}
2588 
2589 	WARN_ON(!map->bus);
2590 
2591 	for (i = 0; i < num_regs; i++) {
2592 		unsigned int reg = regs[i].reg;
2593 		struct regmap_range_node *range;
2594 
2595 		/* Coalesce all the writes between a page break or a delay
2596 		 * in a sequence
2597 		 */
2598 		range = _regmap_range_lookup(map, reg);
2599 		if (range || regs[i].delay_us) {
2600 			size_t len = sizeof(struct reg_sequence) * num_regs;
2601 			struct reg_sequence *base = kmemdup(regs, len,
2602 							   GFP_KERNEL);
2603 			if (!base)
2604 				return -ENOMEM;
2605 			ret = _regmap_range_multi_paged_reg_write(map, base,
2606 								  num_regs);
2607 			kfree(base);
2608 
2609 			return ret;
2610 		}
2611 	}
2612 	return _regmap_raw_multi_reg_write(map, regs, num_regs);
2613 }
2614 
2615 /**
2616  * regmap_multi_reg_write() - Write multiple registers to the device
2617  *
2618  * @map: Register map to write to
2619  * @regs: Array of structures containing register,value to be written
2620  * @num_regs: Number of registers to write
2621  *
2622  * Write multiple registers to the device where the set of register, value
2623  * pairs are supplied in any order, possibly not all in a single range.
2624  *
2625  * The 'normal' block write mode will ultimately send data on the
2626  * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2627  * addressed. However, this alternative block multi write mode will send
2628  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2629  * must of course support the mode.
2630  *
2631  * A value of zero will be returned on success, a negative errno will be
2632  * returned in error cases.
2633  */
2634 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2635 			   int num_regs)
2636 {
2637 	int ret;
2638 
2639 	map->lock(map->lock_arg);
2640 
2641 	ret = _regmap_multi_reg_write(map, regs, num_regs);
2642 
2643 	map->unlock(map->lock_arg);
2644 
2645 	return ret;
2646 }
2647 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
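
/*
 * Example: a multi-register write sketch, not part of this file.  The
 * register addresses, values and the 10us settling delay are
 * hypothetical; delay_us makes the sequence pause after the second
 * write before the third one is issued:
 *
 *	static const struct reg_sequence foo_init_seq[] = {
 *		{ .reg = 0x01, .def = 0x80 },
 *		{ .reg = 0x05, .def = 0x12, .delay_us = 10 },
 *		{ .reg = 0x06, .def = 0x34 },
 *	};
 *
 *	ret = regmap_multi_reg_write(foo->regmap, foo_init_seq,
 *				     ARRAY_SIZE(foo_init_seq));
 */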
2648 
2649 /**
2650  * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2651  *                                     device but not the cache
2652  *
2653  * @map: Register map to write to
2654  * @regs: Array of structures containing register,value to be written
2655  * @num_regs: Number of registers to write
2656  *
2657  * Write multiple registers to the device but not the cache, where the set
2658  * of registers is supplied in any order.
2659  *
2660  * This function is intended to be used for writing a large block of data
2661  * atomically to the device in single transfer for those I2C client devices
2662  * that implement this alternative block write mode.
2663  *
2664  * A value of zero will be returned on success, a negative errno will
2665  * be returned in error cases.
2666  */
2667 int regmap_multi_reg_write_bypassed(struct regmap *map,
2668 				    const struct reg_sequence *regs,
2669 				    int num_regs)
2670 {
2671 	int ret;
2672 	bool bypass;
2673 
2674 	map->lock(map->lock_arg);
2675 
2676 	bypass = map->cache_bypass;
2677 	map->cache_bypass = true;
2678 
2679 	ret = _regmap_multi_reg_write(map, regs, num_regs);
2680 
2681 	map->cache_bypass = bypass;
2682 
2683 	map->unlock(map->lock_arg);
2684 
2685 	return ret;
2686 }
2687 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2688 
2689 /**
2690  * regmap_raw_write_async() - Write raw values to one or more registers
2691  *                            asynchronously
2692  *
2693  * @map: Register map to write to
2694  * @reg: Initial register to write to
2695  * @val: Block of data to be written, laid out for direct transmission to the
2696  *       device.  Must be valid until regmap_async_complete() is called.
2697  * @val_len: Length of data pointed to by val.
2698  *
2699  * This function is intended to be used for things like firmware
2700  * download where a large block of data needs to be transferred to the
2701  * device.  No formatting will be done on the data provided.
2702  *
2703  * If supported by the underlying bus the write will be scheduled
2704  * asynchronously, helping maximise I/O speed on higher speed buses
2705  * like SPI.  regmap_async_complete() can be called to ensure that all
2706  * asynchronous writes have been completed.
2707  *
2708  * A value of zero will be returned on success, a negative errno will
2709  * be returned in error cases.
2710  */
2711 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2712 			   const void *val, size_t val_len)
2713 {
2714 	int ret;
2715 
2716 	if (val_len % map->format.val_bytes)
2717 		return -EINVAL;
2718 	if (!IS_ALIGNED(reg, map->reg_stride))
2719 		return -EINVAL;
2720 
2721 	map->lock(map->lock_arg);
2722 
2723 	map->async = true;
2724 
2725 	ret = _regmap_raw_write(map, reg, val, val_len, false);
2726 
2727 	map->async = false;
2728 
2729 	map->unlock(map->lock_arg);
2730 
2731 	return ret;
2732 }
2733 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
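
/*
 * Example: an asynchronous write sketch, not part of this file.  The
 * hypothetical firmware buffer must stay valid until
 * regmap_async_complete() has been called to collect the result:
 *
 *	ret = regmap_raw_write_async(foo->regmap, FOO_FW_BASE,
 *				     foo->fw_buf, foo->fw_len);
 *	if (ret)
 *		return ret;
 *
 *	return regmap_async_complete(foo->regmap);
 */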
2734 
2735 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2736 			    unsigned int val_len, bool noinc)
2737 {
2738 	struct regmap_range_node *range;
2739 	int ret;
2740 
2741 	if (!map->read)
2742 		return -EINVAL;
2743 
2744 	range = _regmap_range_lookup(map, reg);
2745 	if (range) {
2746 		ret = _regmap_select_page(map, &reg, range,
2747 					  noinc ? 1 : val_len / map->format.val_bytes);
2748 		if (ret != 0)
2749 			return ret;
2750 	}
2751 
2752 	reg = regmap_reg_addr(map, reg);
2753 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
2754 	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2755 				      map->read_flag_mask);
2756 	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2757 
2758 	ret = map->read(map->bus_context, map->work_buf,
2759 			map->format.reg_bytes + map->format.pad_bytes,
2760 			val, val_len);
2761 
2762 	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2763 
2764 	return ret;
2765 }
2766 
2767 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2768 				unsigned int *val)
2769 {
2770 	struct regmap *map = context;
2771 	struct regmap_range_node *range;
2772 	int ret;
2773 
2774 	range = _regmap_range_lookup(map, reg);
2775 	if (range) {
2776 		ret = _regmap_select_page(map, &reg, range, 1);
2777 		if (ret != 0)
2778 			return ret;
2779 	}
2780 
2781 	reg = regmap_reg_addr(map, reg);
2782 	return map->bus->reg_read(map->bus_context, reg, val);
2783 }
2784 
2785 static int _regmap_bus_read(void *context, unsigned int reg,
2786 			    unsigned int *val)
2787 {
2788 	int ret;
2789 	struct regmap *map = context;
2790 	void *work_val = map->work_buf + map->format.reg_bytes +
2791 		map->format.pad_bytes;
2792 
2793 	if (!map->format.parse_val)
2794 		return -EINVAL;
2795 
2796 	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2797 	if (ret == 0)
2798 		*val = map->format.parse_val(work_val);
2799 
2800 	return ret;
2801 }
2802 
2803 static int _regmap_read(struct regmap *map, unsigned int reg,
2804 			unsigned int *val)
2805 {
2806 	int ret;
2807 	void *context = _regmap_map_get_context(map);
2808 
2809 	if (!map->cache_bypass) {
2810 		ret = regcache_read(map, reg, val);
2811 		if (ret == 0)
2812 			return 0;
2813 	}
2814 
2815 	if (map->cache_only)
2816 		return -EBUSY;
2817 
2818 	if (!regmap_readable(map, reg))
2819 		return -EIO;
2820 
2821 	ret = map->reg_read(context, reg, val);
2822 	if (ret == 0) {
2823 		if (regmap_should_log(map))
2824 			dev_info(map->dev, "%x => %x\n", reg, *val);
2825 
2826 		trace_regmap_reg_read(map, reg, *val);
2827 
2828 		if (!map->cache_bypass)
2829 			regcache_write(map, reg, *val);
2830 	}
2831 
2832 	return ret;
2833 }
2834 
2835 /**
2836  * regmap_read() - Read a value from a single register
2837  *
2838  * @map: Register map to read from
2839  * @reg: Register to be read from
2840  * @val: Pointer to store read value
2841  *
2842  * A value of zero will be returned on success, a negative errno will
2843  * be returned in error cases.
2844  */
2845 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2846 {
2847 	int ret;
2848 
2849 	if (!IS_ALIGNED(reg, map->reg_stride))
2850 		return -EINVAL;
2851 
2852 	map->lock(map->lock_arg);
2853 
2854 	ret = _regmap_read(map, reg, val);
2855 
2856 	map->unlock(map->lock_arg);
2857 
2858 	return ret;
2859 }
2860 EXPORT_SYMBOL_GPL(regmap_read);
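
/*
 * Example: a minimal usage sketch for regmap_read(), not part of this
 * file.  FOO_STATUS is hypothetical; for non-volatile registers the
 * read may be satisfied from the register cache without touching the
 * hardware:
 *
 *	unsigned int status;
 *	int ret;
 *
 *	ret = regmap_read(foo->regmap, FOO_STATUS, &status);
 *	if (ret)
 *		return ret;
 */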
2861 
2862 /**
2863  * regmap_read_bypassed() - Read a value from a single register directly
2864  *			    from the device, bypassing the cache
2865  *
2866  * @map: Register map to read from
2867  * @reg: Register to be read from
2868  * @val: Pointer to store read value
2869  *
2870  * A value of zero will be returned on success, a negative errno will
2871  * be returned in error cases.
2872  */
2873 int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
2874 {
2875 	int ret;
2876 	bool bypass, cache_only;
2877 
2878 	if (!IS_ALIGNED(reg, map->reg_stride))
2879 		return -EINVAL;
2880 
2881 	map->lock(map->lock_arg);
2882 
2883 	bypass = map->cache_bypass;
2884 	cache_only = map->cache_only;
2885 	map->cache_bypass = true;
2886 	map->cache_only = false;
2887 
2888 	ret = _regmap_read(map, reg, val);
2889 
2890 	map->cache_bypass = bypass;
2891 	map->cache_only = cache_only;
2892 
2893 	map->unlock(map->lock_arg);
2894 
2895 	return ret;
2896 }
2897 EXPORT_SYMBOL_GPL(regmap_read_bypassed);
2898 
2899 /**
2900  * regmap_raw_read() - Read raw data from the device
2901  *
2902  * @map: Register map to read from
2903  * @reg: First register to be read from
2904  * @val: Pointer to store read value
2905  * @val_len: Size of data to read
2906  *
2907  * A value of zero will be returned on success, a negative errno will
2908  * be returned in error cases.
2909  */
2910 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2911 		    size_t val_len)
2912 {
2913 	size_t val_bytes = map->format.val_bytes;
2914 	size_t val_count = val_len / val_bytes;
2915 	unsigned int v;
2916 	int ret, i;
2917 
2918 	if (val_len % map->format.val_bytes)
2919 		return -EINVAL;
2920 	if (!IS_ALIGNED(reg, map->reg_stride))
2921 		return -EINVAL;
2922 	if (val_count == 0)
2923 		return -EINVAL;
2924 
2925 	map->lock(map->lock_arg);
2926 
2927 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2928 	    map->cache_type == REGCACHE_NONE) {
2929 		size_t chunk_count, chunk_bytes;
2930 		size_t chunk_regs = val_count;
2931 
2932 		if (!map->cache_bypass && map->cache_only) {
2933 			ret = -EBUSY;
2934 			goto out;
2935 		}
2936 
2937 		if (!map->read) {
2938 			ret = -ENOTSUPP;
2939 			goto out;
2940 		}
2941 
2942 		if (map->use_single_read)
2943 			chunk_regs = 1;
2944 		else if (map->max_raw_read && val_len > map->max_raw_read)
2945 			chunk_regs = map->max_raw_read / val_bytes;
2946 
2947 		chunk_count = val_count / chunk_regs;
2948 		chunk_bytes = chunk_regs * val_bytes;
2949 
2950 		/* Read bytes that fit into whole chunks */
2951 		for (i = 0; i < chunk_count; i++) {
2952 			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
2953 			if (ret != 0)
2954 				goto out;
2955 
2956 			reg += regmap_get_offset(map, chunk_regs);
2957 			val += chunk_bytes;
2958 			val_len -= chunk_bytes;
2959 		}
2960 
2961 		/* Read remaining bytes */
2962 		if (val_len) {
2963 			ret = _regmap_raw_read(map, reg, val, val_len, false);
2964 			if (ret != 0)
2965 				goto out;
2966 		}
2967 	} else {
2968 		/* Otherwise go word by word for the cache; should be low
2969 		 * cost as we expect to hit the cache.
2970 		 */
2971 		for (i = 0; i < val_count; i++) {
2972 			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2973 					   &v);
2974 			if (ret != 0)
2975 				goto out;
2976 
2977 			map->format.format_val(val + (i * val_bytes), v, 0);
2978 		}
2979 	}
2980 
2981  out:
2982 	map->unlock(map->lock_arg);
2983 
2984 	return ret;
2985 }
2986 EXPORT_SYMBOL_GPL(regmap_raw_read);
2987 
2988 /**
2989  * regmap_noinc_read() - Read data from a register without incrementing the
2990  *			register number
2991  *
2992  * @map: Register map to read from
2993  * @reg: Register to read from
2994  * @val: Pointer to data buffer
2995  * @val_len: Length of output buffer in bytes.
2996  *
2997  * The regmap API usually assumes that bulk read operations will read a
2998  * range of registers. Some devices have certain registers for which a read
2999  * operation read will read from an internal FIFO.
3000  * operation will read from an internal FIFO.
3001  * The target register must be volatile but registers after it can be
3002  * completely unrelated cacheable registers.
3003  *
3004  * This will attempt multiple reads as required to read val_len bytes.
3005  *
3006  * A value of zero will be returned on success, a negative errno will be
3007  * returned in error cases.
3008  */
3009 int regmap_noinc_read(struct regmap *map, unsigned int reg,
3010 		      void *val, size_t val_len)
3011 {
3012 	size_t read_len;
3013 	int ret;
3014 
3015 	if (!map->read)
3016 		return -ENOTSUPP;
3017 
3018 	if (val_len % map->format.val_bytes)
3019 		return -EINVAL;
3020 	if (!IS_ALIGNED(reg, map->reg_stride))
3021 		return -EINVAL;
3022 	if (val_len == 0)
3023 		return -EINVAL;
3024 
3025 	map->lock(map->lock_arg);
3026 
3027 	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
3028 		ret = -EINVAL;
3029 		goto out_unlock;
3030 	}
3031 
3032 	/*
3033 	 * We have not defined the FIFO semantics for cache, as the
3034 	 * cache is just one value deep. Should we return the last
3035 	 * written value? Just avoid this by always reading the FIFO
3036 	 * even when using the cache. Cache-only mode will not work.
3037 	 */
3038 	if (!map->cache_bypass && map->cache_only) {
3039 		ret = -EBUSY;
3040 		goto out_unlock;
3041 	}
3042 
3043 	/* Use the accelerated operation if we can */
3044 	if (map->bus && map->bus->reg_noinc_read) {
3045 		ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
3046 		goto out_unlock;
3047 	}
3048 
3049 	while (val_len) {
3050 		if (map->max_raw_read && map->max_raw_read < val_len)
3051 			read_len = map->max_raw_read;
3052 		else
3053 			read_len = val_len;
3054 		ret = _regmap_raw_read(map, reg, val, read_len, true);
3055 		if (ret)
3056 			goto out_unlock;
3057 		val = ((u8 *)val) + read_len;
3058 		val_len -= read_len;
3059 	}
3060 
3061 out_unlock:
3062 	map->unlock(map->lock_arg);
3063 	return ret;
3064 }
3065 EXPORT_SYMBOL_GPL(regmap_noinc_read);
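
/*
 * Example: a FIFO read sketch, not part of this file.  FOO_RX_FIFO is
 * a hypothetical volatile register backed by an internal FIFO; every
 * byte is drained from the same register address:
 *
 *	static int foo_recv(struct foo *foo, u8 *buf, size_t len)
 *	{
 *		return regmap_noinc_read(foo->regmap, FOO_RX_FIFO, buf, len);
 *	}
 */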
3066 
3067 /**
3068  * regmap_field_read() - Read a value from a single register field
3069  *
3070  * @field: Register field to read from
3071  * @val: Pointer to store read value
3072  *
3073  * A value of zero will be returned on success, a negative errno will
3074  * be returned in error cases.
3075  */
3076 int regmap_field_read(struct regmap_field *field, unsigned int *val)
3077 {
3078 	int ret;
3079 	unsigned int reg_val;
3080 	ret = regmap_read(field->regmap, field->reg, &reg_val);
3081 	if (ret != 0)
3082 		return ret;
3083 
3084 	reg_val &= field->mask;
3085 	reg_val >>= field->shift;
3086 	*val = reg_val;
3087 
3088 	return ret;
3089 }
3090 EXPORT_SYMBOL_GPL(regmap_field_read);
3091 
3092 /**
3093  * regmap_fields_read() - Read a value from a single register field with port ID
3094  *
3095  * @field: Register field to read from
3096  * @id: port ID
3097  * @val: Pointer to store read value
3098  *
3099  * A value of zero will be returned on success, a negative errno will
3100  * be returned in error cases.
3101  */
3102 int regmap_fields_read(struct regmap_field *field, unsigned int id,
3103 		       unsigned int *val)
3104 {
3105 	int ret;
3106 	unsigned int reg_val;
3107 
3108 	if (id >= field->id_size)
3109 		return -EINVAL;
3110 
3111 	ret = regmap_read(field->regmap,
3112 			  field->reg + (field->id_offset * id),
3113 			  &reg_val);
3114 	if (ret != 0)
3115 		return ret;
3116 
3117 	reg_val &= field->mask;
3118 	reg_val >>= field->shift;
3119 	*val = reg_val;
3120 
3121 	return ret;
3122 }
3123 EXPORT_SYMBOL_GPL(regmap_fields_read);
3124 
3125 static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
3126 			     const unsigned int *regs, void *val, size_t val_count)
3127 {
3128 	u32 *u32 = val;
3129 	u16 *u16 = val;
3130 	u8 *u8 = val;
3131 	int ret, i;
3132 
3133 	map->lock(map->lock_arg);
3134 
3135 	for (i = 0; i < val_count; i++) {
3136 		unsigned int ival;
3137 
3138 		if (regs) {
3139 			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
3140 				ret = -EINVAL;
3141 				goto out;
3142 			}
3143 			ret = _regmap_read(map, regs[i], &ival);
3144 		} else {
3145 			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
3146 		}
3147 		if (ret != 0)
3148 			goto out;
3149 
3150 		switch (map->format.val_bytes) {
3151 		case 4:
3152 			u32[i] = ival;
3153 			break;
3154 		case 2:
3155 			u16[i] = ival;
3156 			break;
3157 		case 1:
3158 			u8[i] = ival;
3159 			break;
3160 		default:
3161 			ret = -EINVAL;
3162 			goto out;
3163 		}
3164 	}
3165 out:
3166 	map->unlock(map->lock_arg);
3167 	return ret;
3168 }
3169 
3170 /**
3171  * regmap_bulk_read() - Read multiple sequential registers from the device
3172  *
3173  * @map: Register map to read from
3174  * @reg: First register to be read from
3175  * @val: Pointer to store read value, in native register size for device
3176  * @val_count: Number of registers to read
3177  *
3178  * A value of zero will be returned on success, a negative errno will
3179  * be returned in error cases.
3180  */
3181 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
3182 		     size_t val_count)
3183 {
3184 	int ret, i;
3185 	size_t val_bytes = map->format.val_bytes;
3186 	bool vol = regmap_volatile_range(map, reg, val_count);
3187 
3188 	if (!IS_ALIGNED(reg, map->reg_stride))
3189 		return -EINVAL;
3190 	if (val_count == 0)
3191 		return -EINVAL;
3192 
3193 	if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
3194 		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
3195 		if (ret != 0)
3196 			return ret;
3197 
3198 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
3199 			map->format.parse_inplace(val + i);
3200 	} else {
3201 		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
3202 	}
3203 	if (!ret)
3204 		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
3205 	return ret;
3206 }
3207 EXPORT_SYMBOL_GPL(regmap_bulk_read);
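
/*
 * Example: a bulk read sketch, not part of this file.  Eight
 * native-sized samples are read from consecutive registers starting at
 * the hypothetical FOO_ADC_BASE address:
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(foo->regmap, FOO_ADC_BASE,
 *			       samples, ARRAY_SIZE(samples));
 */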
3208 
3209 /**
3210  * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
3211  *
3212  * @map: Register map to read from
3213  * @regs: Array of registers to read from
3214  * @val: Pointer to store read value, in native register size for device
3215  * @val_count: Number of registers to read
3216  *
3217  * A value of zero will be returned on success, a negative errno will
3218  * be returned in error cases.
3219  */
3220 int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
3221 			  size_t val_count)
3222 {
3223 	if (val_count == 0)
3224 		return -EINVAL;
3225 
3226 	return _regmap_bulk_read(map, 0, regs, val, val_count);
3227 }
3228 EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
3229 
3230 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
3231 			       unsigned int mask, unsigned int val,
3232 			       bool *change, bool force_write)
3233 {
3234 	int ret;
3235 	unsigned int tmp, orig;
3236 
3237 	if (change)
3238 		*change = false;
3239 
3240 	if (regmap_volatile(map, reg) && map->reg_update_bits) {
3241 		reg = regmap_reg_addr(map, reg);
3242 		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
3243 		if (ret == 0 && change)
3244 			*change = true;
3245 	} else {
3246 		ret = _regmap_read(map, reg, &orig);
3247 		if (ret != 0)
3248 			return ret;
3249 
3250 		tmp = orig & ~mask;
3251 		tmp |= val & mask;
3252 
3253 		if (force_write || (tmp != orig) || map->force_write_field) {
3254 			ret = _regmap_write(map, reg, tmp);
3255 			if (ret == 0 && change)
3256 				*change = true;
3257 		}
3258 	}
3259 
3260 	return ret;
3261 }
3262 
3263 /**
3264  * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3265  *
3266  * @map: Register map to update
3267  * @reg: Register to update
3268  * @mask: Bitmask to change
3269  * @val: New value for bitmask
3270  * @change: Boolean indicating if a write was done
3271  * @async: Boolean indicating whether to write asynchronously
3272  * @force: Boolean indicating whether to force the write even if unchanged
3273  *
3274  * Perform a read/modify/write cycle on a register map with change, async, force
3275  * options.
3276  *
3277  * If async is true:
3278  *
3279  * With most buses the read must be done synchronously so this is most useful
3280  * for devices with a cache which do not need to interact with the hardware to
3281  * determine the current register value.
3282  *
3283  * Returns zero for success, a negative number on error.
3284  */
3285 int regmap_update_bits_base(struct regmap *map, unsigned int reg,
3286 			    unsigned int mask, unsigned int val,
3287 			    bool *change, bool async, bool force)
3288 {
3289 	int ret;
3290 
3291 	map->lock(map->lock_arg);
3292 
3293 	map->async = async;
3294 
3295 	ret = _regmap_update_bits(map, reg, mask, val, change, force);
3296 
3297 	map->async = false;
3298 
3299 	map->unlock(map->lock_arg);
3300 
3301 	return ret;
3302 }
3303 EXPORT_SYMBOL_GPL(regmap_update_bits_base);
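
/*
 * Example: a read/modify/write sketch, not part of this file.  Most
 * callers use the regmap_update_bits() wrapper from <linux/regmap.h>
 * rather than calling this function directly; FOO_CTRL and its mute
 * bit are hypothetical:
 *
 *	ret = regmap_update_bits(foo->regmap, FOO_CTRL, FOO_CTRL_MUTE,
 *				 mute ? FOO_CTRL_MUTE : 0);
 */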
3304 
3305 /**
3306  * regmap_test_bits() - Check if all specified bits are set in a register.
3307  *
3308  * @map: Register map to operate on
3309  * @reg: Register to read from
3310  * @bits: Bits to test
3311  *
3312  * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3313  * bits are set and a negative error number if the underlying regmap_read()
3314  * fails.
3315  */
3316 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
3317 {
3318 	unsigned int val;
3319 	int ret;
3320 
3321 	ret = regmap_read(map, reg, &val);
3322 	if (ret)
3323 		return ret;
3324 
3325 	return (val & bits) == bits;
3326 }
3327 EXPORT_SYMBOL_GPL(regmap_test_bits);
3328 
3329 void regmap_async_complete_cb(struct regmap_async *async, int ret)
3330 {
3331 	struct regmap *map = async->map;
3332 	bool wake;
3333 
3334 	trace_regmap_async_io_complete(map);
3335 
3336 	spin_lock(&map->async_lock);
3337 	list_move(&async->list, &map->async_free);
3338 	wake = list_empty(&map->async_list);
3339 
3340 	if (ret != 0)
3341 		map->async_ret = ret;
3342 
3343 	spin_unlock(&map->async_lock);
3344 
3345 	if (wake)
3346 		wake_up(&map->async_waitq);
3347 }
3348 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
3349 
3350 static int regmap_async_is_done(struct regmap *map)
3351 {
3352 	unsigned long flags;
3353 	int ret;
3354 
3355 	spin_lock_irqsave(&map->async_lock, flags);
3356 	ret = list_empty(&map->async_list);
3357 	spin_unlock_irqrestore(&map->async_lock, flags);
3358 
3359 	return ret;
3360 }
3361 
3362 /**
3363  * regmap_async_complete - Ensure all asynchronous I/O has completed.
3364  *
3365  * @map: Map to operate on.
3366  *
3367  * Blocks until any pending asynchronous I/O has completed.  Returns
3368  * an error code for any failed I/O operations.
3369  */
3370 int regmap_async_complete(struct regmap *map)
3371 {
3372 	unsigned long flags;
3373 	int ret;
3374 
3375 	/* Nothing to do if there's no async support */
3376 	if (!map->bus || !map->bus->async_write)
3377 		return 0;
3378 
3379 	trace_regmap_async_complete_start(map);
3380 
3381 	wait_event(map->async_waitq, regmap_async_is_done(map));
3382 
3383 	spin_lock_irqsave(&map->async_lock, flags);
3384 	ret = map->async_ret;
3385 	map->async_ret = 0;
3386 	spin_unlock_irqrestore(&map->async_lock, flags);
3387 
3388 	trace_regmap_async_complete_done(map);
3389 
3390 	return ret;
3391 }
3392 EXPORT_SYMBOL_GPL(regmap_async_complete);
3393 
3394 /**
3395  * regmap_register_patch - Register and apply register updates to be applied
3396  *                         on device initialisation
3397  *
3398  * @map: Register map to apply updates to.
3399  * @regs: Values to update.
3400  * @num_regs: Number of entries in regs.
3401  *
3402  * Register a set of register updates to be applied to the device
3403  * whenever the device registers are synchronised with the cache and
3404  * apply them immediately.  Typically this is used to apply
3405  * corrections to the device defaults on startup, such
3406  * as the updates some vendors provide to undocumented registers.
3407  *
3408  * The caller must ensure that this function cannot be called
3409  * concurrently with either itself or regcache_sync().
3410  */
3411 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3412 			  int num_regs)
3413 {
3414 	struct reg_sequence *p;
3415 	int ret;
3416 	bool bypass;
3417 
3418 	if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
3419 	    num_regs))
3420 		return 0;
3421 
3422 	p = krealloc(map->patch,
3423 		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3424 		     GFP_KERNEL);
3425 	if (p) {
3426 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3427 		map->patch = p;
3428 		map->patch_regs += num_regs;
3429 	} else {
3430 		return -ENOMEM;
3431 	}
3432 
3433 	map->lock(map->lock_arg);
3434 
3435 	bypass = map->cache_bypass;
3436 
3437 	map->cache_bypass = true;
3438 	map->async = true;
3439 
3440 	ret = _regmap_multi_reg_write(map, regs, num_regs);
3441 
3442 	map->async = false;
3443 	map->cache_bypass = bypass;
3444 
3445 	map->unlock(map->lock_arg);
3446 
3447 	regmap_async_complete(map);
3448 
3449 	return ret;
3450 }
3451 EXPORT_SYMBOL_GPL(regmap_register_patch);
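
/*
 * Example: a register patch sketch, not part of this file.  The two
 * fix-ups for undocumented registers are hypothetical; they are
 * written immediately and re-applied whenever the cache is synced:
 *
 *	static const struct reg_sequence foo_patch[] = {
 *		{ .reg = 0x71, .def = 0x0004 },
 *		{ .reg = 0x72, .def = 0x1800 },
 *	};
 *
 *	ret = regmap_register_patch(foo->regmap, foo_patch,
 *				    ARRAY_SIZE(foo_patch));
 */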
3452 
3453 /**
3454  * regmap_get_val_bytes() - Report the size of a register value
3455  *
3456  * @map: Register map to operate on.
3457  *
3458  * Report the size of a register value, mainly intended for use by
3459  * generic infrastructure built on top of regmap.
3460  */
3461 int regmap_get_val_bytes(struct regmap *map)
3462 {
3463 	if (map->format.format_write)
3464 		return -EINVAL;
3465 
3466 	return map->format.val_bytes;
3467 }
3468 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3469 
3470 /**
3471  * regmap_get_max_register() - Report the max register value
3472  *
3473  * @map: Register map to operate on.
3474  *
3475  * Report the max register value, mainly intended for use by
3476  * generic infrastructure built on top of regmap.
3477  */
3478 int regmap_get_max_register(struct regmap *map)
3479 {
3480 	return map->max_register_is_set ? map->max_register : -EINVAL;
3481 }
3482 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3483 
3484 /**
3485  * regmap_get_reg_stride() - Report the register address stride
3486  *
3487  * @map: Register map to operate on.
3488  *
3489  * Report the register address stride, mainly intended for use by
3490  * generic infrastructure built on top of regmap.
3491  */
3492 int regmap_get_reg_stride(struct regmap *map)
3493 {
3494 	return map->reg_stride;
3495 }
3496 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3497 
3498 /**
3499  * regmap_might_sleep() - Returns whether a regmap access might sleep.
3500  *
3501  * @map: Register map to operate on.
3502  *
3503  * Returns true if an access to the register might sleep, else false.
3504  */
3505 bool regmap_might_sleep(struct regmap *map)
3506 {
3507 	return map->can_sleep;
3508 }
3509 EXPORT_SYMBOL_GPL(regmap_might_sleep);
3510 
3511 int regmap_parse_val(struct regmap *map, const void *buf,
3512 			unsigned int *val)
3513 {
3514 	if (!map->format.parse_val)
3515 		return -EINVAL;
3516 
3517 	*val = map->format.parse_val(buf);
3518 
3519 	return 0;
3520 }
3521 EXPORT_SYMBOL_GPL(regmap_parse_val);
3522 
3523 static int __init regmap_initcall(void)
3524 {
3525 	regmap_debugfs_initcall();
3526 
3527 	return 0;
3528 }
3529 postcore_initcall(regmap_initcall);
3530