/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
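
/*
 * Illustrative sketch (not part of the driver core): how a driver might
 * describe its writable registers with an access table built from ranges,
 * as consumed by regmap_check_range_table() above.  All foo_* names and
 * register addresses below are hypothetical.
 */
#if 0
static const struct regmap_range foo_wr_yes_ranges[] = {
	regmap_reg_range(0x00, 0x3f),	/* control block */
	regmap_reg_range(0x80, 0x9f),	/* DSP coefficients */
};

static const struct regmap_range foo_wr_no_ranges[] = {
	regmap_reg_range(0x10, 0x13),	/* read-only status inside block */
};

static const struct regmap_access_table foo_wr_table = {
	.yes_ranges = foo_wr_yes_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_wr_yes_ranges),
	.no_ranges = foo_wr_no_ranges,
	.n_no_ranges = ARRAY_SIZE(foo_wr_no_ranges),
};

/* Referenced from the driver's regmap_config as .wr_table = &foo_wr_table */
#endif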

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}
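
/*
 * Illustrative sketch (hypothetical foo_* names): the same per-register
 * policy can be supplied as callbacks instead of tables; these are the
 * hooks consulted first by regmap_writeable()/regmap_volatile() above.
 */
#if 0
#define FOO_CHIP_ID	0x00
#define FOO_IRQ_STATUS	0x04

static bool foo_writeable_reg(struct device *dev, unsigned int reg)
{
	return reg != FOO_CHIP_ID;	/* ID register is read-only */
}

static bool foo_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == FOO_IRQ_STATUS;	/* never cache live status */
}

static const struct regmap_config foo_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,
	.writeable_reg = foo_writeable_reg,
	.volatile_reg = foo_volatile_reg,
	.cache_type = REGCACHE_RBTREE,
};
#endif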

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
	size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}
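
/*
 * Worked example of the wire formats above (values chosen arbitrarily):
 * regmap_format_7_9_write() with reg = 0x1a, val = 0x055 packs
 * (0x1a << 9) | 0x055 = 0x3455 as big-endian bytes 0x34 0x55, and
 * regmap_parse_24() reads bytes 0x12 0x34 0x56 back as 0x123456.
 */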

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
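
/*
 * Illustrative sketch (hypothetical foo_* names): a driver can override
 * the mutex/spinlock defaults above by supplying its own lock/unlock
 * callbacks via regmap_config, e.g. to share one lock between two maps.
 */
#if 0
static DEFINE_MUTEX(foo_shared_lock);

static void foo_regmap_lock(void *lock_arg)
{
	mutex_lock(lock_arg);
}

static void foo_regmap_unlock(void *lock_arg)
{
	mutex_unlock(lock_arg);
}

static const struct regmap_config foo_locked_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock = foo_regmap_lock,
	.unlock = foo_regmap_unlock,
	.lock_arg = &foo_shared_lock,
};
#endif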

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
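
/*
 * Illustrative sketch: a device tree node can force the value endianness
 * checked above without any driver change (the "acme,foo-codec" compatible
 * and addresses are hypothetical):
 *
 *	codec@4a {
 *		compatible = "acme,foo-codec";
 *		reg = <0x4a>;
 *		little-endian;
 *	};
 */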

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
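
/*
 * Illustrative sketch (hypothetical foo_* names and addresses): a config
 * using an indirectly addressed window, as validated by the range loop in
 * __regmap_init() above.  Registers 0x100-0x1ff are reached by writing a
 * page number to FOO_PAGE_SEL and accessing the 0x20-0x2f data window.
 */
#if 0
#define FOO_PAGE_SEL	0x1f

static const struct regmap_range_cfg foo_ranges[] = {
	{
		.name = "foo-paged",
		.range_min = 0x100,
		.range_max = 0x1ff,
		.selector_reg = FOO_PAGE_SEL,
		.selector_mask = 0x0f,
		.selector_shift = 0,
		.window_start = 0x20,
		.window_len = 0x10,
	},
};

static const struct regmap_config foo_paged_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x1ff,
	.ranges = foo_ranges,
	.num_ranges = ARRAY_SIZE(foo_ranges),
};
#endif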

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);
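
/*
 * Illustrative sketch: drivers normally reach __devm_regmap_init() through
 * a bus-specific wrapper such as devm_regmap_init_i2c(); the map is then
 * freed automatically on driver detach.  foo_config is hypothetical.
 */
#if 0
static const struct regmap_config foo_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &foo_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* No matching regmap_exit() needed: devres handles it */
	return 0;
}
#endif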

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
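
/*
 * Illustrative sketch (hypothetical register layout): describing bits 6:4
 * of register 0x30 as a field and then updating just those bits.
 * REG_FIELD() fills in struct reg_field, and regmap_field_write() masks
 * and shifts the value as set up by regmap_field_init() above.
 */
#if 0
static const struct reg_field foo_gain_field = REG_FIELD(0x30, 4, 6);

static int foo_set_gain(struct device *dev, struct regmap *map,
			unsigned int gain)
{
	struct regmap_field *f;

	f = devm_regmap_field_alloc(dev, map, foo_gain_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	return regmap_field_write(f, gain);	/* writes bits 6:4 only */
}
#endif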

/**
 * devm_regmap_field_free(): Free register field allocated using
 * devm_regmap_field_alloc. Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per device-driver
 * life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once they have finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here; the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 *
 * @map: Register map to free.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
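
/*
 * Illustrative sketch: a typical user is an MFD child driver picking up
 * the register map registered by its parent (names hypothetical).
 */
#if 0
#define FOO_CHILD_ENABLE	0x02

static int foo_child_probe(struct platform_device *pdev)
{
	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);

	if (!map)
		return -ENODEV;

	return regmap_write(map, FOO_CHILD_ENABLE, 1);
}
#endif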

/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have a selector register inside the data window.
	   In that case, the selector register is located on every page and
	   needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	u8 *u8 = map->work_buf;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + (i * map->reg_stride)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map, reg + (i * map->reg_stride),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	u8[0] |= map->write_flag_mask;

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
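
/*
 * Illustrative sketch (hypothetical registers): the typical driver-side
 * pattern built on regmap_write()/regmap_update_bits(); note that reg
 * must be aligned to the map's reg_stride or -EINVAL is returned.
 */
#if 0
#define FOO_CTRL	0x00
#define FOO_CTRL_EN	BIT(0)

static int foo_enable(struct regmap *map)
{
	int ret;

	ret = regmap_write(map, FOO_CTRL, 0);	/* reset control register */
	if (ret)
		return ret;

	/* read/modify/write just the enable bit */
	return regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);
}
#endif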

/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (map->max_raw_write && val_len > map->max_raw_write)
		return -E2BIG;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
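
/*
 * Illustrative sketch: pushing a pre-formatted block, e.g. a firmware
 * image, with regmap_raw_write().  The data must already be laid out in
 * the device's wire format; FOO_DSP_BASE is hypothetical and struct
 * firmware is assumed to come from <linux/firmware.h>.
 */
#if 0
#define FOO_DSP_BASE	0x1000

static int foo_download_fw(struct regmap *map, const struct firmware *fw)
{
	/* fw->data is device-ready; no value formatting is applied */
	return regmap_raw_write(map, FOO_DSP_BASE, fw->data, fw->size);
}
#endif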

/**
 * regmap_field_write(): Write a value to a single register field
 *
 * @field: Register field to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_write(struct regmap_field *field, unsigned int val)
{
	return regmap_update_bits(field->regmap, field->reg,
				field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_write);

/**
 * regmap_field_update_bits():	Perform a read/modify/write cycle
 *                              on the register field
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits(field->regmap, field->reg,
				  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits);

/**
 * regmap_fields_write(): Write a value to a single register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_write(struct regmap_field *field, unsigned int id,
			unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_update_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_write);

int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
			unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_write_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_force_write);

/**
 * regmap_fields_update_bits():	Perform a read/modify/write cycle
 *                              on the register field
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
			      unsigned int mask, unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits(field->regmap,
				  field->reg + (field->id_offset * id),
				  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits);

/**
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		     size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (map->bus && !map->format.parse_inplace)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	/*
	 * Some devices don't support bulk write; for them we have a
	 * series of single write operations in the first two if blocks.
	 *
	 * The first if block is used for memory mapped io. It does not allow
	 * val_bytes of 3 for example.
	 * The second one is used for busses which do not have this limitation
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map, reg + (i * map->reg_stride),
					ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
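
/*
 * Illustrative sketch (hypothetical names): writing an array of values in
 * native CPU byte order; regmap_bulk_write() handles any endian conversion
 * and transfer chunking internally.
 */
#if 0
static int foo_load_coeffs(struct regmap *map)
{
	static const u16 coeffs[] = { 0x0102, 0x0304, 0x0506, 0x0708 };

	/* 16-bit map assumed: one element per register starting at 0x40 */
	return regmap_bulk_write(map, 0x40, coeffs, ARRAY_SIZE(coeffs));
}
#endif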

/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes. This also applies if
	 * there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {
			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
1947 
1948 static int _regmap_multi_reg_write(struct regmap *map,
1949 				   const struct reg_sequence *regs,
1950 				   size_t num_regs)
1951 {
1952 	int i;
1953 	int ret;
1954 
1955 	if (!map->can_multi_write) {
1956 		for (i = 0; i < num_regs; i++) {
1957 			ret = _regmap_write(map, regs[i].reg, regs[i].def);
1958 			if (ret != 0)
1959 				return ret;
1960 
1961 			if (regs[i].delay_us)
1962 				udelay(regs[i].delay_us);
1963 		}
1964 		return 0;
1965 	}
1966 
1967 	if (!map->format.parse_inplace)
1968 		return -EINVAL;
1969 
1970 	if (map->writeable_reg)
1971 		for (i = 0; i < num_regs; i++) {
1972 			int reg = regs[i].reg;
1973 			if (!map->writeable_reg(map->dev, reg))
1974 				return -EINVAL;
1975 			if (reg % map->reg_stride)
1976 				return -EINVAL;
1977 		}
1978 
1979 	if (!map->cache_bypass) {
1980 		for (i = 0; i < num_regs; i++) {
1981 			unsigned int val = regs[i].def;
1982 			unsigned int reg = regs[i].reg;
1983 			ret = regcache_write(map, reg, val);
1984 			if (ret) {
1985 				dev_err(map->dev,
1986 				"Error in caching of register: %x ret: %d\n",
1987 								reg, ret);
1988 				return ret;
1989 			}
1990 		}
1991 		if (map->cache_only) {
1992 			map->cache_dirty = true;
1993 			return 0;
1994 		}
1995 	}
1996 
1997 	WARN_ON(!map->bus);
1998 
1999 	for (i = 0; i < num_regs; i++) {
2000 		unsigned int reg = regs[i].reg;
2001 		struct regmap_range_node *range;
2002 
2003 		/* Coalesce all the writes between page breaks or delays
2004 		 * in the sequence
2005 		 */
2006 		range = _regmap_range_lookup(map, reg);
2007 		if (range || regs[i].delay_us) {
2008 			size_t len = sizeof(struct reg_sequence) * num_regs;
2009 			struct reg_sequence *base = kmemdup(regs, len,
2010 							   GFP_KERNEL);
2011 			if (!base)
2012 				return -ENOMEM;
2013 			ret = _regmap_range_multi_paged_reg_write(map, base,
2014 								  num_regs);
2015 			kfree(base);
2016 
2017 			return ret;
2018 		}
2019 	}
2020 	return _regmap_raw_multi_reg_write(map, regs, num_regs);
2021 }
2022 
2023 /**
2024  * regmap_multi_reg_write(): Write multiple registers to the device
2025  *
2026  * @map: Register map to write to
2027  * @regs: Array of structures containing register,value to be written
2028  * @num_regs: Number of registers to write
2029  *
2030  * The set of register,value pairs may be supplied in any order,
2031  * possibly not all in a single range.
2032  *
2033  * The 'normal' block write mode will ultimately send data on the
2034  * target bus as R,V1,V2,V3,..,Vn where successively higher registers
2035  * are addressed.  However, this alternative block multi write mode
2036  * will send the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The
2037  * target device must of course support the mode.
2038  *
2039  * A value of zero will be returned on success, a negative errno will be
2040  * returned in error cases.
2041  */
2042 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2043 			   int num_regs)
2044 {
2045 	int ret;
2046 
2047 	map->lock(map->lock_arg);
2048 
2049 	ret = _regmap_multi_reg_write(map, regs, num_regs);
2050 
2051 	map->unlock(map->lock_arg);
2052 
2053 	return ret;
2054 }
2055 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
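
/*
 * A minimal usage sketch (map and register addresses are hypothetical):
 * the caller supplies an ordered table and writes it in one call,
 * optionally with per-write delays.
 *
 *	static const struct reg_sequence init_seq[] = {
 *		{ 0x10, 0x0001 },
 *		{ 0x14, 0x00ff },
 *		{ 0x18, 0x0003, 50 },	(50us delay after this write)
 *	};
 *
 *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
 */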
2056 
2057 /**
2058  * regmap_multi_reg_write_bypassed(): Write multiple registers to the
2059  *                                    device but not the cache
2060  *
2061  * @map: Register map to write to
2062  * @regs: Array of structures containing register,value to be written
2063  * @num_regs: Number of registers to write
2064  *
2065  * The set of register,value pairs may be supplied in any order.
2066  *
2067  * This function is intended to be used for writing a large block of
2068  * data atomically to the device in a single transfer for those I2C
2069  * client devices that implement this alternative block write mode.
2070  *
2071  * A value of zero will be returned on success, a negative errno will
2072  * be returned in error cases.
2073  */
2074 int regmap_multi_reg_write_bypassed(struct regmap *map,
2075 				    const struct reg_sequence *regs,
2076 				    int num_regs)
2077 {
2078 	int ret;
2079 	bool bypass;
2080 
2081 	map->lock(map->lock_arg);
2082 
2083 	bypass = map->cache_bypass;
2084 	map->cache_bypass = true;
2085 
2086 	ret = _regmap_multi_reg_write(map, regs, num_regs);
2087 
2088 	map->cache_bypass = bypass;
2089 
2090 	map->unlock(map->lock_arg);
2091 
2092 	return ret;
2093 }
2094 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
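
/*
 * Usage sketch: the bypassed variant suits values that must not land in
 * the register cache, e.g. streaming coefficient data; init_seq is
 * assumed to be defined as in the example above.
 *
 *	ret = regmap_multi_reg_write_bypassed(map, init_seq,
 *					      ARRAY_SIZE(init_seq));
 */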
2095 
2096 /**
2097  * regmap_raw_write_async(): Write raw values to one or more registers
2098  *                           asynchronously
2099  *
2100  * @map: Register map to write to
2101  * @reg: Initial register to write to
2102  * @val: Block of data to be written, laid out for direct transmission to the
2103  *       device.  Must be valid until regmap_async_complete() is called.
2104  * @val_len: Length of data pointed to by val.
2105  *
2106  * This function is intended to be used for things like firmware
2107  * download where a large block of data needs to be transferred to the
2108  * device.  No formatting will be done on the data provided.
2109  *
2110  * If supported by the underlying bus the write will be scheduled
2111  * asynchronously, helping maximise I/O speed on higher speed buses
2112  * like SPI.  regmap_async_complete() can be called to ensure that all
2113  * asynchronous writes have been completed.
2114  *
2115  * A value of zero will be returned on success, a negative errno will
2116  * be returned in error cases.
2117  */
2118 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2119 			   const void *val, size_t val_len)
2120 {
2121 	int ret;
2122 
2123 	if (val_len % map->format.val_bytes)
2124 		return -EINVAL;
2125 	if (reg % map->reg_stride)
2126 		return -EINVAL;
2127 
2128 	map->lock(map->lock_arg);
2129 
2130 	map->async = true;
2131 
2132 	ret = _regmap_raw_write(map, reg, val, val_len);
2133 
2134 	map->async = false;
2135 
2136 	map->unlock(map->lock_arg);
2137 
2138 	return ret;
2139 }
2140 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
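
/*
 * Usage sketch (FW_BASE_REG, fw_buf and fw_len are hypothetical): the
 * buffer must stay valid until the async queue has drained.
 *
 *	ret = regmap_raw_write_async(map, FW_BASE_REG, fw_buf, fw_len);
 *	if (ret == 0)
 *		ret = regmap_async_complete(map);
 */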
2141 
2142 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2143 			    unsigned int val_len)
2144 {
2145 	struct regmap_range_node *range;
2146 	u8 *u8 = map->work_buf;
2147 	int ret;
2148 
2149 	WARN_ON(!map->bus);
2150 
2151 	range = _regmap_range_lookup(map, reg);
2152 	if (range) {
2153 		ret = _regmap_select_page(map, &reg, range,
2154 					  val_len / map->format.val_bytes);
2155 		if (ret != 0)
2156 			return ret;
2157 	}
2158 
2159 	map->format.format_reg(map->work_buf, reg, map->reg_shift);
2160 
2161 	/*
2162 	 * Some buses or devices flag reads by setting the high bits in the
2163 	 * register address; since it's always the high bits for all
2164 	 * current formats we can do this here rather than in
2165 	 * formatting.  This may break if we get interesting formats.
2166 	 */
2167 	u8[0] |= map->read_flag_mask;
2168 
2169 	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2170 
2171 	ret = map->bus->read(map->bus_context, map->work_buf,
2172 			     map->format.reg_bytes + map->format.pad_bytes,
2173 			     val, val_len);
2174 
2175 	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2176 
2177 	return ret;
2178 }
2179 
2180 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2181 				unsigned int *val)
2182 {
2183 	struct regmap *map = context;
2184 
2185 	return map->bus->reg_read(map->bus_context, reg, val);
2186 }
2187 
2188 static int _regmap_bus_read(void *context, unsigned int reg,
2189 			    unsigned int *val)
2190 {
2191 	int ret;
2192 	struct regmap *map = context;
2193 
2194 	if (!map->format.parse_val)
2195 		return -EINVAL;
2196 
2197 	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2198 	if (ret == 0)
2199 		*val = map->format.parse_val(map->work_buf);
2200 
2201 	return ret;
2202 }
2203 
2204 static int _regmap_read(struct regmap *map, unsigned int reg,
2205 			unsigned int *val)
2206 {
2207 	int ret;
2208 	void *context = _regmap_map_get_context(map);
2209 
2210 	if (!map->cache_bypass) {
2211 		ret = regcache_read(map, reg, val);
2212 		if (ret == 0)
2213 			return 0;
2214 	}
2215 
2216 	if (map->cache_only)
2217 		return -EBUSY;
2218 
2219 	if (!regmap_readable(map, reg))
2220 		return -EIO;
2221 
2222 	ret = map->reg_read(context, reg, val);
2223 	if (ret == 0) {
2224 #ifdef LOG_DEVICE
2225 		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
2226 			dev_info(map->dev, "%x => %x\n", reg, *val);
2227 #endif
2228 
2229 		trace_regmap_reg_read(map, reg, *val);
2230 
2231 		if (!map->cache_bypass)
2232 			regcache_write(map, reg, *val);
2233 	}
2234 
2235 	return ret;
2236 }
2237 
2238 /**
2239  * regmap_read(): Read a value from a single register
2240  *
2241  * @map: Register map to read from
2242  * @reg: Register to be read from
2243  * @val: Pointer to store read value
2244  *
2245  * A value of zero will be returned on success, a negative errno will
2246  * be returned in error cases.
2247  */
2248 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2249 {
2250 	int ret;
2251 
2252 	if (reg % map->reg_stride)
2253 		return -EINVAL;
2254 
2255 	map->lock(map->lock_arg);
2256 
2257 	ret = _regmap_read(map, reg, val);
2258 
2259 	map->unlock(map->lock_arg);
2260 
2261 	return ret;
2262 }
2263 EXPORT_SYMBOL_GPL(regmap_read);
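
/*
 * Usage sketch (REG_STATUS and STATUS_READY are hypothetical):
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(map, REG_STATUS, &status);
 *	if (ret == 0 && (status & STATUS_READY))
 *		(device is ready)
 */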
2264 
2265 /**
2266  * regmap_raw_read(): Read raw data from the device
2267  *
2268  * @map: Register map to read from
2269  * @reg: First register to be read from
2270  * @val: Pointer to store read value
2271  * @val_len: Size of data to read
2272  *
2273  * A value of zero will be returned on success, a negative errno will
2274  * be returned in error cases.
2275  */
2276 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2277 		    size_t val_len)
2278 {
2279 	size_t val_bytes = map->format.val_bytes;
2280 	size_t val_count = val_len / val_bytes;
2281 	unsigned int v;
2282 	int ret, i;
2283 
2284 	if (!map->bus)
2285 		return -EINVAL;
2286 	if (val_len % map->format.val_bytes)
2287 		return -EINVAL;
2288 	if (reg % map->reg_stride)
2289 		return -EINVAL;
2290 	if (val_count == 0)
2291 		return -EINVAL;
2292 
2293 	map->lock(map->lock_arg);
2294 
2295 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2296 	    map->cache_type == REGCACHE_NONE) {
2297 		if (!map->bus->read) {
2298 			ret = -ENOTSUPP;
2299 			goto out;
2300 		}
2301 		if (map->max_raw_read && map->max_raw_read < val_len) {
2302 			ret = -E2BIG;
2303 			goto out;
2304 		}
2305 
2306 		/* Physical block read if there's no cache involved */
2307 		ret = _regmap_raw_read(map, reg, val, val_len);
2308 
2309 	} else {
2310 		/* Otherwise go word by word for the cache; should be low
2311 		 * cost as we expect to hit the cache.
2312 		 */
2313 		for (i = 0; i < val_count; i++) {
2314 			ret = _regmap_read(map, reg + (i * map->reg_stride),
2315 					   &v);
2316 			if (ret != 0)
2317 				goto out;
2318 
2319 			map->format.format_val(val + (i * val_bytes), v, 0);
2320 		}
2321 	}
2322 
2323  out:
2324 	map->unlock(map->lock_arg);
2325 
2326 	return ret;
2327 }
2328 EXPORT_SYMBOL_GPL(regmap_raw_read);
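
/*
 * Usage sketch (REG_DATA0 is hypothetical): for a map with 16-bit
 * big-endian values the buffer comes back in bus order, not native
 * endianness; use regmap_bulk_read() if parsed values are wanted.
 *
 *	__be16 raw[4];
 *
 *	ret = regmap_raw_read(map, REG_DATA0, raw, sizeof(raw));
 */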
2329 
2330 /**
2331  * regmap_field_read(): Read a value from a single register field
2332  *
2333  * @field: Register field to read from
2334  * @val: Pointer to store read value
2335  *
2336  * A value of zero will be returned on success, a negative errno will
2337  * be returned in error cases.
2338  */
2339 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2340 {
2341 	int ret;
2342 	unsigned int reg_val;
2343 	ret = regmap_read(field->regmap, field->reg, &reg_val);
2344 	if (ret != 0)
2345 		return ret;
2346 
2347 	reg_val &= field->mask;
2348 	reg_val >>= field->shift;
2349 	*val = reg_val;
2350 
2351 	return ret;
2352 }
2353 EXPORT_SYMBOL_GPL(regmap_field_read);
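
/*
 * Usage sketch (REG_CTRL is hypothetical): for a field covering bits
 * [7:4], the helper applies the mask and shift so the caller sees the
 * plain field value.
 *
 *	struct reg_field fld = REG_FIELD(REG_CTRL, 4, 7);
 *
 *	field = devm_regmap_field_alloc(dev, map, fld);
 *	ret = regmap_field_read(field, &val);
 */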
2354 
2355 /**
2356  * regmap_fields_read(): Read a value from a single register field with port ID
2357  *
2358  * @field: Register field to read from
2359  * @id: port ID
2360  * @val: Pointer to store read value
2361  *
2362  * A value of zero will be returned on success, a negative errno will
2363  * be returned in error cases.
2364  */
2365 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2366 		       unsigned int *val)
2367 {
2368 	int ret;
2369 	unsigned int reg_val;
2370 
2371 	if (id >= field->id_size)
2372 		return -EINVAL;
2373 
2374 	ret = regmap_read(field->regmap,
2375 			  field->reg + (field->id_offset * id),
2376 			  &reg_val);
2377 	if (ret != 0)
2378 		return ret;
2379 
2380 	reg_val &= field->mask;
2381 	reg_val >>= field->shift;
2382 	*val = reg_val;
2383 
2384 	return ret;
2385 }
2386 EXPORT_SYMBOL_GPL(regmap_fields_read);
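
/*
 * Usage sketch: for a field allocated with id_size/id_offset (one
 * instance per port), id picks the instance, here the hypothetical
 * port 2.
 *
 *	ret = regmap_fields_read(field, 2, &val);
 */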
2387 
2388 /**
2389  * regmap_bulk_read(): Read multiple registers from the device
2390  *
2391  * @map: Register map to read from
2392  * @reg: First register to be read from
2393  * @val: Pointer to store read value, in native register size for device
2394  * @val_count: Number of registers to read
2395  *
2396  * A value of zero will be returned on success, a negative errno will
2397  * be returned in error cases.
2398  */
2399 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2400 		     size_t val_count)
2401 {
2402 	int ret, i;
2403 	size_t val_bytes = map->format.val_bytes;
2404 	bool vol = regmap_volatile_range(map, reg, val_count);
2405 
2406 	if (reg % map->reg_stride)
2407 		return -EINVAL;
2408 
2409 	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2410 		/*
2411 		 * Read the whole range as one raw block if the device
2412 		 * and bus allow it, else fall back to chunked reads.
2413 		 */
2414 		size_t total_size = val_bytes * val_count;
2415 
2416 		if (!map->use_single_read &&
2417 		    (!map->max_raw_read || map->max_raw_read > total_size)) {
2418 			ret = regmap_raw_read(map, reg, val,
2419 					      val_bytes * val_count);
2420 			if (ret != 0)
2421 				return ret;
2422 		} else {
2423 			/*
2424 			 * Some devices do not support bulk read or do
2425 			 * not support large bulk reads; for them we
2426 			 * have a series of read operations.
2427 			 */
2428 			int chunk_stride = map->reg_stride;
2429 			size_t chunk_size = val_bytes;
2430 			size_t chunk_count = val_count;
2431 
2432 			if (!map->use_single_read) {
2433 				chunk_size = map->max_raw_read;
2434 				if (chunk_size % val_bytes)
2435 					chunk_size -= chunk_size % val_bytes;
2436 				chunk_count = total_size / chunk_size;
2437 				chunk_stride *= chunk_size / val_bytes;
2438 			}
2439 
2440 			/* Read bytes that fit into a multiple of chunk_size */
2441 			for (i = 0; i < chunk_count; i++) {
2442 				ret = regmap_raw_read(map,
2443 						      reg + (i * chunk_stride),
2444 						      val + (i * chunk_size),
2445 						      chunk_size);
2446 				if (ret != 0)
2447 					return ret;
2448 			}
2449 
2450 			/* Read remaining bytes */
2451 			if (chunk_size * i < total_size) {
2452 				ret = regmap_raw_read(map,
2453 						      reg + (i * chunk_stride),
2454 						      val + (i * chunk_size),
2455 						      total_size - i * chunk_size);
2456 				if (ret != 0)
2457 					return ret;
2458 			}
2459 		}
2460 
2461 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
2462 			map->format.parse_inplace(val + i);
2463 	} else {
2464 		for (i = 0; i < val_count; i++) {
2465 			unsigned int ival;
2466 			ret = regmap_read(map, reg + (i * map->reg_stride),
2467 					  &ival);
2468 			if (ret != 0)
2469 				return ret;
2470 
2471 			if (map->format.format_val) {
2472 				map->format.format_val(val + (i * val_bytes), ival, 0);
2473 			} else {
2474 				/* Devices providing read and write
2475 				 * operations can use the bulk I/O
2476 				 * functions if they define a val_bytes;
2477 				 * we assume that the values are native
2478 				 * endian.
2479 				 */
2480 				u32 *u32 = val;
2481 				u16 *u16 = val;
2482 				u8 *u8 = val;
2483 
2484 				switch (map->format.val_bytes) {
2485 				case 4:
2486 					u32[i] = ival;
2487 					break;
2488 				case 2:
2489 					u16[i] = ival;
2490 					break;
2491 				case 1:
2492 					u8[i] = ival;
2493 					break;
2494 				default:
2495 					return -EINVAL;
2496 				}
2497 			}
2498 		}
2499 	}
2500 
2501 	return 0;
2502 }
2503 EXPORT_SYMBOL_GPL(regmap_bulk_read);
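
/*
 * Usage sketch (REG_BASE is hypothetical): unlike regmap_raw_read(),
 * the results are parsed into native-endian values sized to the map's
 * val_bytes, here 16 bits.
 *
 *	u16 vals[8];
 *
 *	ret = regmap_bulk_read(map, REG_BASE, vals, ARRAY_SIZE(vals));
 */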
2504 
2505 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2506 			       unsigned int mask, unsigned int val,
2507 			       bool *change, bool force_write)
2508 {
2509 	int ret;
2510 	unsigned int tmp, orig;
2511 
2512 	ret = _regmap_read(map, reg, &orig);
2513 	if (ret != 0)
2514 		return ret;
2515 
2516 	tmp = orig & ~mask;
2517 	tmp |= val & mask;
2518 
2519 	if (force_write || (tmp != orig)) {
2520 		ret = _regmap_write(map, reg, tmp);
2521 		if (change)
2522 			*change = true;
2523 	} else {
2524 		if (change)
2525 			*change = false;
2526 	}
2527 
2528 	return ret;
2529 }
2530 
2531 /**
2532  * regmap_update_bits: Perform a read/modify/write cycle on the register map
2533  *
2534  * @map: Register map to update
2535  * @reg: Register to update
2536  * @mask: Bitmask to change
2537  * @val: New value for bitmask
2538  *
2539  * Returns zero for success, a negative number on error.
2540  */
2541 int regmap_update_bits(struct regmap *map, unsigned int reg,
2542 		       unsigned int mask, unsigned int val)
2543 {
2544 	int ret;
2545 
2546 	map->lock(map->lock_arg);
2547 	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
2548 	map->unlock(map->lock_arg);
2549 
2550 	return ret;
2551 }
2552 EXPORT_SYMBOL_GPL(regmap_update_bits);
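
/*
 * Usage sketch (REG_CTRL is hypothetical): set and later clear bit 3
 * without disturbing the rest of the register; the write is skipped
 * when the masked bits already hold the requested value.
 *
 *	ret = regmap_update_bits(map, REG_CTRL, BIT(3), BIT(3));
 *	...
 *	ret = regmap_update_bits(map, REG_CTRL, BIT(3), 0);
 */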
2553 
2554 /**
2555  * regmap_write_bits: Perform a read/modify/write cycle on the register map
2556  *
2557  * @map: Register map to update
2558  * @reg: Register to update
2559  * @mask: Bitmask to change
2560  * @val: New value for bitmask
2561  *
2562  * Returns zero for success, a negative number on error.
2563  */
2564 int regmap_write_bits(struct regmap *map, unsigned int reg,
2565 		      unsigned int mask, unsigned int val)
2566 {
2567 	int ret;
2568 
2569 	map->lock(map->lock_arg);
2570 	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
2571 	map->unlock(map->lock_arg);
2572 
2573 	return ret;
2574 }
2575 EXPORT_SYMBOL_GPL(regmap_write_bits);
2576 
2577 /**
2578  * regmap_update_bits_async: Perform a read/modify/write cycle on the register
2579  *                           map asynchronously
2580  *
2581  * @map: Register map to update
2582  * @reg: Register to update
2583  * @mask: Bitmask to change
2584  * @val: New value for bitmask
2585  *
2586  * With most buses the read must be done synchronously so this is most
2587  * useful for devices with a cache which do not need to interact with
2588  * the hardware to determine the current register value.
2589  *
2590  * Returns zero for success, a negative number on error.
2591  */
2592 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
2593 			     unsigned int mask, unsigned int val)
2594 {
2595 	int ret;
2596 
2597 	map->lock(map->lock_arg);
2598 
2599 	map->async = true;
2600 
2601 	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
2602 
2603 	map->async = false;
2604 
2605 	map->unlock(map->lock_arg);
2606 
2607 	return ret;
2608 }
2609 EXPORT_SYMBOL_GPL(regmap_update_bits_async);
2610 
2611 /**
2612  * regmap_update_bits_check: Perform a read/modify/write cycle on the
2613  *                           register map and report if updated
2614  *
2615  * @map: Register map to update
2616  * @reg: Register to update
2617  * @mask: Bitmask to change
2618  * @val: New value for bitmask
2619  * @change: Boolean indicating if a write was done
2620  *
2621  * Returns zero for success, a negative number on error.
2622  */
2623 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
2624 			     unsigned int mask, unsigned int val,
2625 			     bool *change)
2626 {
2627 	int ret;
2628 
2629 	map->lock(map->lock_arg);
2630 	ret = _regmap_update_bits(map, reg, mask, val, change, false);
2631 	map->unlock(map->lock_arg);
2632 	return ret;
2633 }
2634 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
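
/*
 * Usage sketch (REG_CTRL is hypothetical): the _check variant also
 * reports whether a write was actually performed, which callers can
 * use to skip dependent work.
 *
 *	bool changed;
 *
 *	ret = regmap_update_bits_check(map, REG_CTRL, BIT(3), BIT(3),
 *				       &changed);
 *	if (ret == 0 && changed)
 *		(react to the transition)
 */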
2635 
2636 /**
2637  * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
2638  *                                 register map asynchronously and report if
2639  *                                 updated
2640  *
2641  * @map: Register map to update
2642  * @reg: Register to update
2643  * @mask: Bitmask to change
2644  * @val: New value for bitmask
2645  * @change: Boolean indicating if a write was done
2646  *
2647  * With most buses the read must be done synchronously so this is most
2648  * useful for devices with a cache which do not need to interact with
2649  * the hardware to determine the current register value.
2650  *
2651  * Returns zero for success, a negative number on error.
2652  */
2653 int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
2654 				   unsigned int mask, unsigned int val,
2655 				   bool *change)
2656 {
2657 	int ret;
2658 
2659 	map->lock(map->lock_arg);
2660 
2661 	map->async = true;
2662 
2663 	ret = _regmap_update_bits(map, reg, mask, val, change, false);
2664 
2665 	map->async = false;
2666 
2667 	map->unlock(map->lock_arg);
2668 
2669 	return ret;
2670 }
2671 EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
2672 
2673 void regmap_async_complete_cb(struct regmap_async *async, int ret)
2674 {
2675 	struct regmap *map = async->map;
2676 	bool wake;
2677 
2678 	trace_regmap_async_io_complete(map);
2679 
2680 	spin_lock(&map->async_lock);
2681 	list_move(&async->list, &map->async_free);
2682 	wake = list_empty(&map->async_list);
2683 
2684 	if (ret != 0)
2685 		map->async_ret = ret;
2686 
2687 	spin_unlock(&map->async_lock);
2688 
2689 	if (wake)
2690 		wake_up(&map->async_waitq);
2691 }
2692 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2693 
2694 static int regmap_async_is_done(struct regmap *map)
2695 {
2696 	unsigned long flags;
2697 	int ret;
2698 
2699 	spin_lock_irqsave(&map->async_lock, flags);
2700 	ret = list_empty(&map->async_list);
2701 	spin_unlock_irqrestore(&map->async_lock, flags);
2702 
2703 	return ret;
2704 }
2705 
2706 /**
2707  * regmap_async_complete: Ensure all asynchronous I/O has completed.
2708  *
2709  * @map: Map to operate on.
2710  *
2711  * Blocks until any pending asynchronous I/O has completed.  Returns
2712  * an error code for any failed I/O operations.
2713  */
2714 int regmap_async_complete(struct regmap *map)
2715 {
2716 	unsigned long flags;
2717 	int ret;
2718 
2719 	/* Nothing to do if the bus has no async support */
2720 	if (!map->bus || !map->bus->async_write)
2721 		return 0;
2722 
2723 	trace_regmap_async_complete_start(map);
2724 
2725 	wait_event(map->async_waitq, regmap_async_is_done(map));
2726 
2727 	spin_lock_irqsave(&map->async_lock, flags);
2728 	ret = map->async_ret;
2729 	map->async_ret = 0;
2730 	spin_unlock_irqrestore(&map->async_lock, flags);
2731 
2732 	trace_regmap_async_complete_done(map);
2733 
2734 	return ret;
2735 }
2736 EXPORT_SYMBOL_GPL(regmap_async_complete);
2737 
2738 /**
2739  * regmap_register_patch: Register and apply register updates to be applied
2740  *                        on device initialisation
2741  *
2742  * @map: Register map to apply updates to.
2743  * @regs: Values to update.
2744  * @num_regs: Number of entries in regs.
2745  *
2746  * Register a set of register updates to be applied to the device
2747  * whenever the device registers are synchronised with the cache and
2748  * apply them immediately.  Typically this is used to apply
2749  * corrections to the device defaults on startup, such as the
2750  * updates some vendors provide to undocumented registers.
2751  *
2752  * The caller must ensure that this function cannot be called
2753  * concurrently with either itself or regcache_sync().
2754  */
2755 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
2756 			  int num_regs)
2757 {
2758 	struct reg_sequence *p;
2759 	int ret;
2760 	bool bypass;
2761 
2762 	if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
2763 	    num_regs))
2764 		return 0;
2765 
2766 	p = krealloc(map->patch,
2767 		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
2768 		     GFP_KERNEL);
2769 	if (p) {
2770 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
2771 		map->patch = p;
2772 		map->patch_regs += num_regs;
2773 	} else {
2774 		return -ENOMEM;
2775 	}
2776 
2777 	map->lock(map->lock_arg);
2778 
2779 	bypass = map->cache_bypass;
2780 
2781 	map->cache_bypass = true;
2782 	map->async = true;
2783 
2784 	ret = _regmap_multi_reg_write(map, regs, num_regs);
2785 
2786 	map->async = false;
2787 	map->cache_bypass = bypass;
2788 
2789 	map->unlock(map->lock_arg);
2790 
2791 	regmap_async_complete(map);
2792 
2793 	return ret;
2794 }
2795 EXPORT_SYMBOL_GPL(regmap_register_patch);
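
/*
 * Usage sketch (register addresses are hypothetical): a patch applied
 * at probe time is replayed automatically whenever the cache is synced
 * back to the device.
 *
 *	static const struct reg_sequence errata[] = {
 *		{ 0x7f, 0x0001 },
 *		{ 0x40, 0x8000 },
 *	};
 *
 *	ret = regmap_register_patch(map, errata, ARRAY_SIZE(errata));
 */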
2796 
2797 /**
2798  * regmap_get_val_bytes(): Report the size of a register value
2799  *
2800  * Report the size of a register value, mainly intended for use by
2801  * generic infrastructure built on top of regmap.
2802  */
2803 int regmap_get_val_bytes(struct regmap *map)
2804 {
2805 	if (map->format.format_write)
2806 		return -EINVAL;
2807 
2808 	return map->format.val_bytes;
2809 }
2810 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2811 
2812 /**
2813  * regmap_get_max_register(): Report the max register value
2814  *
2815  * Report the max register value, mainly intended for use by
2816  * generic infrastructure built on top of regmap.
2817  */
2818 int regmap_get_max_register(struct regmap *map)
2819 {
2820 	return map->max_register ? map->max_register : -EINVAL;
2821 }
2822 EXPORT_SYMBOL_GPL(regmap_get_max_register);
2823 
2824 /**
2825  * regmap_get_reg_stride(): Report the register address stride
2826  *
2827  * Report the register address stride, mainly intended for use by
2828  * generic infrastructure built on top of regmap.
2829  */
2830 int regmap_get_reg_stride(struct regmap *map)
2831 {
2832 	return map->reg_stride;
2833 }
2834 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
2835 
2836 int regmap_parse_val(struct regmap *map, const void *buf,
2837 			unsigned int *val)
2838 {
2839 	if (!map->format.parse_val)
2840 		return -EINVAL;
2841 
2842 	*val = map->format.parse_val(buf);
2843 
2844 	return 0;
2845 }
2846 EXPORT_SYMBOL_GPL(regmap_parse_val);
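
/*
 * Usage sketch: convert one raw, bus-formatted value (for example a
 * slice of a regmap_raw_read() buffer) into a native-endian
 * unsigned int.
 *
 *	ret = regmap_parse_val(map, raw_buf, &val);
 */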
2847 
2848 static int __init regmap_initcall(void)
2849 {
2850 	regmap_debugfs_initcall();
2851 
2852 	return 0;
2853 }
2854 postcore_initcall(regmap_initcall);
2855