xref: /linux/drivers/base/regmap/regcache.c (revision af0bc3ac9a9e830cb52b718ecb237c4e76a466be)
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_flat_sparse_ops,
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};

static int regcache_defaults_cmp(const void *a, const void *b)
{
	const struct reg_default *x = a;
	const struct reg_default *y = b;

	if (x->reg > y->reg)
		return 1;
	else if (x->reg < y->reg)
		return -1;
	else
		return 0;
}

void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
	sort(defaults, ndefaults, sizeof(*defaults),
	     regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);

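/*
 * Usage sketch (not part of this file; the "foo" names are hypothetical):
 * a driver whose defaults table is assembled out of register order can
 * sort it once before registration so that the bsearch()-based lookup in
 * regcache_lookup_reg() works:
 *
 *	static struct reg_default foo_defaults[] = {
 *		{ .reg = 0x10, .def = 0x8000 },
 *		{ .reg = 0x04, .def = 0x0001 },
 *	};
 *
 *	regcache_sort_defaults(foo_defaults, ARRAY_SIZE(foo_defaults));
 *
 * The table must be writable (not const) since it is sorted in place.
 */
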
static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;

		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache until the defaults have been read from the HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults supplied without num_reg_defaults!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"num_reg_defaults set without any register defaults!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = BITS_TO_BYTES(config->val_bits);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults won't vanish
	 * from under us, so make a copy of them.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
					sizeof(*map->reg_defaults), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults;
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register_is_set && map->num_reg_defaults_raw) {
		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
		map->max_register_is_set = true;
	}

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->init(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_free;
	}

	if (map->cache_ops->populate &&
	    (map->num_reg_defaults || map->reg_default_cb)) {
		dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->populate(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_exit;
	}
	return 0;

err_exit:
	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

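/*
 * Configuration sketch (hypothetical driver): the checks above require
 * reg_defaults and num_reg_defaults to be supplied together and every
 * default register to be aligned to reg_stride, so a matching
 * regmap_config might look like:
 *
 *	static const struct reg_default foo_defaults[] = {
 *		{ .reg = 0x00, .def = 0x1234 },
 *		{ .reg = 0x04, .def = 0x0000 },
 *	};
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.reg_stride = 4,
 *		.max_register = 0x40,
 *		.cache_type = REGCACHE_MAPLE,
 *		.reg_defaults = foo_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_defaults),
 *	};
 */
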
void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to read from.
 * @reg: The register index.
 * @value: Pointer used to return the value read from the cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to write to.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

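/*
 * Call-pattern sketch (simplified, not a verbatim excerpt of the regmap
 * core): a non-volatile read is first attempted from the cache, and a
 * successful hardware access is mirrored back into it:
 *
 *	ret = regcache_read(map, reg, &val);
 *	if (ret == 0)
 *		return 0;
 *
 *	... otherwise perform the bus read into val, then mirror it: ...
 *	regcache_write(map, reg, val);
 */
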
bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
			     unsigned int val)
{
	int ret;

	if (!regmap_writeable(map, reg))
		return false;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret == -ENOENT)
			continue;
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/* Match every node, so rb_for_each() walks the whole range tree */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to sync.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so wish.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;
	struct rb_node *node;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;

	/*
	 * If we did any paging with cache bypassed and a cached
	 * paging register then the register and cache state might
	 * have gone out of sync, so force writes of all the paging
	 * registers.
	 */
	rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		/* If there's nothing in the cache there's nothing to sync */
		if (regcache_read(map, this->selector_reg, &i) != 0)
			continue;

		ret = _regmap_write(map, this->selector_reg, i);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				this->selector_reg, i, ret);
			break;
		}
	}

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

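/*
 * Usage sketch (hypothetical driver): a typical resume path re-enables
 * hardware access and replays the cached state:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *		int ret;
 *
 *		regcache_cache_only(priv->regmap, false);
 *		ret = regcache_sync(priv->regmap);
 *		if (ret)
 *			dev_err(dev, "Failed to restore cache: %d\n", ret);
 *		return ret;
 *	}
 */
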
/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

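/*
 * Usage sketch (hypothetical register range): when only part of the
 * device lost state, the writeback can be limited to that block:
 *
 *	ret = regcache_sync_region(map, FOO_DSP_BASE, FOO_DSP_END);
 *
 * FOO_DSP_BASE/FOO_DSP_END stand in for the first and last register of
 * the affected region.
 */
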
/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

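/*
 * Usage sketch (hypothetical): once a register block has been handed
 * over to firmware or otherwise invalidated, dropping the stale cache
 * entries removes their cached values so later reads miss the cache:
 *
 *	ret = regcache_drop_region(map, FOO_FW_BASE, FOO_FW_END);
 */
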
/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if the map should enter (true) or leave (false) cache
 *          only mode
 *
 * When a register map is marked as cache only, writes through the
 * register map API will only update the register cache; they will not
 * cause any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

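/*
 * Usage sketch (hypothetical driver): a suspend path typically enters
 * cache only mode before removing power, so that any writes made while
 * the device is off land in the cache for a later regcache_sync():
 *
 *	regcache_cache_only(priv->regmap, true);
 *	regulator_disable(priv->supply);
 */
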
/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

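/*
 * Usage sketch (hypothetical driver): pairing regcache_mark_dirty()
 * with the power-down makes the next regcache_sync() rewrite every
 * cached value that differs from the hardware defaults:
 *
 *	regcache_cache_only(priv->regmap, true);
 *	regcache_mark_dirty(priv->regmap);
 *	... remove and later restore power ...
 *	regcache_cache_only(priv->regmap, false);
 *	regcache_sync(priv->regmap);
 */
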
/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if the map should enter (true) or leave (false) cache
 *          bypass mode
 *
 * When a register map is marked with the cache bypass option, writes
 * through the register map API will only update the hardware and not
 * the cache.  This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

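/*
 * Usage sketch (hypothetical register names): write straight to the
 * hardware without disturbing the cached values, e.g. for a one-off
 * calibration sequence that should not be replayed by regcache_sync():
 *
 *	regcache_cache_bypass(map, true);
 *	regmap_write(map, FOO_CAL_REG, FOO_CAL_MAGIC);
 *	regcache_cache_bypass(map, false);
 */
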
/**
 * regcache_reg_cached - Check if a register is cached
 *
 * @map: map to check
 * @reg: register to check
 *
 * Reports if a register is cached.
 */
bool regcache_reg_cached(struct regmap *map, unsigned int reg)
{
	unsigned int val;
	int ret;

	map->lock(map->lock_arg);

	ret = regcache_read(map, reg, &val);

	map->unlock(map->lock_arg);

	return ret == 0;
}
EXPORT_SYMBOL_GPL(regcache_reg_cached);

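/*
 * Usage sketch (hypothetical register name): a driver can check for a
 * cached value before deciding whether a read needs to wake the device:
 *
 *	if (regcache_reg_cached(map, FOO_STATUS_SHADOW))
 *		ret = regmap_read(map, FOO_STATUS_SHADOW, &val);
 */
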
void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	/* Compare explicitly: unsigned subtraction can wrap for large regs */
	if (_a->reg > _b->reg)
		return 1;
	if (_a->reg < _b->reg)
		return -1;
	return 0;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!regcache_reg_needs_sync(map, reg, val))
		return 0;

	map->cache_bypass = true;

	ret = _regmap_write(map, reg, val);

	map->cache_bypass = false;

	if (ret != 0) {
		dev_err(map->dev, "Unable to sync register %#x. %d\n",
			reg, ret);
		return ret;
	}
	dev_dbg(map->dev, "Synced register %#x, value %#x\n",
		reg, val);

	return 0;
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		ret = regcache_sync_val(map, regtmp, val);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}