// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_flat_sparse_ops,
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};

static int regcache_defaults_cmp(const void *a, const void *b)
{
	const struct reg_default *x = a;
	const struct reg_default *y = b;

	if (x->reg > y->reg)
		return 1;
	else if (x->reg < y->reg)
		return -1;
	else
		return 0;
}

void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
	sort(defaults, ndefaults, sizeof(*defaults),
	     regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);
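
/*
 * Example (editor's sketch, not part of this file): a driver whose
 * default table is not already sorted by register address can sort it
 * before registering the regmap.  The "foo_defaults" table below is
 * hypothetical.
 *
 *	static struct reg_default foo_defaults[] = {
 *		{ .reg = 0x04, .def = 0x0010 },
 *		{ .reg = 0x00, .def = 0x00ff },
 *	};
 *
 *	regcache_sort_defaults(foo_defaults, ARRAY_SIZE(foo_defaults));
 */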

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;

		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access until data is read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults set without num_reg_defaults!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"num_reg_defaults set without register defaults!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = BITS_TO_BYTES(config->val_bits);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/*
	 * We still need to ensure that the reg_defaults won't vanish
	 * from under us.  We'll need to make a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
					sizeof(*map->reg_defaults), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/*
		 * Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register_is_set && map->num_reg_defaults_raw) {
		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
		map->max_register_is_set = true;
	}

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->init(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_free;
	}

	if (map->num_reg_defaults && map->cache_ops->populate) {
		dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->populate(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_exit;
	}
	return 0;

err_exit:
	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
		map->lock(map->lock_arg);
		/* Don't clobber the populate error with the exit result */
		map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
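
/*
 * Example (editor's sketch): regcache_init() is invoked from the core
 * regmap initialization, so a driver opts in to caching purely through
 * its struct regmap_config.  The "foo_" names below are hypothetical.
 *
 *	static const struct reg_default foo_defaults[] = {
 *		{ .reg = 0x00, .def = 0x00ff },
 *		{ .reg = 0x04, .def = 0x0010 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x0f,
 *		.reg_defaults = foo_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_defaults),
 *		.cache_type = REGCACHE_MAPLE,
 *	};
 */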

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: Pointer at which to store the cached value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}
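
/*
 * Example (editor's sketch of the intended calling pattern, simplified
 * from the regmap core): a non-volatile read is served from the cache
 * first and only falls back to the hardware when the cache has no
 * entry for the register.
 *
 *	ret = regcache_read(map, reg, val);
 *	if (ret != -ENOENT)
 *		return ret;		// cache hit, or a hard error
 *	return _regmap_read(map, reg, val);
 *
 * This is an approximation; the core read path additionally honours
 * cache_only, cache_bypass and volatility when deciding.
 */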

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}
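
/*
 * Example (editor's sketch, simplified from the regmap core write
 * path): the cache is updated alongside the hardware so that a later
 * sync sees the current value; when cache_only is set the hardware
 * write is skipped and the cache is marked dirty instead.
 *
 *	ret = regcache_write(map, reg, val);
 *	if (ret != 0)
 *		return ret;
 *	if (map->cache_only) {
 *		map->cache_dirty = true;
 *		return 0;
 *	}
 *	return _regmap_write(map, reg, val);
 *
 * This mirrors the structure of _regmap_write() in regmap.c but omits
 * bypass handling, register ranges and tracing.
 */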

bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
			     unsigned int val)
{
	int ret;

	if (!regmap_writeable(map, reg))
		return false;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret == -ENOENT)
			continue;
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

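/*
 * Dummy comparison callback which matches any key, so that the
 * rb_for_each() in regcache_sync() visits every node in the range
 * tree.
 */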
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;
	struct rb_node *node;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;

	/*
	 * If we did any paging with cache bypassed and a cached
	 * paging register then the register and cache state might
	 * have gone out of sync, force writes of all the paging
	 * registers.
	 */
	rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		/* If there's nothing in the cache there's nothing to sync */
		if (regcache_read(map, this->selector_reg, &i) != 0)
			continue;

		ret = _regmap_write(map, this->selector_reg, i);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				this->selector_reg, i, ret);
			break;
		}
	}

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
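
/*
 * Example (editor's sketch): a typical driver resume path.  The device
 * lost its register state while powered off, so the cache is written
 * back once the device is available again.  "foo_priv" and the helper
 * functions are hypothetical.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		foo_power_on(priv);
 *		regcache_cache_only(priv->regmap, false);
 *		return regcache_sync(priv->regmap);
 *	}
 */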

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
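
/*
 * Example (editor's sketch): restoring only one block of a device, for
 * instance after resetting a single sub-module.  The register bounds
 * below are hypothetical.
 *
 *	#define FOO_DSP_BASE	0x100
 *	#define FOO_DSP_END	0x1ff
 *
 *	ret = regcache_sync_region(map, FOO_DSP_BASE, FOO_DSP_END);
 */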

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
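
/*
 * Example (editor's sketch): after a register range has been
 * reprogrammed behind the kernel's back, e.g. by device firmware, the
 * stale cache entries can be discarded so subsequent reads go back to
 * the hardware.  The bounds reuse the hypothetical defines above.
 *
 *	ret = regcache_drop_region(map, FOO_DSP_BASE, FOO_DSP_END);
 */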

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache and will not cause any
 * hardware changes.  This is useful for allowing portions of drivers
 * to act as though the device were functioning as normal when it is
 * disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
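
/*
 * Example (editor's sketch): a driver entering a low power state keeps
 * servicing register writes from the rest of the kernel by routing
 * them into the cache only; they are replayed by regcache_sync() on
 * the way back up.  Names are hypothetical.
 *
 *	regcache_cache_only(priv->regmap, true);
 *	foo_power_off(priv);
 *	...
 *	foo_power_on(priv);
 *	regcache_cache_only(priv->regmap, false);
 *	regcache_sync(priv->regmap);
 */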

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
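
/*
 * Example (editor's sketch): if powering down resets the chip to its
 * defaults, marking the cache dirty tells the following
 * regcache_sync() that the hardware is back at its defaults, so every
 * non-default value in the cache must be written out while registers
 * still at their defaults can be skipped.
 *
 *	foo_power_off(priv);		// hypothetical; chip loses state
 *	regcache_mark_dirty(priv->regmap);
 */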

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and will not
 * update the cache.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
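
/*
 * Example (editor's sketch): writing a register without disturbing the
 * cached value, e.g. poking a test register during debug.  The
 * register name is hypothetical.
 *
 *	regcache_cache_bypass(priv->regmap, true);
 *	regmap_write(priv->regmap, FOO_TEST_REG, 0x1);
 *	regcache_cache_bypass(priv->regmap, false);
 */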

/**
 * regcache_reg_cached - Check if a register is cached
 *
 * @map: map to check
 * @reg: register to check
 *
 * Reports if a register is cached.
 */
bool regcache_reg_cached(struct regmap *map, unsigned int reg)
{
	unsigned int val;
	int ret;

	map->lock(map->lock_arg);

	ret = regcache_read(map, reg, &val);

	map->unlock(map->lock_arg);

	return ret == 0;
}
EXPORT_SYMBOL_GPL(regcache_reg_cached);
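
/*
 * Example (editor's sketch): checking whether a read can be satisfied
 * from the cache before taking a path that should not touch the bus.
 *
 *	if (regcache_reg_cached(map, reg))
 *		regmap_read(map, reg, &val);	// served from the cache
 */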

void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!regcache_reg_needs_sync(map, reg, val))
		return 0;

	map->cache_bypass = true;

	ret = _regmap_write(map, reg, val);

	map->cache_bypass = false;

	if (ret != 0) {
		dev_err(map->dev, "Unable to sync register %#x. %d\n",
			reg, ret);
		return ret;
	}
	dev_dbg(map->dev, "Synced register %#x, value %#x\n",
		reg, val);

	return 0;
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		ret = regcache_sync_val(map, regtmp, val);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

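/*
 * Sync a block using raw writes where possible: runs of contiguous
 * registers that need syncing are accumulated and each run is flushed
 * to the hardware with a single raw write.  A register that is absent
 * from the cache, not writeable, or not in need of syncing ends the
 * current run and triggers a flush.
 */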
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}