xref: /linux/drivers/base/regmap/regcache.c (revision e7662bced2e98ffa2c572126677deb9cf55d43b3)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Register cache access API
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
8 
9 #include <linux/bsearch.h>
10 #include <linux/device.h>
11 #include <linux/export.h>
12 #include <linux/slab.h>
13 #include <linux/sort.h>
14 
15 #include "trace.h"
16 #include "internal.h"
17 
/*
 * Available cache backends; regcache_init() scans this table for the
 * entry whose ->type matches map->cache_type.
 */
static const struct regcache_ops *cache_types[] = {
	&regcache_flat_sparse_ops,
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};
24 
25 static int regcache_defaults_cmp(const void *a, const void *b)
26 {
27 	const struct reg_default *x = a;
28 	const struct reg_default *y = b;
29 
30 	if (x->reg > y->reg)
31 		return 1;
32 	else if (x->reg < y->reg)
33 		return -1;
34 	else
35 		return 0;
36 }
37 
/**
 * regcache_sort_defaults - Sort a table of register defaults by address
 *
 * @defaults: table of defaults, sorted in place
 * @ndefaults: number of entries in @defaults
 *
 * Orders the entries by ascending register address so the table can
 * later be binary searched (see regcache_lookup_reg()).
 */
void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
	sort(defaults, ndefaults, sizeof(*defaults),
	     regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);
44 
45 static int regcache_count_cacheable_registers(struct regmap *map)
46 {
47 	unsigned int count;
48 
49 	/* calculate the size of reg_defaults */
50 	count = 0;
51 	for (unsigned int i = 0; i < map->num_reg_defaults_raw; i++)
52 		if (regmap_readable(map, i * map->reg_stride) &&
53 		    !regmap_volatile(map, i * map->reg_stride))
54 			count++;
55 
56 	return count;
57 }
58 
/*
 * regcache_hw_init - Build map->reg_defaults from the hardware state
 *
 * Fills map->reg_defaults with one entry per cacheable register.  If no
 * raw default table was supplied, values are read back from the device:
 * preferably as a single raw block read, otherwise register by register.
 *
 * Returns 0 on success or a negative errno if a read fails.
 */
static int regcache_hw_init(struct regmap *map)
{
	int ret;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_dbg(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			/* We own this buffer; regcache_hw_exit() frees it */
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			/*
			 * Raw block read failed; fall back to per-register
			 * reads in the loop below.
			 */
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (unsigned int i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			/* Read straight from hardware, not from the cache */
			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %x: %d\n",
					reg, ret);
				return ret;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;
}
117 
118 static void regcache_hw_exit(struct regmap *map)
119 {
120 	if (map->cache_free)
121 		kfree(map->reg_defaults_raw);
122 }
123 
/*
 * regcache_init - Set up the register cache described by @config
 *
 * Selects a cache backend matching map->cache_type, copies or builds
 * the register default table, then initialises and populates the
 * backend.  Returns 0 on success (including the REGCACHE_NONE and
 * all-registers-bypassed cases) or a negative errno on failure.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int count = 0;
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			 "Register defaults are set without the number!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"Register defaults number are set without the reg!\n");
		return -EINVAL;
	}

	/* Every supplied default must be aligned to the register stride */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = BITS_TO_BYTES(config->val_bits);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory for every cache backend */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
					sizeof(*map->reg_defaults), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		count = regcache_count_cacheable_registers(map);
		if (!count)
			map->cache_bypass = true;

		/* All registers are unreadable or volatile, so just bypass */
		if (map->cache_bypass)
			return 0;

		/* Table is filled in by regcache_hw_init() further down */
		map->num_reg_defaults = count;
		map->reg_defaults = kmalloc_objs(struct reg_default, count);
		if (!map->reg_defaults)
			return -ENOMEM;
	}

	if (!map->max_register_is_set && map->num_reg_defaults_raw) {
		map->max_register = (map->num_reg_defaults_raw  - 1) * map->reg_stride;
		map->max_register_is_set = true;
	}

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->init(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_free_reg_defaults;
	}

	/*
	 * Some devices such as PMICs don't have cache defaults,
	 * we cope with this by reading back the HW registers and
	 * crafting the cache defaults by hand.
	 */
	if (count) {
		ret = regcache_hw_init(map);
		if (ret)
			goto err_exit;
	}

	if (map->cache_ops->populate &&
	    (map->num_reg_defaults || map->reg_default_cb)) {
		dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->populate(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	regcache_hw_exit(map);
err_exit:
	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
err_free_reg_defaults:
	kfree(map->reg_defaults);

	return ret;
}
256 
257 void regcache_exit(struct regmap *map)
258 {
259 	if (map->cache_type == REGCACHE_NONE)
260 		return;
261 
262 	BUG_ON(!map->cache_ops);
263 
264 	regcache_hw_exit(map);
265 
266 	if (map->cache_ops->exit) {
267 		dev_dbg(map->dev, "Destroying %s cache\n",
268 			map->cache_ops->name);
269 		map->lock(map->lock_arg);
270 		map->cache_ops->exit(map);
271 		map->unlock(map->lock_arg);
272 	}
273 
274 	kfree(map->reg_defaults);
275 }
276 
277 /**
278  * regcache_read - Fetch the value of a given register from the cache.
279  *
280  * @map: map to configure.
281  * @reg: The register index.
282  * @value: The value to be returned.
283  *
284  * Return a negative value on failure, 0 on success.
285  */
286 int regcache_read(struct regmap *map,
287 		  unsigned int reg, unsigned int *value)
288 {
289 	int ret;
290 
291 	if (map->cache_type == REGCACHE_NONE)
292 		return -EINVAL;
293 
294 	BUG_ON(!map->cache_ops);
295 
296 	if (!regmap_volatile(map, reg)) {
297 		ret = map->cache_ops->read(map, reg, value);
298 
299 		if (ret == 0)
300 			trace_regmap_reg_read_cache(map, reg, *value);
301 
302 		return ret;
303 	}
304 
305 	return -EINVAL;
306 }
307 
308 /**
309  * regcache_write - Set the value of a given register in the cache.
310  *
311  * @map: map to configure.
312  * @reg: The register index.
313  * @value: The new register value.
314  *
315  * Return a negative value on failure, 0 on success.
316  */
317 int regcache_write(struct regmap *map,
318 		   unsigned int reg, unsigned int value)
319 {
320 	if (map->cache_type == REGCACHE_NONE)
321 		return 0;
322 
323 	BUG_ON(!map->cache_ops);
324 
325 	if (!regmap_volatile(map, reg))
326 		return map->cache_ops->write(map, reg, value);
327 
328 	return 0;
329 }
330 
331 bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
332 			     unsigned int val)
333 {
334 	int ret;
335 
336 	if (!regmap_writeable(map, reg))
337 		return false;
338 
339 	/* If we don't know the chip just got reset, then sync everything. */
340 	if (!map->no_sync_defaults)
341 		return true;
342 
343 	/* Is this the hardware default?  If so skip. */
344 	ret = regcache_lookup_reg(map, reg);
345 	if (ret >= 0 && val == map->reg_defaults[ret].def)
346 		return false;
347 	return true;
348 }
349 
350 static int regcache_default_sync(struct regmap *map, unsigned int min,
351 				 unsigned int max)
352 {
353 	unsigned int reg;
354 
355 	for (reg = min; reg <= max; reg += map->reg_stride) {
356 		unsigned int val;
357 		int ret;
358 
359 		if (regmap_volatile(map, reg) ||
360 		    !regmap_writeable(map, reg))
361 			continue;
362 
363 		ret = regcache_read(map, reg, &val);
364 		if (ret == -ENOENT)
365 			continue;
366 		if (ret)
367 			return ret;
368 
369 		if (!regcache_reg_needs_sync(map, reg, val))
370 			continue;
371 
372 		map->cache_bypass = true;
373 		ret = _regmap_write(map, reg, val);
374 		map->cache_bypass = false;
375 		if (ret) {
376 			dev_err(map->dev, "Unable to sync register %#x. %d\n",
377 				reg, ret);
378 			return ret;
379 		}
380 		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
381 	}
382 
383 	return 0;
384 }
385 
/* rb_for_each() comparator that matches every node, to visit the whole tree */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}
390 
/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;
	struct rb_node *node;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	/* Nothing to do if the hardware already matches the cache */
	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	/* Prefer the backend's own sync implementation when it has one */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	/* Default-skipping only applies to the first sync after a reset */
	map->no_sync_defaults = false;

	/*
	 * If we did any paging with cache bypassed and a cached
	 * paging register then the register and cache state might
	 * have gone out of sync, force writes of all the paging
	 * registers.
	 */
	rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		/* If there's nothing in the cache there's nothing to sync */
		if (regcache_read(map, this->selector_reg, &i) != 0)
			continue;

		ret = _regmap_write(map, this->selector_reg, i);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				this->selector_reg, i, ret);
			break;
		}
	}

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
481 EXPORT_SYMBOL_GPL(regcache_sync);
482 
/**
 * regcache_sync_region - Sync part  of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %#x-%#x\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	/* Issue writes asynchronously; completed via regmap_async_complete() */
	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	/* Default-skipping only applies to the first sync after a reset */
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
541 
542 /**
543  * regcache_drop_region - Discard part of the register cache
544  *
545  * @map: map to operate on
546  * @min: first register to discard
547  * @max: last register to discard
548  *
549  * Discard part of the register cache.
550  *
551  * Return a negative value on failure, 0 on success.
552  */
553 int regcache_drop_region(struct regmap *map, unsigned int min,
554 			 unsigned int max)
555 {
556 	int ret = 0;
557 
558 	if (!map->cache_ops || !map->cache_ops->drop)
559 		return -EINVAL;
560 
561 	map->lock(map->lock_arg);
562 
563 	trace_regcache_drop_region(map, min, max);
564 
565 	ret = map->cache_ops->drop(map, min, max);
566 
567 	map->unlock(map->lock_arg);
568 
569 	return ret;
570 }
571 EXPORT_SYMBOL_GPL(regcache_drop_region);
572 
/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* Cache-only while the cache is bypassed is contradictory */
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
595 
/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	/* Hardware is at defaults, so default values need not be re-synced */
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
617 
/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* Bypassing while in cache-only mode would make writes disappear */
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
638 
639 /**
640  * regcache_reg_cached - Check if a register is cached
641  *
642  * @map: map to check
643  * @reg: register to check
644  *
645  * Reports if a register is cached.
646  */
647 bool regcache_reg_cached(struct regmap *map, unsigned int reg)
648 {
649 	unsigned int val;
650 	int ret;
651 
652 	map->lock(map->lock_arg);
653 
654 	ret = regcache_read(map, reg, &val);
655 
656 	map->unlock(map->lock_arg);
657 
658 	return ret == 0;
659 }
660 EXPORT_SYMBOL_GPL(regcache_reg_cached);
661 
662 void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
663 		      unsigned int val)
664 {
665 	/* Use device native format if possible */
666 	if (map->format.format_val) {
667 		map->format.format_val(base + (map->cache_word_size * idx),
668 				       val, 0);
669 		return;
670 	}
671 
672 	switch (map->cache_word_size) {
673 	case 1: {
674 		u8 *cache = base;
675 
676 		cache[idx] = val;
677 		break;
678 	}
679 	case 2: {
680 		u16 *cache = base;
681 
682 		cache[idx] = val;
683 		break;
684 	}
685 	case 4: {
686 		u32 *cache = base;
687 
688 		cache[idx] = val;
689 		break;
690 	}
691 	default:
692 		BUG();
693 	}
694 }
695 
696 unsigned int regcache_get_val(struct regmap *map, const void *base,
697 			      unsigned int idx)
698 {
699 	if (!base)
700 		return -EINVAL;
701 
702 	/* Use device native format if possible */
703 	if (map->format.parse_val)
704 		return map->format.parse_val(regcache_get_val_addr(map, base,
705 								   idx));
706 
707 	switch (map->cache_word_size) {
708 	case 1: {
709 		const u8 *cache = base;
710 
711 		return cache[idx];
712 	}
713 	case 2: {
714 		const u16 *cache = base;
715 
716 		return cache[idx];
717 	}
718 	case 4: {
719 		const u32 *cache = base;
720 
721 		return cache[idx];
722 	}
723 	default:
724 		BUG();
725 	}
726 	/* unreachable */
727 	return -1;
728 }
729 
730 static int regcache_default_cmp(const void *a, const void *b)
731 {
732 	const struct reg_default *_a = a;
733 	const struct reg_default *_b = b;
734 
735 	return _a->reg - _b->reg;
736 }
737 
738 int regcache_lookup_reg(struct regmap *map, unsigned int reg)
739 {
740 	struct reg_default key;
741 	struct reg_default *r;
742 
743 	key.reg = reg;
744 	key.def = 0;
745 
746 	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
747 		    sizeof(struct reg_default), regcache_default_cmp);
748 
749 	if (r)
750 		return r - map->reg_defaults;
751 	else
752 		return -ENOENT;
753 }
754 
755 static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
756 {
757 	if (!cache_present)
758 		return true;
759 
760 	return test_bit(idx, cache_present);
761 }
762 
763 int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
764 {
765 	int ret;
766 
767 	if (!regcache_reg_needs_sync(map, reg, val))
768 		return 0;
769 
770 	map->cache_bypass = true;
771 
772 	ret = _regmap_write(map, reg, val);
773 
774 	map->cache_bypass = false;
775 
776 	if (ret != 0) {
777 		dev_err(map->dev, "Unable to sync register %#x. %d\n",
778 			reg, ret);
779 		return ret;
780 	}
781 	dev_dbg(map->dev, "Synced register %#x, value %#x\n",
782 		reg, val);
783 
784 	return 0;
785 }
786 
787 static int regcache_sync_block_single(struct regmap *map, void *block,
788 				      unsigned long *cache_present,
789 				      unsigned int block_base,
790 				      unsigned int start, unsigned int end)
791 {
792 	unsigned int i, regtmp, val;
793 	int ret;
794 
795 	for (i = start; i < end; i++) {
796 		regtmp = block_base + (i * map->reg_stride);
797 
798 		if (!regcache_reg_present(cache_present, i) ||
799 		    !regmap_writeable(map, regtmp))
800 			continue;
801 
802 		val = regcache_get_val(map, block, i);
803 		ret = regcache_sync_val(map, regtmp, val);
804 		if (ret != 0)
805 			return ret;
806 	}
807 
808 	return 0;
809 }
810 
811 static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
812 					 unsigned int base, unsigned int cur)
813 {
814 	size_t val_bytes = map->format.val_bytes;
815 	int ret, count;
816 
817 	if (*data == NULL)
818 		return 0;
819 
820 	count = (cur - base) / map->reg_stride;
821 
822 	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
823 		count * val_bytes, count, base, cur - map->reg_stride);
824 
825 	map->cache_bypass = true;
826 
827 	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
828 	if (ret)
829 		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
830 			base, cur - map->reg_stride, ret);
831 
832 	map->cache_bypass = false;
833 
834 	*data = NULL;
835 
836 	return ret;
837 }
838 
/*
 * Sync a cache block using raw writes, coalescing runs of adjacent
 * registers that all need syncing into single bulk writes.  @data
 * points at the first value of the currently open run inside @block
 * (NULL when no run is open) and @base is that value's register
 * address.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned long *cache_present,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	unsigned int val;
	int ret;

	for (unsigned int i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* An absent or unwriteable register ends the current run */
		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		/* A value already matching the hardware also ends the run */
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Open a new run at this register if none is pending */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run is still open at the end of the range */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}
880 
881 int regcache_sync_block(struct regmap *map, void *block,
882 			unsigned long *cache_present,
883 			unsigned int block_base, unsigned int start,
884 			unsigned int end)
885 {
886 	if (regmap_can_raw_write(map) && !map->use_single_write)
887 		return regcache_sync_block_raw(map, block, cache_present,
888 					       block_base, start, end);
889 	else
890 		return regcache_sync_block_single(map, block, cache_present,
891 						  block_base, start, end);
892 }
893