// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};

static int regcache_defaults_cmp(const void *a, const void *b)
{
	const struct reg_default *x = a;
	const struct reg_default *y = b;

	if (x->reg > y->reg)
		return 1;
	else if (x->reg < y->reg)
		return -1;
	else
		return 0;
}

void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
	sort(defaults, ndefaults, sizeof(*defaults),
	     regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without num_reg_defaults!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"num_reg_defaults is set without register defaults!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = BITS_TO_BYTES(config->val_bits);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
					sizeof(*map->reg_defaults), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register_is_set && map->num_reg_defaults_raw) {
		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
		map->max_register_is_set = true;
	}

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->init(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
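
/*
 * Configuration sketch (hypothetical driver, the "foo" names are made up):
 * a driver opts into the register cache by filling in the cache-related
 * fields of its regmap_config, which regcache_init() validates and copies
 * above.
 *
 *	static const struct reg_default foo_reg_defaults[] = {
 *		{ .reg = 0x00, .def = 0x0000 },
 *		{ .reg = 0x01, .def = 0x1234 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_MAPLE,
 *		.reg_defaults = foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *	};
 *
 * Because regcache_init() duplicates reg_defaults, the driver's table does
 * not have to outlive regmap registration.
 */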

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}
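
/*
 * Usage sketch (illustrative, not part of this file): regcache_read() is an
 * internal helper for the regmap core rather than a driver-facing API.  A
 * caller is expected to try the cache first and only fall back to a bus
 * access on a miss, roughly:
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_read(map, reg, &val);
 *		if (ret == 0)
 *			return 0;	// served from the cache
 *	}
 *	// cache miss (or bypass): go to the hardware instead
 */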

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}
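
/*
 * Usage sketch (illustrative, not part of this file): the regmap core is
 * expected to write through to the cache before (or instead of) touching
 * the hardware, roughly:
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_write(map, reg, val);
 *		if (ret != 0)
 *			return ret;
 *	}
 *	if (map->cache_only) {
 *		map->cache_dirty = true;
 *		return 0;	// defer the hardware write until regcache_sync()
 *	}
 *	// otherwise continue with the bus write
 */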

bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
			     unsigned int val)
{
	int ret;

	if (!regmap_writeable(map, reg))
		return false;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret == -ENOENT)
			continue;
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/* Used with rb_for_each() below: match every node so the whole tree is visited. */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;
	struct rb_node *node;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;

	/*
	 * If we did any paging with cache bypassed and a cached
	 * paging register then the register and cache state might
	 * have gone out of sync, so force writes of all the paging
	 * registers.
	 */
	rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		/* If there's nothing in the cache there's nothing to sync */
		if (regcache_read(map, this->selector_reg, &i) != 0)
			continue;

		ret = _regmap_write(map, this->selector_reg, i);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				this->selector_reg, i, ret);
			break;
		}
	}

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
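
/*
 * Typical usage sketch (hypothetical driver code, "map" and "dev" are the
 * caller's own variables): after the device regains power on resume,
 * re-enable hardware access and replay the cached register values.
 *
 *	// power has just been restored; the chip is back at its reset
 *	// defaults, so tell regcache and write the cached state back out
 *	regcache_cache_only(map, false);
 *	regcache_mark_dirty(map);
 *	ret = regcache_sync(map);
 *	if (ret)
 *		dev_err(dev, "Failed to restore register cache: %d\n", ret);
 */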

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
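
/*
 * Usage sketch (hypothetical, register addresses are made up): if only one
 * block of the device was reset, sync just that range instead of the whole
 * map.
 *
 *	// hypothetical: the coefficient block 0x100..0x1ff lost its contents
 *	ret = regcache_sync_region(map, 0x100, 0x1ff);
 *	if (ret)
 *		dev_err(dev, "Failed to sync coefficient block: %d\n", ret);
 */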

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
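
/*
 * Usage sketch (hypothetical, addresses made up): drop cached values that
 * are known to be stale, for example a range the hardware reloads on its
 * own, so they are neither returned by cached reads nor written back by a
 * later regcache_sync().
 *
 *	// hypothetical: 0x40..0x4f are reloaded from OTP by the chip itself
 *	ret = regcache_drop_region(map, 0x40, 0x4f);
 */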

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
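
/*
 * Usage sketch (hypothetical driver code): while the device is powered off,
 * keep the regmap usable by routing all writes into the cache, then replay
 * them when power returns.
 *
 *	// suspend path: the device is about to become unreachable
 *	regcache_cache_only(map, true);
 *
 *	// ... device power removed and later restored ...
 *
 *	// resume path: hardware is reachable again
 *	regcache_cache_only(map, false);
 *	regcache_sync(map);
 */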

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
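
/*
 * Usage sketch (hypothetical driver code): call this when the register
 * contents are known to have been lost, typically around removing power,
 * so the next regcache_sync() rewrites everything that differs from the
 * defaults.
 *
 *	// suspend path
 *	regcache_cache_only(map, true);
 *	regcache_mark_dirty(map);
 *	// ... power is now removed from the device ...
 */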

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
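
/*
 * Usage sketch (hypothetical; FOO_COEFF_BASE, blob and blob_len are made-up
 * names): bypass the cache for a bulk transfer whose contents should not be
 * retained or replayed by a later sync.
 *
 *	regcache_cache_bypass(map, true);
 *	ret = regmap_raw_write(map, FOO_COEFF_BASE, blob, blob_len);
 *	regcache_cache_bypass(map, false);
 */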

/**
 * regcache_reg_cached - Check if a register is cached
 *
 * @map: map to check
 * @reg: register to check
 *
 * Reports if a register is cached.
 */
bool regcache_reg_cached(struct regmap *map, unsigned int reg)
{
	unsigned int val;
	int ret;

	map->lock(map->lock_arg);

	ret = regcache_read(map, reg, &val);

	map->unlock(map->lock_arg);

	return ret == 0;
}
EXPORT_SYMBOL_GPL(regcache_reg_cached);
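
/*
 * Usage sketch (hypothetical; FOO_GAIN and FOO_GAIN_DEFAULT are made-up
 * names): only program an initial value if nothing has been cached for the
 * register yet, e.g. to avoid clobbering a value written earlier while the
 * map was in cache-only mode.
 *
 *	if (!regcache_reg_cached(map, FOO_GAIN))
 *		regmap_write(map, FOO_GAIN, FOO_GAIN_DEFAULT);
 */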

void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!regcache_reg_needs_sync(map, reg, val))
		return 0;

	map->cache_bypass = true;

	ret = _regmap_write(map, reg, val);

	map->cache_bypass = false;

	if (ret != 0) {
		dev_err(map->dev, "Unable to sync register %#x. %d\n",
			reg, ret);
		return ret;
	}
	dev_dbg(map->dev, "Synced register %#x, value %#x\n",
		reg, val);

	return 0;
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		ret = regcache_sync_val(map, regtmp, val);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned long *cache_present,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}