// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"
17
18 static const struct regcache_ops *cache_types[] = {
19 ®cache_flat_sparse_ops,
20 ®cache_rbtree_ops,
21 ®cache_maple_ops,
22 ®cache_flat_ops,
23 };
24
regcache_defaults_cmp(const void * a,const void * b)25 static int regcache_defaults_cmp(const void *a, const void *b)
26 {
27 const struct reg_default *x = a;
28 const struct reg_default *y = b;
29
30 if (x->reg > y->reg)
31 return 1;
32 else if (x->reg < y->reg)
33 return -1;
34 else
35 return 0;
36 }
37
regcache_sort_defaults(struct reg_default * defaults,unsigned int ndefaults)38 void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
39 {
40 sort(defaults, ndefaults, sizeof(*defaults),
41 regcache_defaults_cmp, NULL);
42 }
43 EXPORT_SYMBOL_GPL(regcache_sort_defaults);
44
regcache_hw_init(struct regmap * map)45 static int regcache_hw_init(struct regmap *map)
46 {
47 int i, j;
48 int ret;
49 int count;
50 unsigned int reg, val;
51 void *tmp_buf;
52
53 if (!map->num_reg_defaults_raw)
54 return -EINVAL;
55
56 /* calculate the size of reg_defaults */
57 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
58 if (regmap_readable(map, i * map->reg_stride) &&
59 !regmap_volatile(map, i * map->reg_stride))
60 count++;
61
62 /* all registers are unreadable or volatile, so just bypass */
63 if (!count) {
64 map->cache_bypass = true;
65 return 0;
66 }
67
68 map->num_reg_defaults = count;
69 map->reg_defaults = kmalloc_objs(struct reg_default, count);
70 if (!map->reg_defaults)
71 return -ENOMEM;
72
73 if (!map->reg_defaults_raw) {
74 bool cache_bypass = map->cache_bypass;
75 dev_dbg(map->dev, "No cache defaults, reading back from HW\n");
76
77 /* Bypass the cache access till data read from HW */
78 map->cache_bypass = true;
79 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
80 if (!tmp_buf) {
81 ret = -ENOMEM;
82 goto err_free;
83 }
84 ret = regmap_raw_read(map, 0, tmp_buf,
85 map->cache_size_raw);
86 map->cache_bypass = cache_bypass;
87 if (ret == 0) {
88 map->reg_defaults_raw = tmp_buf;
89 map->cache_free = true;
90 } else {
91 kfree(tmp_buf);
92 }
93 }
94
95 /* fill the reg_defaults */
96 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
97 reg = i * map->reg_stride;
98
99 if (!regmap_readable(map, reg))
100 continue;
101
102 if (regmap_volatile(map, reg))
103 continue;
104
105 if (map->reg_defaults_raw) {
106 val = regcache_get_val(map, map->reg_defaults_raw, i);
107 } else {
108 bool cache_bypass = map->cache_bypass;
109
110 map->cache_bypass = true;
111 ret = regmap_read(map, reg, &val);
112 map->cache_bypass = cache_bypass;
113 if (ret != 0) {
114 dev_err(map->dev, "Failed to read %d: %d\n",
115 reg, ret);
116 goto err_free;
117 }
118 }
119
120 map->reg_defaults[j].reg = reg;
121 map->reg_defaults[j].def = val;
122 j++;
123 }
124
125 return 0;
126
127 err_free:
128 kfree(map->reg_defaults);
129
130 return ret;
131 }
132
regcache_init(struct regmap * map,const struct regmap_config * config)133 int regcache_init(struct regmap *map, const struct regmap_config *config)
134 {
135 int ret;
136 int i;
137 void *tmp_buf;
138
139 if (map->cache_type == REGCACHE_NONE) {
140 if (config->reg_defaults || config->num_reg_defaults_raw)
141 dev_warn(map->dev,
142 "No cache used with register defaults set!\n");
143
144 map->cache_bypass = true;
145 return 0;
146 }
147
148 if (config->reg_defaults && !config->num_reg_defaults) {
149 dev_err(map->dev,
150 "Register defaults are set without the number!\n");
151 return -EINVAL;
152 }
153
154 if (config->num_reg_defaults && !config->reg_defaults) {
155 dev_err(map->dev,
156 "Register defaults number are set without the reg!\n");
157 return -EINVAL;
158 }
159
160 for (i = 0; i < config->num_reg_defaults; i++)
161 if (config->reg_defaults[i].reg % map->reg_stride)
162 return -EINVAL;
163
164 for (i = 0; i < ARRAY_SIZE(cache_types); i++)
165 if (cache_types[i]->type == map->cache_type)
166 break;
167
168 if (i == ARRAY_SIZE(cache_types)) {
169 dev_err(map->dev, "Could not match cache type: %d\n",
170 map->cache_type);
171 return -EINVAL;
172 }
173
174 map->num_reg_defaults = config->num_reg_defaults;
175 map->num_reg_defaults_raw = config->num_reg_defaults_raw;
176 map->reg_defaults_raw = config->reg_defaults_raw;
177 map->cache_word_size = BITS_TO_BYTES(config->val_bits);
178 map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
179
180 map->cache = NULL;
181 map->cache_ops = cache_types[i];
182
183 if (!map->cache_ops->read ||
184 !map->cache_ops->write ||
185 !map->cache_ops->name)
186 return -EINVAL;
187
188 /* We still need to ensure that the reg_defaults
189 * won't vanish from under us. We'll need to make
190 * a copy of it.
191 */
192 if (config->reg_defaults) {
193 tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
194 sizeof(*map->reg_defaults), GFP_KERNEL);
195 if (!tmp_buf)
196 return -ENOMEM;
197 map->reg_defaults = tmp_buf;
198 } else if (map->num_reg_defaults_raw) {
199 /* Some devices such as PMICs don't have cache defaults,
200 * we cope with this by reading back the HW registers and
201 * crafting the cache defaults by hand.
202 */
203 ret = regcache_hw_init(map);
204 if (ret < 0)
205 return ret;
206 if (map->cache_bypass)
207 return 0;
208 }
209
210 if (!map->max_register_is_set && map->num_reg_defaults_raw) {
211 map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
212 map->max_register_is_set = true;
213 }
214
215 if (map->cache_ops->init) {
216 dev_dbg(map->dev, "Initializing %s cache\n",
217 map->cache_ops->name);
218 map->lock(map->lock_arg);
219 ret = map->cache_ops->init(map);
220 map->unlock(map->lock_arg);
221 if (ret)
222 goto err_free;
223 }
224
225 if (map->cache_ops->populate &&
226 (map->num_reg_defaults || map->reg_default_cb)) {
227 dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
228 map->lock(map->lock_arg);
229 ret = map->cache_ops->populate(map);
230 map->unlock(map->lock_arg);
231 if (ret)
232 goto err_exit;
233 }
234 return 0;
235
236 err_exit:
237 if (map->cache_ops->exit) {
238 dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
239 map->lock(map->lock_arg);
240 ret = map->cache_ops->exit(map);
241 map->unlock(map->lock_arg);
242 }
243 err_free:
244 kfree(map->reg_defaults);
245 if (map->cache_free)
246 kfree(map->reg_defaults_raw);
247
248 return ret;
249 }
250
regcache_exit(struct regmap * map)251 void regcache_exit(struct regmap *map)
252 {
253 if (map->cache_type == REGCACHE_NONE)
254 return;
255
256 BUG_ON(!map->cache_ops);
257
258 kfree(map->reg_defaults);
259 if (map->cache_free)
260 kfree(map->reg_defaults_raw);
261
262 if (map->cache_ops->exit) {
263 dev_dbg(map->dev, "Destroying %s cache\n",
264 map->cache_ops->name);
265 map->lock(map->lock_arg);
266 map->cache_ops->exit(map);
267 map->unlock(map->lock_arg);
268 }
269 }
270
271 /**
272 * regcache_read - Fetch the value of a given register from the cache.
273 *
274 * @map: map to configure.
275 * @reg: The register index.
276 * @value: The value to be returned.
277 *
278 * Return a negative value on failure, 0 on success.
279 */
regcache_read(struct regmap * map,unsigned int reg,unsigned int * value)280 int regcache_read(struct regmap *map,
281 unsigned int reg, unsigned int *value)
282 {
283 int ret;
284
285 if (map->cache_type == REGCACHE_NONE)
286 return -EINVAL;
287
288 BUG_ON(!map->cache_ops);
289
290 if (!regmap_volatile(map, reg)) {
291 ret = map->cache_ops->read(map, reg, value);
292
293 if (ret == 0)
294 trace_regmap_reg_read_cache(map, reg, *value);
295
296 return ret;
297 }
298
299 return -EINVAL;
300 }
301
302 /**
303 * regcache_write - Set the value of a given register in the cache.
304 *
305 * @map: map to configure.
306 * @reg: The register index.
307 * @value: The new register value.
308 *
309 * Return a negative value on failure, 0 on success.
310 */
regcache_write(struct regmap * map,unsigned int reg,unsigned int value)311 int regcache_write(struct regmap *map,
312 unsigned int reg, unsigned int value)
313 {
314 if (map->cache_type == REGCACHE_NONE)
315 return 0;
316
317 BUG_ON(!map->cache_ops);
318
319 if (!regmap_volatile(map, reg))
320 return map->cache_ops->write(map, reg, value);
321
322 return 0;
323 }
324
regcache_reg_needs_sync(struct regmap * map,unsigned int reg,unsigned int val)325 bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
326 unsigned int val)
327 {
328 int ret;
329
330 if (!regmap_writeable(map, reg))
331 return false;
332
333 /* If we don't know the chip just got reset, then sync everything. */
334 if (!map->no_sync_defaults)
335 return true;
336
337 /* Is this the hardware default? If so skip. */
338 ret = regcache_lookup_reg(map, reg);
339 if (ret >= 0 && val == map->reg_defaults[ret].def)
340 return false;
341 return true;
342 }
343
regcache_default_sync(struct regmap * map,unsigned int min,unsigned int max)344 static int regcache_default_sync(struct regmap *map, unsigned int min,
345 unsigned int max)
346 {
347 unsigned int reg;
348
349 for (reg = min; reg <= max; reg += map->reg_stride) {
350 unsigned int val;
351 int ret;
352
353 if (regmap_volatile(map, reg) ||
354 !regmap_writeable(map, reg))
355 continue;
356
357 ret = regcache_read(map, reg, &val);
358 if (ret == -ENOENT)
359 continue;
360 if (ret)
361 return ret;
362
363 if (!regcache_reg_needs_sync(map, reg, val))
364 continue;
365
366 map->cache_bypass = true;
367 ret = _regmap_write(map, reg, val);
368 map->cache_bypass = false;
369 if (ret) {
370 dev_err(map->dev, "Unable to sync register %#x. %d\n",
371 reg, ret);
372 return ret;
373 }
374 dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
375 }
376
377 return 0;
378 }
379
/*
 * rb_for_each() comparator that matches every node, so the iteration
 * in regcache_sync() visits the whole range tree.
 */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}

385 /**
386 * regcache_sync - Sync the register cache with the hardware.
387 *
388 * @map: map to configure.
389 *
390 * Any registers that should not be synced should be marked as
391 * volatile. In general drivers can choose not to use the provided
392 * syncing functionality if they so require.
393 *
394 * Return a negative value on failure, 0 on success.
395 */
regcache_sync(struct regmap * map)396 int regcache_sync(struct regmap *map)
397 {
398 int ret = 0;
399 unsigned int i;
400 const char *name;
401 bool bypass;
402 struct rb_node *node;
403
404 if (WARN_ON(map->cache_type == REGCACHE_NONE))
405 return -EINVAL;
406
407 BUG_ON(!map->cache_ops);
408
409 map->lock(map->lock_arg);
410 /* Remember the initial bypass state */
411 bypass = map->cache_bypass;
412 dev_dbg(map->dev, "Syncing %s cache\n",
413 map->cache_ops->name);
414 name = map->cache_ops->name;
415 trace_regcache_sync(map, name, "start");
416
417 if (!map->cache_dirty)
418 goto out;
419
420 /* Apply any patch first */
421 map->cache_bypass = true;
422 for (i = 0; i < map->patch_regs; i++) {
423 ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
424 if (ret != 0) {
425 dev_err(map->dev, "Failed to write %x = %x: %d\n",
426 map->patch[i].reg, map->patch[i].def, ret);
427 goto out;
428 }
429 }
430 map->cache_bypass = false;
431
432 if (map->cache_ops->sync)
433 ret = map->cache_ops->sync(map, 0, map->max_register);
434 else
435 ret = regcache_default_sync(map, 0, map->max_register);
436
437 if (ret == 0)
438 map->cache_dirty = false;
439
440 out:
441 /* Restore the bypass state */
442 map->cache_bypass = bypass;
443 map->no_sync_defaults = false;
444
445 /*
446 * If we did any paging with cache bypassed and a cached
447 * paging register then the register and cache state might
448 * have gone out of sync, force writes of all the paging
449 * registers.
450 */
451 rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
452 struct regmap_range_node *this =
453 rb_entry(node, struct regmap_range_node, node);
454
455 /* If there's nothing in the cache there's nothing to sync */
456 if (regcache_read(map, this->selector_reg, &i) != 0)
457 continue;
458
459 ret = _regmap_write(map, this->selector_reg, i);
460 if (ret != 0) {
461 dev_err(map->dev, "Failed to write %x = %x: %d\n",
462 this->selector_reg, i, ret);
463 break;
464 }
465 }
466
467 map->unlock(map->lock_arg);
468
469 regmap_async_complete(map);
470
471 trace_regcache_sync(map, name, "stop");
472
473 return ret;
474 }
475 EXPORT_SYMBOL_GPL(regcache_sync);
476
477 /**
478 * regcache_sync_region - Sync part of the register cache with the hardware.
479 *
480 * @map: map to sync.
481 * @min: first register to sync
482 * @max: last register to sync
483 *
484 * Write all non-default register values in the specified region to
485 * the hardware.
486 *
487 * Return a negative value on failure, 0 on success.
488 */
regcache_sync_region(struct regmap * map,unsigned int min,unsigned int max)489 int regcache_sync_region(struct regmap *map, unsigned int min,
490 unsigned int max)
491 {
492 int ret = 0;
493 const char *name;
494 bool bypass;
495
496 if (WARN_ON(map->cache_type == REGCACHE_NONE))
497 return -EINVAL;
498
499 BUG_ON(!map->cache_ops);
500
501 map->lock(map->lock_arg);
502
503 /* Remember the initial bypass state */
504 bypass = map->cache_bypass;
505
506 name = map->cache_ops->name;
507 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
508
509 trace_regcache_sync(map, name, "start region");
510
511 if (!map->cache_dirty)
512 goto out;
513
514 map->async = true;
515
516 if (map->cache_ops->sync)
517 ret = map->cache_ops->sync(map, min, max);
518 else
519 ret = regcache_default_sync(map, min, max);
520
521 out:
522 /* Restore the bypass state */
523 map->cache_bypass = bypass;
524 map->async = false;
525 map->no_sync_defaults = false;
526 map->unlock(map->lock_arg);
527
528 regmap_async_complete(map);
529
530 trace_regcache_sync(map, name, "stop region");
531
532 return ret;
533 }
534 EXPORT_SYMBOL_GPL(regcache_sync_region);
535
536 /**
537 * regcache_drop_region - Discard part of the register cache
538 *
539 * @map: map to operate on
540 * @min: first register to discard
541 * @max: last register to discard
542 *
543 * Discard part of the register cache.
544 *
545 * Return a negative value on failure, 0 on success.
546 */
regcache_drop_region(struct regmap * map,unsigned int min,unsigned int max)547 int regcache_drop_region(struct regmap *map, unsigned int min,
548 unsigned int max)
549 {
550 int ret = 0;
551
552 if (!map->cache_ops || !map->cache_ops->drop)
553 return -EINVAL;
554
555 map->lock(map->lock_arg);
556
557 trace_regcache_drop_region(map, min, max);
558
559 ret = map->cache_ops->drop(map, min, max);
560
561 map->unlock(map->lock_arg);
562
563 return ret;
564 }
565 EXPORT_SYMBOL_GPL(regcache_drop_region);
566
567 /**
568 * regcache_cache_only - Put a register map into cache only mode
569 *
570 * @map: map to configure
571 * @enable: flag if changes should be written to the hardware
572 *
573 * When a register map is marked as cache only writes to the register
574 * map API will only update the register cache, they will not cause
575 * any hardware changes. This is useful for allowing portions of
576 * drivers to act as though the device were functioning as normal when
577 * it is disabled for power saving reasons.
578 */
regcache_cache_only(struct regmap * map,bool enable)579 void regcache_cache_only(struct regmap *map, bool enable)
580 {
581 map->lock(map->lock_arg);
582 WARN_ON(map->cache_type != REGCACHE_NONE &&
583 map->cache_bypass && enable);
584 map->cache_only = enable;
585 trace_regmap_cache_only(map, enable);
586 map->unlock(map->lock_arg);
587 }
588 EXPORT_SYMBOL_GPL(regcache_cache_only);
589
590 /**
591 * regcache_mark_dirty - Indicate that HW registers were reset to default values
592 *
593 * @map: map to mark
594 *
595 * Inform regcache that the device has been powered down or reset, so that
596 * on resume, regcache_sync() knows to write out all non-default values
597 * stored in the cache.
598 *
599 * If this function is not called, regcache_sync() will assume that
600 * the hardware state still matches the cache state, modulo any writes that
601 * happened when cache_only was true.
602 */
regcache_mark_dirty(struct regmap * map)603 void regcache_mark_dirty(struct regmap *map)
604 {
605 map->lock(map->lock_arg);
606 map->cache_dirty = true;
607 map->no_sync_defaults = true;
608 map->unlock(map->lock_arg);
609 }
610 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
611
612 /**
613 * regcache_cache_bypass - Put a register map into cache bypass mode
614 *
615 * @map: map to configure
616 * @enable: flag if changes should not be written to the cache
617 *
618 * When a register map is marked with the cache bypass option, writes
619 * to the register map API will only update the hardware and not
620 * the cache directly. This is useful when syncing the cache back to
621 * the hardware.
622 */
regcache_cache_bypass(struct regmap * map,bool enable)623 void regcache_cache_bypass(struct regmap *map, bool enable)
624 {
625 map->lock(map->lock_arg);
626 WARN_ON(map->cache_only && enable);
627 map->cache_bypass = enable;
628 trace_regmap_cache_bypass(map, enable);
629 map->unlock(map->lock_arg);
630 }
631 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
632
633 /**
634 * regcache_reg_cached - Check if a register is cached
635 *
636 * @map: map to check
637 * @reg: register to check
638 *
639 * Reports if a register is cached.
640 */
regcache_reg_cached(struct regmap * map,unsigned int reg)641 bool regcache_reg_cached(struct regmap *map, unsigned int reg)
642 {
643 unsigned int val;
644 int ret;
645
646 map->lock(map->lock_arg);
647
648 ret = regcache_read(map, reg, &val);
649
650 map->unlock(map->lock_arg);
651
652 return ret == 0;
653 }
654 EXPORT_SYMBOL_GPL(regcache_reg_cached);
655
regcache_set_val(struct regmap * map,void * base,unsigned int idx,unsigned int val)656 void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
657 unsigned int val)
658 {
659 /* Use device native format if possible */
660 if (map->format.format_val) {
661 map->format.format_val(base + (map->cache_word_size * idx),
662 val, 0);
663 return;
664 }
665
666 switch (map->cache_word_size) {
667 case 1: {
668 u8 *cache = base;
669
670 cache[idx] = val;
671 break;
672 }
673 case 2: {
674 u16 *cache = base;
675
676 cache[idx] = val;
677 break;
678 }
679 case 4: {
680 u32 *cache = base;
681
682 cache[idx] = val;
683 break;
684 }
685 default:
686 BUG();
687 }
688 }
689
regcache_get_val(struct regmap * map,const void * base,unsigned int idx)690 unsigned int regcache_get_val(struct regmap *map, const void *base,
691 unsigned int idx)
692 {
693 if (!base)
694 return -EINVAL;
695
696 /* Use device native format if possible */
697 if (map->format.parse_val)
698 return map->format.parse_val(regcache_get_val_addr(map, base,
699 idx));
700
701 switch (map->cache_word_size) {
702 case 1: {
703 const u8 *cache = base;
704
705 return cache[idx];
706 }
707 case 2: {
708 const u16 *cache = base;
709
710 return cache[idx];
711 }
712 case 4: {
713 const u32 *cache = base;
714
715 return cache[idx];
716 }
717 default:
718 BUG();
719 }
720 /* unreachable */
721 return -1;
722 }
723
regcache_default_cmp(const void * a,const void * b)724 static int regcache_default_cmp(const void *a, const void *b)
725 {
726 const struct reg_default *_a = a;
727 const struct reg_default *_b = b;
728
729 return _a->reg - _b->reg;
730 }
731
regcache_lookup_reg(struct regmap * map,unsigned int reg)732 int regcache_lookup_reg(struct regmap *map, unsigned int reg)
733 {
734 struct reg_default key;
735 struct reg_default *r;
736
737 key.reg = reg;
738 key.def = 0;
739
740 r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
741 sizeof(struct reg_default), regcache_default_cmp);
742
743 if (r)
744 return r - map->reg_defaults;
745 else
746 return -ENOENT;
747 }
748
regcache_reg_present(unsigned long * cache_present,unsigned int idx)749 static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
750 {
751 if (!cache_present)
752 return true;
753
754 return test_bit(idx, cache_present);
755 }
756
regcache_sync_val(struct regmap * map,unsigned int reg,unsigned int val)757 int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
758 {
759 int ret;
760
761 if (!regcache_reg_needs_sync(map, reg, val))
762 return 0;
763
764 map->cache_bypass = true;
765
766 ret = _regmap_write(map, reg, val);
767
768 map->cache_bypass = false;
769
770 if (ret != 0) {
771 dev_err(map->dev, "Unable to sync register %#x. %d\n",
772 reg, ret);
773 return ret;
774 }
775 dev_dbg(map->dev, "Synced register %#x, value %#x\n",
776 reg, val);
777
778 return 0;
779 }
780
regcache_sync_block_single(struct regmap * map,void * block,unsigned long * cache_present,unsigned int block_base,unsigned int start,unsigned int end)781 static int regcache_sync_block_single(struct regmap *map, void *block,
782 unsigned long *cache_present,
783 unsigned int block_base,
784 unsigned int start, unsigned int end)
785 {
786 unsigned int i, regtmp, val;
787 int ret;
788
789 for (i = start; i < end; i++) {
790 regtmp = block_base + (i * map->reg_stride);
791
792 if (!regcache_reg_present(cache_present, i) ||
793 !regmap_writeable(map, regtmp))
794 continue;
795
796 val = regcache_get_val(map, block, i);
797 ret = regcache_sync_val(map, regtmp, val);
798 if (ret != 0)
799 return ret;
800 }
801
802 return 0;
803 }
804
regcache_sync_block_raw_flush(struct regmap * map,const void ** data,unsigned int base,unsigned int cur)805 static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
806 unsigned int base, unsigned int cur)
807 {
808 size_t val_bytes = map->format.val_bytes;
809 int ret, count;
810
811 if (*data == NULL)
812 return 0;
813
814 count = (cur - base) / map->reg_stride;
815
816 dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
817 count * val_bytes, count, base, cur - map->reg_stride);
818
819 map->cache_bypass = true;
820
821 ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
822 if (ret)
823 dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
824 base, cur - map->reg_stride, ret);
825
826 map->cache_bypass = false;
827
828 *data = NULL;
829
830 return ret;
831 }
832
regcache_sync_block_raw(struct regmap * map,void * block,unsigned long * cache_present,unsigned int block_base,unsigned int start,unsigned int end)833 static int regcache_sync_block_raw(struct regmap *map, void *block,
834 unsigned long *cache_present,
835 unsigned int block_base, unsigned int start,
836 unsigned int end)
837 {
838 unsigned int i, val;
839 unsigned int regtmp = 0;
840 unsigned int base = 0;
841 const void *data = NULL;
842 int ret;
843
844 for (i = start; i < end; i++) {
845 regtmp = block_base + (i * map->reg_stride);
846
847 if (!regcache_reg_present(cache_present, i) ||
848 !regmap_writeable(map, regtmp)) {
849 ret = regcache_sync_block_raw_flush(map, &data,
850 base, regtmp);
851 if (ret != 0)
852 return ret;
853 continue;
854 }
855
856 val = regcache_get_val(map, block, i);
857 if (!regcache_reg_needs_sync(map, regtmp, val)) {
858 ret = regcache_sync_block_raw_flush(map, &data,
859 base, regtmp);
860 if (ret != 0)
861 return ret;
862 continue;
863 }
864
865 if (!data) {
866 data = regcache_get_val_addr(map, block, i);
867 base = regtmp;
868 }
869 }
870
871 return regcache_sync_block_raw_flush(map, &data, base, regtmp +
872 map->reg_stride);
873 }
874
regcache_sync_block(struct regmap * map,void * block,unsigned long * cache_present,unsigned int block_base,unsigned int start,unsigned int end)875 int regcache_sync_block(struct regmap *map, void *block,
876 unsigned long *cache_present,
877 unsigned int block_base, unsigned int start,
878 unsigned int end)
879 {
880 if (regmap_can_raw_write(map) && !map->use_single_write)
881 return regcache_sync_block_raw(map, block, cache_present,
882 block_base, start, end);
883 else
884 return regcache_sync_block_single(map, block, cache_present,
885 block_base, start, end);
886 }
887