1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Register map access API
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/export.h>
12 #include <linux/mutex.h>
13 #include <linux/err.h>
14 #include <linux/property.h>
15 #include <linux/rbtree.h>
16 #include <linux/sched.h>
17 #include <linux/delay.h>
18 #include <linux/log2.h>
19 #include <linux/hwspinlock.h>
20 #include <linux/unaligned.h>
21
22 #define CREATE_TRACE_POINTS
23 #include "trace.h"
24
25 #include "internal.h"
26
27 /*
28 * Sometimes for failures during very early init the trace
29 * infrastructure isn't available early enough to be used. For this
30 * sort of problem defining LOG_DEVICE will add printks for basic
31 * register I/O on a specific device.
32 */
33 #undef LOG_DEVICE
34
35 #ifdef LOG_DEVICE
regmap_should_log(struct regmap * map)36 static inline bool regmap_should_log(struct regmap *map)
37 {
38 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
39 }
40 #else
regmap_should_log(struct regmap * map)41 static inline bool regmap_should_log(struct regmap *map) { return false; }
42 #endif
43
44
45 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
46 unsigned int mask, unsigned int val,
47 bool *change, bool force_write);
48
49 static int _regmap_bus_reg_read(void *context, unsigned int reg,
50 unsigned int *val);
51 static int _regmap_bus_read(void *context, unsigned int reg,
52 unsigned int *val);
53 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
54 unsigned int val);
55 static int _regmap_bus_reg_write(void *context, unsigned int reg,
56 unsigned int val);
57 static int _regmap_bus_raw_write(void *context, unsigned int reg,
58 unsigned int val);
59
regmap_reg_in_ranges(unsigned int reg,const struct regmap_range * ranges,unsigned int nranges)60 bool regmap_reg_in_ranges(unsigned int reg,
61 const struct regmap_range *ranges,
62 unsigned int nranges)
63 {
64 const struct regmap_range *r;
65 int i;
66
67 for (i = 0, r = ranges; i < nranges; i++, r++)
68 if (regmap_reg_in_range(reg, r))
69 return true;
70 return false;
71 }
72 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
73
regmap_check_range_table(struct regmap * map,unsigned int reg,const struct regmap_access_table * table)74 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
75 const struct regmap_access_table *table)
76 {
77 /* Check "no ranges" first */
78 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
79 return false;
80
81 /* In case zero "yes ranges" are supplied, any reg is OK */
82 if (!table->n_yes_ranges)
83 return true;
84
85 return regmap_reg_in_ranges(reg, table->yes_ranges,
86 table->n_yes_ranges);
87 }
88 EXPORT_SYMBOL_GPL(regmap_check_range_table);
89
regmap_writeable(struct regmap * map,unsigned int reg)90 bool regmap_writeable(struct regmap *map, unsigned int reg)
91 {
92 if (map->max_register_is_set && reg > map->max_register)
93 return false;
94
95 if (map->writeable_reg)
96 return map->writeable_reg(map->dev, reg);
97
98 if (map->wr_table)
99 return regmap_check_range_table(map, reg, map->wr_table);
100
101 return true;
102 }
103
regmap_cached(struct regmap * map,unsigned int reg)104 bool regmap_cached(struct regmap *map, unsigned int reg)
105 {
106 int ret;
107 unsigned int val;
108
109 if (map->cache_type == REGCACHE_NONE)
110 return false;
111
112 if (!map->cache_ops)
113 return false;
114
115 if (map->max_register_is_set && reg > map->max_register)
116 return false;
117
118 map->lock(map->lock_arg);
119 ret = regcache_read(map, reg, &val);
120 map->unlock(map->lock_arg);
121 if (ret)
122 return false;
123
124 return true;
125 }
126
regmap_readable(struct regmap * map,unsigned int reg)127 bool regmap_readable(struct regmap *map, unsigned int reg)
128 {
129 if (!map->reg_read)
130 return false;
131
132 if (map->max_register_is_set && reg > map->max_register)
133 return false;
134
135 if (map->format.format_write)
136 return false;
137
138 if (map->readable_reg)
139 return map->readable_reg(map->dev, reg);
140
141 if (map->rd_table)
142 return regmap_check_range_table(map, reg, map->rd_table);
143
144 return true;
145 }
146
regmap_volatile(struct regmap * map,unsigned int reg)147 bool regmap_volatile(struct regmap *map, unsigned int reg)
148 {
149 if (!map->format.format_write && !regmap_readable(map, reg))
150 return false;
151
152 if (map->volatile_reg)
153 return map->volatile_reg(map->dev, reg);
154
155 if (map->volatile_table)
156 return regmap_check_range_table(map, reg, map->volatile_table);
157
158 if (map->cache_ops)
159 return false;
160 else
161 return true;
162 }
163
regmap_precious(struct regmap * map,unsigned int reg)164 bool regmap_precious(struct regmap *map, unsigned int reg)
165 {
166 if (!regmap_readable(map, reg))
167 return false;
168
169 if (map->precious_reg)
170 return map->precious_reg(map->dev, reg);
171
172 if (map->precious_table)
173 return regmap_check_range_table(map, reg, map->precious_table);
174
175 return false;
176 }
177
regmap_writeable_noinc(struct regmap * map,unsigned int reg)178 bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
179 {
180 if (map->writeable_noinc_reg)
181 return map->writeable_noinc_reg(map->dev, reg);
182
183 if (map->wr_noinc_table)
184 return regmap_check_range_table(map, reg, map->wr_noinc_table);
185
186 return true;
187 }
188
regmap_readable_noinc(struct regmap * map,unsigned int reg)189 bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
190 {
191 if (map->readable_noinc_reg)
192 return map->readable_noinc_reg(map->dev, reg);
193
194 if (map->rd_noinc_table)
195 return regmap_check_range_table(map, reg, map->rd_noinc_table);
196
197 return true;
198 }
199
regmap_volatile_range(struct regmap * map,unsigned int reg,size_t num)200 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
201 size_t num)
202 {
203 unsigned int i;
204
205 for (i = 0; i < num; i++)
206 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
207 return false;
208
209 return true;
210 }
211
regmap_format_12_20_write(struct regmap * map,unsigned int reg,unsigned int val)212 static void regmap_format_12_20_write(struct regmap *map,
213 unsigned int reg, unsigned int val)
214 {
215 u8 *out = map->work_buf;
216
217 out[0] = reg >> 4;
218 out[1] = (reg << 4) | (val >> 16);
219 out[2] = val >> 8;
220 out[3] = val;
221 }
222
223
regmap_format_2_6_write(struct regmap * map,unsigned int reg,unsigned int val)224 static void regmap_format_2_6_write(struct regmap *map,
225 unsigned int reg, unsigned int val)
226 {
227 u8 *out = map->work_buf;
228
229 *out = (reg << 6) | val;
230 }
231
regmap_format_4_12_write(struct regmap * map,unsigned int reg,unsigned int val)232 static void regmap_format_4_12_write(struct regmap *map,
233 unsigned int reg, unsigned int val)
234 {
235 __be16 *out = map->work_buf;
236 *out = cpu_to_be16((reg << 12) | val);
237 }
238
regmap_format_7_9_write(struct regmap * map,unsigned int reg,unsigned int val)239 static void regmap_format_7_9_write(struct regmap *map,
240 unsigned int reg, unsigned int val)
241 {
242 __be16 *out = map->work_buf;
243 *out = cpu_to_be16((reg << 9) | val);
244 }
245
regmap_format_7_17_write(struct regmap * map,unsigned int reg,unsigned int val)246 static void regmap_format_7_17_write(struct regmap *map,
247 unsigned int reg, unsigned int val)
248 {
249 u8 *out = map->work_buf;
250
251 out[2] = val;
252 out[1] = val >> 8;
253 out[0] = (val >> 16) | (reg << 1);
254 }
255
regmap_format_10_14_write(struct regmap * map,unsigned int reg,unsigned int val)256 static void regmap_format_10_14_write(struct regmap *map,
257 unsigned int reg, unsigned int val)
258 {
259 u8 *out = map->work_buf;
260
261 out[2] = val;
262 out[1] = (val >> 8) | (reg << 6);
263 out[0] = reg >> 2;
264 }
265
regmap_format_8(void * buf,unsigned int val,unsigned int shift)266 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
267 {
268 u8 *b = buf;
269
270 b[0] = val << shift;
271 }
272
regmap_format_16_be(void * buf,unsigned int val,unsigned int shift)273 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
274 {
275 put_unaligned_be16(val << shift, buf);
276 }
277
regmap_format_16_le(void * buf,unsigned int val,unsigned int shift)278 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
279 {
280 put_unaligned_le16(val << shift, buf);
281 }
282
regmap_format_16_native(void * buf,unsigned int val,unsigned int shift)283 static void regmap_format_16_native(void *buf, unsigned int val,
284 unsigned int shift)
285 {
286 u16 v = val << shift;
287
288 memcpy(buf, &v, sizeof(v));
289 }
290
regmap_format_24_be(void * buf,unsigned int val,unsigned int shift)291 static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
292 {
293 put_unaligned_be24(val << shift, buf);
294 }
295
regmap_format_32_be(void * buf,unsigned int val,unsigned int shift)296 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
297 {
298 put_unaligned_be32(val << shift, buf);
299 }
300
regmap_format_32_le(void * buf,unsigned int val,unsigned int shift)301 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
302 {
303 put_unaligned_le32(val << shift, buf);
304 }
305
regmap_format_32_native(void * buf,unsigned int val,unsigned int shift)306 static void regmap_format_32_native(void *buf, unsigned int val,
307 unsigned int shift)
308 {
309 u32 v = val << shift;
310
311 memcpy(buf, &v, sizeof(v));
312 }
313
regmap_parse_inplace_noop(void * buf)314 static void regmap_parse_inplace_noop(void *buf)
315 {
316 }
317
regmap_parse_8(const void * buf)318 static unsigned int regmap_parse_8(const void *buf)
319 {
320 const u8 *b = buf;
321
322 return b[0];
323 }
324
regmap_parse_16_be(const void * buf)325 static unsigned int regmap_parse_16_be(const void *buf)
326 {
327 return get_unaligned_be16(buf);
328 }
329
regmap_parse_16_le(const void * buf)330 static unsigned int regmap_parse_16_le(const void *buf)
331 {
332 return get_unaligned_le16(buf);
333 }
334
regmap_parse_16_be_inplace(void * buf)335 static void regmap_parse_16_be_inplace(void *buf)
336 {
337 u16 v = get_unaligned_be16(buf);
338
339 memcpy(buf, &v, sizeof(v));
340 }
341
regmap_parse_16_le_inplace(void * buf)342 static void regmap_parse_16_le_inplace(void *buf)
343 {
344 u16 v = get_unaligned_le16(buf);
345
346 memcpy(buf, &v, sizeof(v));
347 }
348
regmap_parse_16_native(const void * buf)349 static unsigned int regmap_parse_16_native(const void *buf)
350 {
351 u16 v;
352
353 memcpy(&v, buf, sizeof(v));
354 return v;
355 }
356
regmap_parse_24_be(const void * buf)357 static unsigned int regmap_parse_24_be(const void *buf)
358 {
359 return get_unaligned_be24(buf);
360 }
361
regmap_parse_32_be(const void * buf)362 static unsigned int regmap_parse_32_be(const void *buf)
363 {
364 return get_unaligned_be32(buf);
365 }
366
regmap_parse_32_le(const void * buf)367 static unsigned int regmap_parse_32_le(const void *buf)
368 {
369 return get_unaligned_le32(buf);
370 }
371
regmap_parse_32_be_inplace(void * buf)372 static void regmap_parse_32_be_inplace(void *buf)
373 {
374 u32 v = get_unaligned_be32(buf);
375
376 memcpy(buf, &v, sizeof(v));
377 }
378
regmap_parse_32_le_inplace(void * buf)379 static void regmap_parse_32_le_inplace(void *buf)
380 {
381 u32 v = get_unaligned_le32(buf);
382
383 memcpy(buf, &v, sizeof(v));
384 }
385
regmap_parse_32_native(const void * buf)386 static unsigned int regmap_parse_32_native(const void *buf)
387 {
388 u32 v;
389
390 memcpy(&v, buf, sizeof(v));
391 return v;
392 }
393
regmap_lock_hwlock(void * __map)394 static void regmap_lock_hwlock(void *__map)
395 {
396 struct regmap *map = __map;
397
398 hwspin_lock_timeout(map->hwlock, UINT_MAX);
399 }
400
regmap_lock_hwlock_irq(void * __map)401 static void regmap_lock_hwlock_irq(void *__map)
402 {
403 struct regmap *map = __map;
404
405 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
406 }
407
regmap_lock_hwlock_irqsave(void * __map)408 static void regmap_lock_hwlock_irqsave(void *__map)
409 {
410 struct regmap *map = __map;
411 unsigned long flags = 0;
412
413 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
414 &flags);
415 map->spinlock_flags = flags;
416 }
417
regmap_unlock_hwlock(void * __map)418 static void regmap_unlock_hwlock(void *__map)
419 {
420 struct regmap *map = __map;
421
422 hwspin_unlock(map->hwlock);
423 }
424
regmap_unlock_hwlock_irq(void * __map)425 static void regmap_unlock_hwlock_irq(void *__map)
426 {
427 struct regmap *map = __map;
428
429 hwspin_unlock_irq(map->hwlock);
430 }
431
regmap_unlock_hwlock_irqrestore(void * __map)432 static void regmap_unlock_hwlock_irqrestore(void *__map)
433 {
434 struct regmap *map = __map;
435
436 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
437 }
438
regmap_lock_unlock_none(void * __map)439 static void regmap_lock_unlock_none(void *__map)
440 {
441
442 }
443
regmap_lock_mutex(void * __map)444 static void regmap_lock_mutex(void *__map)
445 {
446 struct regmap *map = __map;
447 mutex_lock(&map->mutex);
448 }
449
regmap_unlock_mutex(void * __map)450 static void regmap_unlock_mutex(void *__map)
451 {
452 struct regmap *map = __map;
453 mutex_unlock(&map->mutex);
454 }
455
regmap_lock_spinlock(void * __map)456 static void regmap_lock_spinlock(void *__map)
457 __acquires(&map->spinlock)
458 {
459 struct regmap *map = __map;
460 unsigned long flags;
461
462 spin_lock_irqsave(&map->spinlock, flags);
463 map->spinlock_flags = flags;
464 }
465
regmap_unlock_spinlock(void * __map)466 static void regmap_unlock_spinlock(void *__map)
467 __releases(&map->spinlock)
468 {
469 struct regmap *map = __map;
470 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
471 }
472
regmap_lock_raw_spinlock(void * __map)473 static void regmap_lock_raw_spinlock(void *__map)
474 __acquires(&map->raw_spinlock)
475 {
476 struct regmap *map = __map;
477 unsigned long flags;
478
479 raw_spin_lock_irqsave(&map->raw_spinlock, flags);
480 map->raw_spinlock_flags = flags;
481 }
482
regmap_unlock_raw_spinlock(void * __map)483 static void regmap_unlock_raw_spinlock(void *__map)
484 __releases(&map->raw_spinlock)
485 {
486 struct regmap *map = __map;
487 raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
488 }
489
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * Intentionally empty: the devres entry exists only so that
	 * dev_get_regmap() can look a regmap up from a struct device;
	 * it does not own or manage the map itself.
	 */
}
498
_regmap_range_add(struct regmap * map,struct regmap_range_node * data)499 static bool _regmap_range_add(struct regmap *map,
500 struct regmap_range_node *data)
501 {
502 struct rb_root *root = &map->range_tree;
503 struct rb_node **new = &(root->rb_node), *parent = NULL;
504
505 while (*new) {
506 struct regmap_range_node *this =
507 rb_entry(*new, struct regmap_range_node, node);
508
509 parent = *new;
510 if (data->range_max < this->range_min)
511 new = &((*new)->rb_left);
512 else if (data->range_min > this->range_max)
513 new = &((*new)->rb_right);
514 else
515 return false;
516 }
517
518 rb_link_node(&data->node, parent, new);
519 rb_insert_color(&data->node, root);
520
521 return true;
522 }
523
_regmap_range_lookup(struct regmap * map,unsigned int reg)524 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
525 unsigned int reg)
526 {
527 struct rb_node *node = map->range_tree.rb_node;
528
529 while (node) {
530 struct regmap_range_node *this =
531 rb_entry(node, struct regmap_range_node, node);
532
533 if (reg < this->range_min)
534 node = node->rb_left;
535 else if (reg > this->range_max)
536 node = node->rb_right;
537 else
538 return this;
539 }
540
541 return NULL;
542 }
543
regmap_range_exit(struct regmap * map)544 static void regmap_range_exit(struct regmap *map)
545 {
546 struct rb_node *next;
547 struct regmap_range_node *range_node;
548
549 next = rb_first(&map->range_tree);
550 while (next) {
551 range_node = rb_entry(next, struct regmap_range_node, node);
552 next = rb_next(&range_node->node);
553 rb_erase(&range_node->node, &map->range_tree);
554 kfree(range_node);
555 }
556
557 kfree(map->selector_work_buf);
558 }
559
regmap_set_name(struct regmap * map,const struct regmap_config * config)560 static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
561 {
562 if (config->name) {
563 const char *name = kstrdup_const(config->name, GFP_KERNEL);
564
565 if (!name)
566 return -ENOMEM;
567
568 kfree_const(map->name);
569 map->name = name;
570 }
571
572 return 0;
573 }
574
regmap_attach_dev(struct device * dev,struct regmap * map,const struct regmap_config * config)575 int regmap_attach_dev(struct device *dev, struct regmap *map,
576 const struct regmap_config *config)
577 {
578 struct regmap **m;
579 int ret;
580
581 map->dev = dev;
582
583 ret = regmap_set_name(map, config);
584 if (ret)
585 return ret;
586
587 regmap_debugfs_exit(map);
588 regmap_debugfs_init(map);
589
590 /* Add a devres resource for dev_get_regmap() */
591 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
592 if (!m) {
593 regmap_debugfs_exit(map);
594 return -ENOMEM;
595 }
596 *m = map;
597 devres_add(dev, m);
598
599 return 0;
600 }
601 EXPORT_SYMBOL_GPL(regmap_attach_dev);
602
603 static int dev_get_regmap_match(struct device *dev, void *res, void *data);
604
regmap_detach_dev(struct device * dev,struct regmap * map)605 static int regmap_detach_dev(struct device *dev, struct regmap *map)
606 {
607 if (!dev)
608 return 0;
609
610 return devres_release(dev, dev_get_regmap_release,
611 dev_get_regmap_match, (void *)map->name);
612 }
613
regmap_get_reg_endian(const struct regmap_bus * bus,const struct regmap_config * config)614 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
615 const struct regmap_config *config)
616 {
617 enum regmap_endian endian;
618
619 /* Retrieve the endianness specification from the regmap config */
620 endian = config->reg_format_endian;
621
622 /* If the regmap config specified a non-default value, use that */
623 if (endian != REGMAP_ENDIAN_DEFAULT)
624 return endian;
625
626 /* Retrieve the endianness specification from the bus config */
627 if (bus && bus->reg_format_endian_default)
628 endian = bus->reg_format_endian_default;
629
630 /* If the bus specified a non-default value, use that */
631 if (endian != REGMAP_ENDIAN_DEFAULT)
632 return endian;
633
634 /* Use this if no other value was found */
635 return REGMAP_ENDIAN_BIG;
636 }
637
regmap_get_val_endian(struct device * dev,const struct regmap_bus * bus,const struct regmap_config * config)638 enum regmap_endian regmap_get_val_endian(struct device *dev,
639 const struct regmap_bus *bus,
640 const struct regmap_config *config)
641 {
642 struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
643 enum regmap_endian endian;
644
645 /* Retrieve the endianness specification from the regmap config */
646 endian = config->val_format_endian;
647
648 /* If the regmap config specified a non-default value, use that */
649 if (endian != REGMAP_ENDIAN_DEFAULT)
650 return endian;
651
652 /* If the firmware node exist try to get endianness from it */
653 if (fwnode_property_read_bool(fwnode, "big-endian"))
654 endian = REGMAP_ENDIAN_BIG;
655 else if (fwnode_property_read_bool(fwnode, "little-endian"))
656 endian = REGMAP_ENDIAN_LITTLE;
657 else if (fwnode_property_read_bool(fwnode, "native-endian"))
658 endian = REGMAP_ENDIAN_NATIVE;
659
660 /* If the endianness was specified in fwnode, use that */
661 if (endian != REGMAP_ENDIAN_DEFAULT)
662 return endian;
663
664 /* Retrieve the endianness specification from the bus config */
665 if (bus && bus->val_format_endian_default)
666 endian = bus->val_format_endian_default;
667
668 /* If the bus specified a non-default value, use that */
669 if (endian != REGMAP_ENDIAN_DEFAULT)
670 return endian;
671
672 /* Use this if no other value was found */
673 return REGMAP_ENDIAN_BIG;
674 }
675 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
676
__regmap_init(struct device * dev,const struct regmap_bus * bus,void * bus_context,const struct regmap_config * config,struct lock_class_key * lock_key,const char * lock_name)677 struct regmap *__regmap_init(struct device *dev,
678 const struct regmap_bus *bus,
679 void *bus_context,
680 const struct regmap_config *config,
681 struct lock_class_key *lock_key,
682 const char *lock_name)
683 {
684 struct regmap *map;
685 int ret = -EINVAL;
686 enum regmap_endian reg_endian, val_endian;
687 int i, j;
688
689 if (!config)
690 goto err;
691
692 map = kzalloc_obj(*map);
693 if (map == NULL) {
694 ret = -ENOMEM;
695 goto err;
696 }
697
698 ret = regmap_set_name(map, config);
699 if (ret)
700 goto err_map;
701
702 ret = -EINVAL; /* Later error paths rely on this */
703
704 if (config->disable_locking) {
705 map->lock = map->unlock = regmap_lock_unlock_none;
706 map->can_sleep = config->can_sleep;
707 regmap_debugfs_disable(map);
708 } else if (config->lock && config->unlock) {
709 map->lock = config->lock;
710 map->unlock = config->unlock;
711 map->lock_arg = config->lock_arg;
712 map->can_sleep = config->can_sleep;
713 } else if (config->use_hwlock) {
714 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
715 if (!map->hwlock) {
716 ret = -ENXIO;
717 goto err_name;
718 }
719
720 switch (config->hwlock_mode) {
721 case HWLOCK_IRQSTATE:
722 map->lock = regmap_lock_hwlock_irqsave;
723 map->unlock = regmap_unlock_hwlock_irqrestore;
724 break;
725 case HWLOCK_IRQ:
726 map->lock = regmap_lock_hwlock_irq;
727 map->unlock = regmap_unlock_hwlock_irq;
728 break;
729 default:
730 map->lock = regmap_lock_hwlock;
731 map->unlock = regmap_unlock_hwlock;
732 break;
733 }
734
735 map->lock_arg = map;
736 } else {
737 if ((bus && bus->fast_io) ||
738 config->fast_io) {
739 if (config->use_raw_spinlock) {
740 raw_spin_lock_init(&map->raw_spinlock);
741 map->lock = regmap_lock_raw_spinlock;
742 map->unlock = regmap_unlock_raw_spinlock;
743 lockdep_set_class_and_name(&map->raw_spinlock,
744 lock_key, lock_name);
745 } else {
746 spin_lock_init(&map->spinlock);
747 map->lock = regmap_lock_spinlock;
748 map->unlock = regmap_unlock_spinlock;
749 lockdep_set_class_and_name(&map->spinlock,
750 lock_key, lock_name);
751 }
752 } else {
753 mutex_init(&map->mutex);
754 map->lock = regmap_lock_mutex;
755 map->unlock = regmap_unlock_mutex;
756 map->can_sleep = true;
757 lockdep_set_class_and_name(&map->mutex,
758 lock_key, lock_name);
759 }
760 map->lock_arg = map;
761 map->lock_key = lock_key;
762 }
763
764 /*
765 * When we write in fast-paths with regmap_bulk_write() don't allocate
766 * scratch buffers with sleeping allocations.
767 */
768 if ((bus && bus->fast_io) || config->fast_io)
769 map->alloc_flags = GFP_ATOMIC;
770 else
771 map->alloc_flags = GFP_KERNEL;
772
773 map->reg_base = config->reg_base;
774 map->reg_shift = config->pad_bits % 8;
775
776 map->format.pad_bytes = config->pad_bits / 8;
777 map->format.reg_shift = config->reg_shift;
778 map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
779 map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
780 map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
781 if (config->reg_stride)
782 map->reg_stride = config->reg_stride;
783 else
784 map->reg_stride = 1;
785 if (is_power_of_2(map->reg_stride))
786 map->reg_stride_order = ilog2(map->reg_stride);
787 else
788 map->reg_stride_order = -1;
789 map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
790 map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
791 map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
792 if (bus) {
793 map->max_raw_read = bus->max_raw_read;
794 map->max_raw_write = bus->max_raw_write;
795 } else if (config->max_raw_read && config->max_raw_write) {
796 map->max_raw_read = config->max_raw_read;
797 map->max_raw_write = config->max_raw_write;
798 }
799 map->dev = dev;
800 map->bus = bus;
801 map->bus_context = bus_context;
802 map->max_register = config->max_register;
803 map->max_register_is_set = map->max_register ?: config->max_register_is_0;
804 map->wr_table = config->wr_table;
805 map->rd_table = config->rd_table;
806 map->volatile_table = config->volatile_table;
807 map->precious_table = config->precious_table;
808 map->wr_noinc_table = config->wr_noinc_table;
809 map->rd_noinc_table = config->rd_noinc_table;
810 map->writeable_reg = config->writeable_reg;
811 map->readable_reg = config->readable_reg;
812 map->volatile_reg = config->volatile_reg;
813 map->precious_reg = config->precious_reg;
814 map->writeable_noinc_reg = config->writeable_noinc_reg;
815 map->readable_noinc_reg = config->readable_noinc_reg;
816 map->reg_default_cb = config->reg_default_cb;
817 map->cache_type = config->cache_type;
818
819 spin_lock_init(&map->async_lock);
820 INIT_LIST_HEAD(&map->async_list);
821 INIT_LIST_HEAD(&map->async_free);
822 init_waitqueue_head(&map->async_waitq);
823
824 if (config->read_flag_mask ||
825 config->write_flag_mask ||
826 config->zero_flag_mask) {
827 map->read_flag_mask = config->read_flag_mask;
828 map->write_flag_mask = config->write_flag_mask;
829 } else if (bus) {
830 map->read_flag_mask = bus->read_flag_mask;
831 }
832
833 if (config->read && config->write) {
834 map->reg_read = _regmap_bus_read;
835 if (config->reg_update_bits)
836 map->reg_update_bits = config->reg_update_bits;
837
838 /* Bulk read/write */
839 map->read = config->read;
840 map->write = config->write;
841
842 reg_endian = REGMAP_ENDIAN_NATIVE;
843 val_endian = REGMAP_ENDIAN_NATIVE;
844 } else if (!bus) {
845 map->reg_read = config->reg_read;
846 map->reg_write = config->reg_write;
847 map->reg_update_bits = config->reg_update_bits;
848
849 map->defer_caching = false;
850 goto skip_format_initialization;
851 } else if (!bus->read || !bus->write) {
852 map->reg_read = _regmap_bus_reg_read;
853 map->reg_write = _regmap_bus_reg_write;
854 map->reg_update_bits = bus->reg_update_bits;
855
856 map->defer_caching = false;
857 goto skip_format_initialization;
858 } else {
859 map->reg_read = _regmap_bus_read;
860 map->reg_update_bits = bus->reg_update_bits;
861 /* Bulk read/write */
862 map->read = bus->read;
863 map->write = bus->write;
864
865 reg_endian = regmap_get_reg_endian(bus, config);
866 val_endian = regmap_get_val_endian(dev, bus, config);
867 }
868
869 switch (config->reg_bits + map->reg_shift) {
870 case 2:
871 switch (config->val_bits) {
872 case 6:
873 map->format.format_write = regmap_format_2_6_write;
874 break;
875 default:
876 goto err_hwlock;
877 }
878 break;
879
880 case 4:
881 switch (config->val_bits) {
882 case 12:
883 map->format.format_write = regmap_format_4_12_write;
884 break;
885 default:
886 goto err_hwlock;
887 }
888 break;
889
890 case 7:
891 switch (config->val_bits) {
892 case 9:
893 map->format.format_write = regmap_format_7_9_write;
894 break;
895 case 17:
896 map->format.format_write = regmap_format_7_17_write;
897 break;
898 default:
899 goto err_hwlock;
900 }
901 break;
902
903 case 10:
904 switch (config->val_bits) {
905 case 14:
906 map->format.format_write = regmap_format_10_14_write;
907 break;
908 default:
909 goto err_hwlock;
910 }
911 break;
912
913 case 12:
914 switch (config->val_bits) {
915 case 20:
916 map->format.format_write = regmap_format_12_20_write;
917 break;
918 default:
919 goto err_hwlock;
920 }
921 break;
922
923 case 8:
924 map->format.format_reg = regmap_format_8;
925 break;
926
927 case 16:
928 switch (reg_endian) {
929 case REGMAP_ENDIAN_BIG:
930 map->format.format_reg = regmap_format_16_be;
931 break;
932 case REGMAP_ENDIAN_LITTLE:
933 map->format.format_reg = regmap_format_16_le;
934 break;
935 case REGMAP_ENDIAN_NATIVE:
936 map->format.format_reg = regmap_format_16_native;
937 break;
938 default:
939 goto err_hwlock;
940 }
941 break;
942
943 case 24:
944 switch (reg_endian) {
945 case REGMAP_ENDIAN_BIG:
946 map->format.format_reg = regmap_format_24_be;
947 break;
948 default:
949 goto err_hwlock;
950 }
951 break;
952
953 case 32:
954 switch (reg_endian) {
955 case REGMAP_ENDIAN_BIG:
956 map->format.format_reg = regmap_format_32_be;
957 break;
958 case REGMAP_ENDIAN_LITTLE:
959 map->format.format_reg = regmap_format_32_le;
960 break;
961 case REGMAP_ENDIAN_NATIVE:
962 map->format.format_reg = regmap_format_32_native;
963 break;
964 default:
965 goto err_hwlock;
966 }
967 break;
968
969 default:
970 goto err_hwlock;
971 }
972
973 if (val_endian == REGMAP_ENDIAN_NATIVE)
974 map->format.parse_inplace = regmap_parse_inplace_noop;
975
976 switch (config->val_bits) {
977 case 8:
978 map->format.format_val = regmap_format_8;
979 map->format.parse_val = regmap_parse_8;
980 map->format.parse_inplace = regmap_parse_inplace_noop;
981 break;
982 case 16:
983 switch (val_endian) {
984 case REGMAP_ENDIAN_BIG:
985 map->format.format_val = regmap_format_16_be;
986 map->format.parse_val = regmap_parse_16_be;
987 map->format.parse_inplace = regmap_parse_16_be_inplace;
988 break;
989 case REGMAP_ENDIAN_LITTLE:
990 map->format.format_val = regmap_format_16_le;
991 map->format.parse_val = regmap_parse_16_le;
992 map->format.parse_inplace = regmap_parse_16_le_inplace;
993 break;
994 case REGMAP_ENDIAN_NATIVE:
995 map->format.format_val = regmap_format_16_native;
996 map->format.parse_val = regmap_parse_16_native;
997 break;
998 default:
999 goto err_hwlock;
1000 }
1001 break;
1002 case 24:
1003 switch (val_endian) {
1004 case REGMAP_ENDIAN_BIG:
1005 map->format.format_val = regmap_format_24_be;
1006 map->format.parse_val = regmap_parse_24_be;
1007 break;
1008 default:
1009 goto err_hwlock;
1010 }
1011 break;
1012 case 32:
1013 switch (val_endian) {
1014 case REGMAP_ENDIAN_BIG:
1015 map->format.format_val = regmap_format_32_be;
1016 map->format.parse_val = regmap_parse_32_be;
1017 map->format.parse_inplace = regmap_parse_32_be_inplace;
1018 break;
1019 case REGMAP_ENDIAN_LITTLE:
1020 map->format.format_val = regmap_format_32_le;
1021 map->format.parse_val = regmap_parse_32_le;
1022 map->format.parse_inplace = regmap_parse_32_le_inplace;
1023 break;
1024 case REGMAP_ENDIAN_NATIVE:
1025 map->format.format_val = regmap_format_32_native;
1026 map->format.parse_val = regmap_parse_32_native;
1027 break;
1028 default:
1029 goto err_hwlock;
1030 }
1031 break;
1032 }
1033
1034 if (map->format.format_write) {
1035 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1036 (val_endian != REGMAP_ENDIAN_BIG))
1037 goto err_hwlock;
1038 map->use_single_write = true;
1039 }
1040
1041 if (!map->format.format_write &&
1042 !(map->format.format_reg && map->format.format_val))
1043 goto err_hwlock;
1044
1045 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1046 if (map->work_buf == NULL) {
1047 ret = -ENOMEM;
1048 goto err_hwlock;
1049 }
1050
1051 if (map->format.format_write) {
1052 map->defer_caching = false;
1053 map->reg_write = _regmap_bus_formatted_write;
1054 } else if (map->format.format_val) {
1055 map->defer_caching = true;
1056 map->reg_write = _regmap_bus_raw_write;
1057 }
1058
1059 skip_format_initialization:
1060
1061 map->range_tree = RB_ROOT;
1062 for (i = 0; i < config->num_ranges; i++) {
1063 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1064 struct regmap_range_node *new;
1065
1066 /* Sanity check */
1067 if (range_cfg->range_max < range_cfg->range_min) {
1068 dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
1069 range_cfg->range_max, range_cfg->range_min);
1070 goto err_range;
1071 }
1072
1073 if (range_cfg->range_max > map->max_register) {
1074 dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
1075 range_cfg->range_max, map->max_register);
1076 goto err_range;
1077 }
1078
1079 if (range_cfg->selector_reg > map->max_register) {
1080 dev_err(map->dev,
1081 "Invalid range %d: selector out of map\n", i);
1082 goto err_range;
1083 }
1084
1085 if (range_cfg->window_len == 0) {
1086 dev_err(map->dev, "Invalid range %d: window_len 0\n",
1087 i);
1088 goto err_range;
1089 }
1090
1091 /* Make sure, that this register range has no selector
1092 or data window within its boundary */
1093 for (j = 0; j < config->num_ranges; j++) {
1094 unsigned int sel_reg = config->ranges[j].selector_reg;
1095 unsigned int win_min = config->ranges[j].window_start;
1096 unsigned int win_max = win_min +
1097 config->ranges[j].window_len - 1;
1098
1099 /* Allow data window inside its own virtual range */
1100 if (j == i)
1101 continue;
1102
1103 if (range_cfg->range_min <= sel_reg &&
1104 sel_reg <= range_cfg->range_max) {
1105 dev_err(map->dev,
1106 "Range %d: selector for %d in window\n",
1107 i, j);
1108 goto err_range;
1109 }
1110
1111 if (!(win_max < range_cfg->range_min ||
1112 win_min > range_cfg->range_max)) {
1113 dev_err(map->dev,
1114 "Range %d: window for %d in window\n",
1115 i, j);
1116 goto err_range;
1117 }
1118 }
1119
1120 new = kzalloc_obj(*new);
1121 if (new == NULL) {
1122 ret = -ENOMEM;
1123 goto err_range;
1124 }
1125
1126 new->map = map;
1127 new->name = range_cfg->name;
1128 new->range_min = range_cfg->range_min;
1129 new->range_max = range_cfg->range_max;
1130 new->selector_reg = range_cfg->selector_reg;
1131 new->selector_mask = range_cfg->selector_mask;
1132 new->selector_shift = range_cfg->selector_shift;
1133 new->window_start = range_cfg->window_start;
1134 new->window_len = range_cfg->window_len;
1135
1136 if (!_regmap_range_add(map, new)) {
1137 dev_err(map->dev, "Failed to add range %d\n", i);
1138 kfree(new);
1139 goto err_range;
1140 }
1141
1142 if (map->selector_work_buf == NULL) {
1143 map->selector_work_buf =
1144 kzalloc(map->format.buf_size, GFP_KERNEL);
1145 if (map->selector_work_buf == NULL) {
1146 ret = -ENOMEM;
1147 goto err_range;
1148 }
1149 }
1150 }
1151
1152 ret = regcache_init(map, config);
1153 if (ret != 0)
1154 goto err_range;
1155
1156 if (dev) {
1157 ret = regmap_attach_dev(dev, map, config);
1158 if (ret != 0)
1159 goto err_regcache;
1160 } else {
1161 regmap_debugfs_init(map);
1162 }
1163
1164 return map;
1165
1166 err_regcache:
1167 regcache_exit(map);
1168 err_range:
1169 regmap_range_exit(map);
1170 kfree(map->work_buf);
1171 err_hwlock:
1172 if (map->hwlock)
1173 hwspin_lock_free(map->hwlock);
1174 err_name:
1175 kfree_const(map->name);
1176 err_map:
1177 kfree(map);
1178 err:
1179 if (bus && bus->free_on_exit)
1180 kfree(bus);
1181 return ERR_PTR(ret);
1182 }
1183 EXPORT_SYMBOL_GPL(__regmap_init);
1184
/* devm action callback: tear the regmap down when the device goes away */
static void devm_regmap_release(void *regmap)
{
	struct regmap *map = regmap;

	regmap_exit(map);
}
1189
__devm_regmap_init(struct device * dev,const struct regmap_bus * bus,void * bus_context,const struct regmap_config * config,struct lock_class_key * lock_key,const char * lock_name)1190 struct regmap *__devm_regmap_init(struct device *dev,
1191 const struct regmap_bus *bus,
1192 void *bus_context,
1193 const struct regmap_config *config,
1194 struct lock_class_key *lock_key,
1195 const char *lock_name)
1196 {
1197 struct regmap *regmap;
1198 int ret;
1199
1200 regmap = __regmap_init(dev, bus, bus_context, config,
1201 lock_key, lock_name);
1202 if (IS_ERR(regmap))
1203 return regmap;
1204
1205 ret = devm_add_action_or_reset(dev, devm_regmap_release, regmap);
1206 if (ret)
1207 return ERR_PTR(ret);
1208
1209 return regmap;
1210 }
1211 EXPORT_SYMBOL_GPL(__devm_regmap_init);
1212
regmap_field_init(struct regmap_field * rm_field,struct regmap * regmap,struct reg_field reg_field)1213 static void regmap_field_init(struct regmap_field *rm_field,
1214 struct regmap *regmap, struct reg_field reg_field)
1215 {
1216 rm_field->regmap = regmap;
1217 rm_field->reg = reg_field.reg;
1218 rm_field->shift = reg_field.lsb;
1219 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1220
1221 WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
1222
1223 rm_field->id_size = reg_field.id_size;
1224 rm_field->id_offset = reg_field.id_offset;
1225 }
1226
1227 /**
1228 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1229 *
1230 * @dev: Device that will be interacted with
1231 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
1233 *
1234 * The return value will be an ERR_PTR() on error or a valid pointer
1235 * to a struct regmap_field. The regmap_field will be automatically freed
1236 * by the device management code.
1237 */
devm_regmap_field_alloc(struct device * dev,struct regmap * regmap,struct reg_field reg_field)1238 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1239 struct regmap *regmap, struct reg_field reg_field)
1240 {
1241 struct regmap_field *rm_field = devm_kzalloc(dev,
1242 sizeof(*rm_field), GFP_KERNEL);
1243 if (!rm_field)
1244 return ERR_PTR(-ENOMEM);
1245
1246 regmap_field_init(rm_field, regmap, reg_field);
1247
1248 return rm_field;
1249
1250 }
1251 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1252
1253
1254 /**
1255 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
1256 *
1257 * @regmap: regmap bank in which this register field is located.
1258 * @rm_field: regmap register fields within the bank.
1259 * @reg_field: Register fields within the bank.
1260 * @num_fields: Number of register fields.
1261 *
1262 * The return value will be an -ENOMEM on error or zero for success.
1263 * Newly allocated regmap_fields should be freed by calling
1264 * regmap_field_bulk_free()
1265 */
regmap_field_bulk_alloc(struct regmap * regmap,struct regmap_field ** rm_field,const struct reg_field * reg_field,int num_fields)1266 int regmap_field_bulk_alloc(struct regmap *regmap,
1267 struct regmap_field **rm_field,
1268 const struct reg_field *reg_field,
1269 int num_fields)
1270 {
1271 struct regmap_field *rf;
1272 int i;
1273
1274 rf = kzalloc_objs(*rf, num_fields);
1275 if (!rf)
1276 return -ENOMEM;
1277
1278 for (i = 0; i < num_fields; i++) {
1279 regmap_field_init(&rf[i], regmap, reg_field[i]);
1280 rm_field[i] = &rf[i];
1281 }
1282
1283 return 0;
1284 }
1285 EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
1286
1287 /**
1288 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
1289 * fields.
1290 *
1291 * @dev: Device that will be interacted with
1292 * @regmap: regmap bank in which this register field is located.
1293 * @rm_field: regmap register fields within the bank.
1294 * @reg_field: Register fields within the bank.
1295 * @num_fields: Number of register fields.
1296 *
1297 * The return value will be an -ENOMEM on error or zero for success.
1298 * Newly allocated regmap_fields will be automatically freed by the
1299 * device management code.
1300 */
devm_regmap_field_bulk_alloc(struct device * dev,struct regmap * regmap,struct regmap_field ** rm_field,const struct reg_field * reg_field,int num_fields)1301 int devm_regmap_field_bulk_alloc(struct device *dev,
1302 struct regmap *regmap,
1303 struct regmap_field **rm_field,
1304 const struct reg_field *reg_field,
1305 int num_fields)
1306 {
1307 struct regmap_field *rf;
1308 int i;
1309
1310 rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
1311 if (!rf)
1312 return -ENOMEM;
1313
1314 for (i = 0; i < num_fields; i++) {
1315 regmap_field_init(&rf[i], regmap, reg_field[i]);
1316 rm_field[i] = &rf[i];
1317 }
1318
1319 return 0;
1320 }
1321 EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
1322
1323 /**
1324 * regmap_field_bulk_free() - Free register field allocated using
1325 * regmap_field_bulk_alloc.
1326 *
1327 * @field: regmap fields which should be freed.
1328 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	/*
	 * The bulk fields share a single allocation (see
	 * regmap_field_bulk_alloc()), so freeing the first entry's
	 * pointer releases all of them.
	 */
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
1334
1335 /**
1336 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
1337 * devm_regmap_field_bulk_alloc.
1338 *
1339 * @dev: Device that will be interacted with
1340 * @field: regmap field which should be freed.
1341 *
1342 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
1343 * drivers need not call this function, as the memory allocated via devm
1344 * will be freed as per device-driver life-cycle.
1345 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	/* Release the devm-managed bulk array ahead of automatic teardown */
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
1352
1353 /**
1354 * devm_regmap_field_free() - Free a register field allocated using
1355 * devm_regmap_field_alloc.
1356 *
1357 * @dev: Device that will be interacted with
1358 * @field: regmap field which should be freed.
1359 *
1360 * Free register field allocated using devm_regmap_field_alloc(). Usually
1361 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
1363 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	/* Explicit early release; otherwise devres frees it on detach */
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1370
1371 /**
1372 * regmap_field_alloc() - Allocate and initialise a register field.
1373 *
1374 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
1376 *
1377 * The return value will be an ERR_PTR() on error or a valid pointer
1378 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
1380 */
regmap_field_alloc(struct regmap * regmap,struct reg_field reg_field)1381 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1382 struct reg_field reg_field)
1383 {
1384 struct regmap_field *rm_field = kzalloc_obj(*rm_field);
1385
1386 if (!rm_field)
1387 return ERR_PTR(-ENOMEM);
1388
1389 regmap_field_init(rm_field, regmap, reg_field);
1390
1391 return rm_field;
1392 }
1393 EXPORT_SYMBOL_GPL(regmap_field_alloc);
1394
1395 /**
1396 * regmap_field_free() - Free register field allocated using
1397 * regmap_field_alloc.
1398 *
1399 * @field: regmap field which should be freed.
1400 */
void regmap_field_free(struct regmap_field *field)
{
	/* Counterpart of regmap_field_alloc(); kfree(NULL) is a no-op */
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1406
1407 /**
1408 * regmap_reinit_cache() - Reinitialise the current register cache
1409 *
1410 * @map: Register map to operate on.
1411 * @config: New configuration. Only the cache data will be used.
1412 *
1413 * Discard any existing register cache for the map and initialize a
1414 * new cache. This can be used to restore the cache to defaults or to
1415 * update the cache configuration to reflect runtime discovery of the
1416 * hardware.
1417 *
1418 * No explicit locking is done here, the user needs to ensure that
1419 * this function will not race with other calls to regmap.
1420 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	/* Drop the old cache and debugfs entries before re-applying config */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	/* max_register == 0 is ambiguous; max_register_is_0 disambiguates */
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->reg_default_cb = config->reg_default_cb;
	map->cache_type = config->cache_type;

	/* Name may have changed with the new config */
	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	/* Restart with the cache live and hardware access enabled */
	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1451
1452 /**
1453 * regmap_exit() - Free a previously allocated register map
1454 *
1455 * @map: Register map to operate on.
1456 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	/* Detach from the device and flush the register cache first */
	regmap_detach_dev(map->dev, map);
	regcache_exit(map);

	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	/* Let the bus release whatever context it attached to this map */
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	/* Drain and free any recycled async transaction descriptors */
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	/* Only the mutex locking variant needs explicit destruction */
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
1488
dev_get_regmap_match(struct device * dev,void * res,void * data)1489 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1490 {
1491 struct regmap **r = res;
1492 if (!r || !*r) {
1493 WARN_ON(!r || !*r);
1494 return 0;
1495 }
1496
1497 /* If the user didn't specify a name match any */
1498 if (data)
1499 return (*r)->name && !strcmp((*r)->name, data);
1500 else
1501 return 1;
1502 }
1503
1504 /**
1505 * dev_get_regmap() - Obtain the regmap (if any) for a device
1506 *
1507 * @dev: Device to retrieve the map for
1508 * @name: Optional name for the register map, usually NULL.
1509 *
1510 * Returns the regmap for the device if one is present, or NULL. If
1511 * name is specified then it must match the name specified when
1512 * registering the device, if it is NULL then the first regmap found
1513 * will be used. Devices with multiple register maps are very rare,
1514 * generic code should normally not need to specify a name.
1515 */
dev_get_regmap(struct device * dev,const char * name)1516 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1517 {
1518 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1519 dev_get_regmap_match, (void *)name);
1520
1521 if (!r)
1522 return NULL;
1523 return *r;
1524 }
1525 EXPORT_SYMBOL_GPL(dev_get_regmap);
1526
1527 /**
1528 * regmap_get_device() - Obtain the device from a regmap
1529 *
1530 * @map: Register map to operate on.
1531 *
1532 * Returns the underlying device that the regmap has been created for.
1533 */
struct device *regmap_get_device(struct regmap *map)
{
	/* NOTE: map->dev can be NULL (regmap_should_log() NULL-checks it) */
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
1539
/*
 * Resolve an indirect (paged) access: program the range's selector
 * register for the right page and rewrite *reg to the corresponding
 * address inside the data window.  @val_num > 1 means a bulk access
 * that must fit within one page.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int selector_reg;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Position of *reg within its page, and which page it sits on */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * Calculate the address of the selector register in the corresponding
	 * data window if it is located on every page.
	 */
	page_chg = in_range(range->selector_reg, range->window_start, range->window_len);
	if (page_chg)
		selector_reg = range->range_min + win_page * range->window_len +
			range->selector_reg - range->window_start;

	/*
	 * It is possible to have selector register inside data window.
	 * In that case, selector register is located on every page and it
	 * needs no page switching, when accessed alone.
	 *
	 * Nevertheless we should synchronize the cache values for it.
	 * This can't be properly achieved if the selector register is
	 * the first and the only one to be read inside the data window.
	 * That's why we update it in that case as well.
	 *
	 * However, we specifically avoid updating it for the default page,
	 * when it's overlapped with the real data window, to prevent from
	 * infinite looping.
	 */
	if (val_num > 1 ||
	    (page_chg && selector_reg != range->selector_reg) ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  NULL, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Redirect the caller's access into the data window */
	*reg = range->window_start + win_offset;

	return 0;
}
1609
regmap_set_work_buf_flag_mask(struct regmap * map,int max_bytes,unsigned long mask)1610 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1611 unsigned long mask)
1612 {
1613 u8 *buf;
1614 int i;
1615
1616 if (!mask || !map->work_buf)
1617 return;
1618
1619 buf = map->work_buf;
1620
1621 for (i = 0; i < max_bytes; i++)
1622 buf[i] |= (mask >> (8 * i)) & 0xff;
1623 }
1624
regmap_reg_addr(struct regmap * map,unsigned int reg)1625 static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
1626 {
1627 reg += map->reg_base;
1628
1629 if (map->format.reg_shift > 0)
1630 reg >>= map->format.reg_shift;
1631 else if (map->format.reg_shift < 0)
1632 reg <<= -(map->format.reg_shift);
1633
1634 return reg;
1635 }
1636
/*
 * Core raw-write path: validates writability, updates the cache,
 * handles paged ranges, then pushes the formatted buffer out via the
 * async, direct, gather or linearised fallback mechanisms.
 *
 * Fix: the _regmap_select_page() call below had its "&reg" argument
 * corrupted into the mojibake character "®" (HTML-entity decoding of
 * "&reg"); restored the address-of operator.
 */
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival, offset;
		int val_bytes = map->format.val_bytes;

		/* Cache the last written value for noinc writes */
		i = noinc ? val_len - val_bytes : 0;
		for (; i < val_len; i += val_bytes) {
			ival = map->format.parse_val(val + i);
			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
			ret = regcache_write(map, reg + offset, ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + offset, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		/* Fixed: "&reg" had been mangled into "®" */
		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		/* Reuse a free async descriptor if one is available */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the descriptor to the free list on failure */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->write(map->bus_context, map->work_buf,
				 map->format.reg_bytes +
				 map->format.pad_bytes +
				 val_len);
	else if (map->bus && map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
1836
1837 /**
1838 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1839 *
1840 * @map: Map to check.
1841 */
regmap_can_raw_write(struct regmap * map)1842 bool regmap_can_raw_write(struct regmap *map)
1843 {
1844 return map->write && map->format.format_val && map->format.format_reg;
1845 }
1846 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1847
/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	/* Presumably 0 means "no limit", as with max_raw_write — confirm */
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1858
/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	/* 0 means no limit: see the max_raw_write check in _regmap_raw_write() */
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1869
_regmap_bus_formatted_write(void * context,unsigned int reg,unsigned int val)1870 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1871 unsigned int val)
1872 {
1873 int ret;
1874 struct regmap_range_node *range;
1875 struct regmap *map = context;
1876
1877 WARN_ON(!map->format.format_write);
1878
1879 range = _regmap_range_lookup(map, reg);
1880 if (range) {
1881 ret = _regmap_select_page(map, ®, range, 1);
1882 if (ret != 0)
1883 return ret;
1884 }
1885
1886 reg = regmap_reg_addr(map, reg);
1887 map->format.format_write(map, reg, val);
1888
1889 trace_regmap_hw_write_start(map, reg, 1);
1890
1891 ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
1892
1893 trace_regmap_hw_write_done(map, reg, 1);
1894
1895 return ret;
1896 }
1897
_regmap_bus_reg_write(void * context,unsigned int reg,unsigned int val)1898 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1899 unsigned int val)
1900 {
1901 struct regmap *map = context;
1902 struct regmap_range_node *range;
1903 int ret;
1904
1905 range = _regmap_range_lookup(map, reg);
1906 if (range) {
1907 ret = _regmap_select_page(map, ®, range, 1);
1908 if (ret != 0)
1909 return ret;
1910 }
1911
1912 reg = regmap_reg_addr(map, reg);
1913 return map->bus->reg_write(map->bus_context, reg, val);
1914 }
1915
_regmap_bus_raw_write(void * context,unsigned int reg,unsigned int val)1916 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1917 unsigned int val)
1918 {
1919 struct regmap *map = context;
1920
1921 WARN_ON(!map->format.format_val);
1922
1923 map->format.format_val(map->work_buf + map->format.reg_bytes
1924 + map->format.pad_bytes, val, 0);
1925 return _regmap_raw_write_impl(map, reg,
1926 map->work_buf +
1927 map->format.reg_bytes +
1928 map->format.pad_bytes,
1929 map->format.val_bytes,
1930 false);
1931 }
1932
_regmap_map_get_context(struct regmap * map)1933 static inline void *_regmap_map_get_context(struct regmap *map)
1934 {
1935 return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
1936 }
1937
_regmap_write(struct regmap * map,unsigned int reg,unsigned int val)1938 int _regmap_write(struct regmap *map, unsigned int reg,
1939 unsigned int val)
1940 {
1941 int ret;
1942 void *context = _regmap_map_get_context(map);
1943
1944 if (!regmap_writeable(map, reg))
1945 return -EIO;
1946
1947 if (!map->cache_bypass && !map->defer_caching) {
1948 ret = regcache_write(map, reg, val);
1949 if (ret != 0)
1950 return ret;
1951 if (map->cache_only) {
1952 map->cache_dirty = true;
1953 return 0;
1954 }
1955 }
1956
1957 ret = map->reg_write(context, reg, val);
1958 if (ret == 0) {
1959 if (regmap_should_log(map))
1960 dev_info(map->dev, "%x <= %x\n", reg, val);
1961
1962 trace_regmap_reg_write(map, reg, val);
1963 }
1964
1965 return ret;
1966 }
1967
1968 /**
1969 * regmap_write() - Write a value to a single register
1970 *
1971 * @map: Register map to write to
1972 * @reg: Register to write to
1973 * @val: Value to be written
1974 *
1975 * A value of zero will be returned on success, a negative errno will
1976 * be returned in error cases.
1977 */
regmap_write(struct regmap * map,unsigned int reg,unsigned int val)1978 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1979 {
1980 int ret;
1981
1982 if (!IS_ALIGNED(reg, map->reg_stride))
1983 return -EINVAL;
1984
1985 map->lock(map->lock_arg);
1986
1987 ret = _regmap_write(map, reg, val);
1988
1989 map->unlock(map->lock_arg);
1990
1991 return ret;
1992 }
1993 EXPORT_SYMBOL_GPL(regmap_write);
1994
1995 /**
1996 * regmap_write_async() - Write a value to a single register asynchronously
1997 *
1998 * @map: Register map to write to
1999 * @reg: Register to write to
2000 * @val: Value to be written
2001 *
2002 * A value of zero will be returned on success, a negative errno will
2003 * be returned in error cases.
2004 */
regmap_write_async(struct regmap * map,unsigned int reg,unsigned int val)2005 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
2006 {
2007 int ret;
2008
2009 if (!IS_ALIGNED(reg, map->reg_stride))
2010 return -EINVAL;
2011
2012 map->lock(map->lock_arg);
2013
2014 map->async = true;
2015
2016 ret = _regmap_write(map, reg, val);
2017
2018 map->async = false;
2019
2020 map->unlock(map->lock_arg);
2021
2022 return ret;
2023 }
2024 EXPORT_SYMBOL_GPL(regmap_write_async);
2025
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_regs = val_count;
	size_t chunk_count, chunk_bytes;
	int ret;

	if (!val_count)
		return -EINVAL;

	/* Clamp the per-transfer register count to what the bus allows */
	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Transfer the full-sized chunks first... */
	while (chunk_count--) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* ...then whatever tail remains */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}
2063
2064 /**
2065 * regmap_raw_write() - Write raw values to one or more registers
2066 *
2067 * @map: Register map to write to
2068 * @reg: Initial register to write to
2069 * @val: Block of data to be written, laid out for direct transmission to the
2070 * device
2071 * @val_len: Length of data pointed to by val.
2072 *
2073 * This function is intended to be used for things like firmware
2074 * download where a large block of data needs to be transferred to the
2075 * device. No formatting will be done on the data provided.
2076 *
2077 * A value of zero will be returned on success, a negative errno will
2078 * be returned in error cases.
2079 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	/* Raw access needs a formatting-capable map and whole values */
	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);
	ret = _regmap_raw_write(map, reg, val, val_len, false);
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
2099
/*
 * Accelerated FIFO access: transfer @val_len bytes to/from a single
 * register without incrementing the address between values, using the
 * bus's reg_noinc_write/reg_noinc_read operations.
 *
 * Returns 0 on success or a negative errno.
 */
static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
				  void *val, unsigned int val_len, bool write)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int lastval;
	u8 *u8p;
	u16 *u16p;
	u32 *u32p;
	int ret;
	int i;

	/* Only 1/2/4 byte values are supported.  For writes capture the
	 * final value, which is the one that lands in the cache below. */
	switch (val_bytes) {
	case 1:
		u8p = val;
		if (write)
			lastval = (unsigned int)u8p[val_count - 1];
		break;
	case 2:
		u16p = val;
		if (write)
			lastval = (unsigned int)u16p[val_count - 1];
		break;
	case 4:
		u32p = val;
		if (write)
			lastval = (unsigned int)u32p[val_count - 1];
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Update the cache with the last value we write, the rest is just
	 * gone down in the hardware FIFO. We can't cache FIFOs. This makes
	 * sure a single read from the cache will work.
	 */
	if (write) {
		if (!map->cache_bypass && !map->defer_caching) {
			ret = regcache_write(map, reg, lastval);
			if (ret != 0)
				return ret;
			if (map->cache_only) {
				map->cache_dirty = true;
				return 0;
			}
		}
		ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
	} else {
		ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
	}

	if (!ret && regmap_should_log(map)) {
		dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
		for (i = 0; i < val_count; i++) {
			switch (val_bytes) {
			case 1:
				pr_cont("%x", u8p[i]);
				break;
			case 2:
				pr_cont("%x", u16p[i]);
				break;
			case 4:
				pr_cont("%x", u32p[i]);
				break;
			default:
				break;
			}
			if (i == (val_count - 1))
				pr_cont("]\n");
			else
				pr_cont(",");
		}
	}

	/* Propagate the bus operation's result rather than returning 0
	 * unconditionally, which silently discarded I/O errors. */
	return ret;
}
2177
2178 /**
2179 * regmap_noinc_write(): Write data to a register without incrementing the
2180 * register number
2181 *
2182 * @map: Register map to write to
2183 * @reg: Register to write to
2184 * @val: Pointer to data buffer
2185 * @val_len: Length of output buffer in bytes.
2186 *
2187 * The regmap API usually assumes that bulk bus write operations will write a
2188 * range of registers. Some devices have certain registers for which a write
2189 * operation can write to an internal FIFO.
2190 *
2191 * The target register must be volatile but registers after it can be
2192 * completely unrelated cacheable registers.
2193 *
2194 * This will attempt multiple writes as required to write val_len bytes.
2195 *
2196 * A value of zero will be returned on success, a negative errno will be
2197 * returned in error cases.
2198 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	/* Need either a generic write op or an accelerated noinc bus op. */
	if (!map->write && !(map->bus && map->bus->reg_noinc_write))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* The FIFO register itself must be volatile and noinc-writeable. */
	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Use the accelerated operation if we can. The val drops the const
	 * typing in order to facilitate code reuse in regmap_noinc_readwrite().
	 * Check map->bus first: the entry check above admits bus-less maps
	 * that only provide map->write, so dereferencing map->bus blindly
	 * here could crash.
	 */
	if (map->bus && map->bus->reg_noinc_write) {
		ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
		goto out_unlock;
	}

	/* Fall back to chunked raw writes bounded by max_raw_write. */
	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
2247
2248 /**
2249 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
2250 * register field.
2251 *
2252 * @field: Register field to write to
2253 * @mask: Bitmask to change
2254 * @val: Value to be written
2255 * @change: Boolean indicating if a write was done
2256 * @async: Boolean indicating asynchronously
2257 * @force: Boolean indicating use force update
2258 *
2259 * Perform a read/modify/write cycle on the register field with change,
2260 * async, force option.
2261 *
2262 * A value of zero will be returned on success, a negative errno will
2263 * be returned in error cases.
2264 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	/* Shift the caller's mask/value into position and clamp the mask
	 * to the bits actually owned by this field. */
	unsigned int field_mask = (mask << field->shift) & field->mask;
	unsigned int field_val = val << field->shift;

	return regmap_update_bits_base(field->regmap, field->reg,
				       field_mask, field_val,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
2276
2277 /**
2278 * regmap_field_test_bits() - Check if all specified bits are set in a
2279 * register field.
2280 *
2281 * @field: Register field to operate on
2282 * @bits: Bits to test
2283 *
2284 * Returns negative errno if the underlying regmap_field_read() fails,
2285 * 0 if at least one of the tested bits is not set and 1 if all tested
2286 * bits are set.
2287 */
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
	unsigned int val;
	int ret = regmap_field_read(field, &val);

	if (ret != 0)
		return ret;

	/* 1 only when every requested bit is set in the field. */
	return (val & bits) == bits ? 1 : 0;
}
EXPORT_SYMBOL_GPL(regmap_field_test_bits);
2300
2301 /**
2302 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
2303 * register field with port ID
2304 *
2305 * @field: Register field to write to
2306 * @id: port ID
2307 * @mask: Bitmask to change
2308 * @val: Value to be written
2309 * @change: Boolean indicating if a write was done
2310 * @async: Boolean indicating asynchronously
2311 * @force: Boolean indicating use force update
2312 *
2313 * A value of zero will be returned on success, a negative errno will
2314 * be returned in error cases.
2315 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	/* Per-port instances of the field live at reg + id * id_offset. */
	unsigned int port_reg;
	unsigned int field_mask;

	if (id >= field->id_size)
		return -EINVAL;

	port_reg = field->reg + (field->id_offset * id);
	field_mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, port_reg,
				       field_mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2331
2332 /**
2333 * regmap_bulk_write() - Write multiple registers to the device
2334 *
2335 * @map: Register map to write to
2336 * @reg: First register to be write from
2337 * @val: Block of data to be written, in native register size for device
2338 * @val_count: Number of registers to write
2339 *
2340 * This function is intended to be used for writing a large block of
2341 * data to the device either in single transfer or multiple transfer.
2342 *
2343 * A value of zero will be returned on success, a negative errno will
2344 * be returned in error cases.
2345 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		     size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->write || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Pull each native-endian value out of the buffer;
			 * only 1/2/4 byte values are supported here. */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		/* Copy so we can format in place without mutating the
		 * caller's buffer, then hand off as one raw write. */
		wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}

	if (!ret)
		trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
2408
2409 /*
2410 * _regmap_raw_multi_reg_write()
2411 *
2412 * the (register,newvalue) pairs in regs have not been formatted, but
2413 * they are all in the same page and have been changed to being page
2414 * relative. The page register has been written if that was necessary.
2415 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	/* Each entry is serialised as <reg><pad><val>. */
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		reg = regmap_reg_addr(map, reg);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	/* The write flag goes into the very first byte of the buffer. */
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}
2464
_regmap_register_page(struct regmap * map,unsigned int reg,struct regmap_range_node * range)2465 static unsigned int _regmap_register_page(struct regmap *map,
2466 unsigned int reg,
2467 struct regmap_range_node *range)
2468 {
2469 unsigned int win_page = (reg - range->range_min) / range->window_len;
2470
2471 return win_page;
2472 }
2473
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Registers outside any range never force a page break. */
		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

				/* For situations where the first write requires
				 * a delay we need to make sure we don't call
				 * raw_multi_reg_write with n=0
				 * This can't occur with page breaks as we
				 * never write on the first iteration
				 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			/* Flush everything accumulated so far (n entries
			 * starting at base) before the delay/page switch. */
			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				/* Select the new page; this may rewrite
				 * base[0].reg to be window-relative. */
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	/* Write any trailing entries after the last break. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2554
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* Devices without multi-write support get one write per register,
	 * honouring any per-entry delay. */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate the whole sequence up front so we never partially
	 * apply an invalid one. */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		/* Cache-only mode: the values are now cached, done. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			/* The paged path mutates the sequence (page-relative
			 * registers), so work on a private copy. */
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							   GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2633
2634 /**
2635 * regmap_multi_reg_write() - Write multiple registers to the device
2636 *
2637 * @map: Register map to write to
2638 * @regs: Array of structures containing register,value to be written
2639 * @num_regs: Number of registers to write
2640 *
2641 * Write multiple registers to the device where the set of register, value
2642 * pairs are supplied in any order, possibly not all in a single range.
2643 *
 * The 'normal' block write mode will ultimately send data on the
2645 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2646 * addressed. However, this alternative block multi write mode will send
2647 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2648 * must of course support the mode.
2649 *
2650 * A value of zero will be returned on success, a negative errno will be
2651 * returned in error cases.
2652 */
regmap_multi_reg_write(struct regmap * map,const struct reg_sequence * regs,int num_regs)2653 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2654 int num_regs)
2655 {
2656 int ret;
2657
2658 map->lock(map->lock_arg);
2659
2660 ret = _regmap_multi_reg_write(map, regs, num_regs);
2661
2662 map->unlock(map->lock_arg);
2663
2664 return ret;
2665 }
2666 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2667
2668 /**
2669 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2670 * device but not the cache
2671 *
2672 * @map: Register map to write to
2673 * @regs: Array of structures containing register,value to be written
2674 * @num_regs: Number of registers to write
2675 *
2676 * Write multiple registers to the device but not the cache where the set
2677 * of register are supplied in any order.
2678 *
2679 * This function is intended to be used for writing a large block of data
2680 * atomically to the device in single transfer for those I2C client devices
2681 * that implement this alternative block write mode.
2682 *
2683 * A value of zero will be returned on success, a negative errno will
2684 * be returned in error cases.
2685 */
regmap_multi_reg_write_bypassed(struct regmap * map,const struct reg_sequence * regs,int num_regs)2686 int regmap_multi_reg_write_bypassed(struct regmap *map,
2687 const struct reg_sequence *regs,
2688 int num_regs)
2689 {
2690 int ret;
2691 bool bypass;
2692
2693 map->lock(map->lock_arg);
2694
2695 bypass = map->cache_bypass;
2696 map->cache_bypass = true;
2697
2698 ret = _regmap_multi_reg_write(map, regs, num_regs);
2699
2700 map->cache_bypass = bypass;
2701
2702 map->unlock(map->lock_arg);
2703
2704 return ret;
2705 }
2706 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2707
2708 /**
2709 * regmap_raw_write_async() - Write raw values to one or more registers
2710 * asynchronously
2711 *
2712 * @map: Register map to write to
2713 * @reg: Initial register to write to
2714 * @val: Block of data to be written, laid out for direct transmission to the
2715 * device. Must be valid until regmap_async_complete() is called.
2716 * @val_len: Length of data pointed to by val.
2717 *
2718 * This function is intended to be used for things like firmware
2719 * download where a large block of data needs to be transferred to the
2720 * device. No formatting will be done on the data provided.
2721 *
2722 * If supported by the underlying bus the write will be scheduled
2723 * asynchronously, helping maximise I/O speed on higher speed buses
2724 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
2726 *
2727 * A value of zero will be returned on success, a negative errno will
2728 * be returned in error cases.
2729 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	/* Whole values only, at a stride-aligned starting register. */
	if ((val_len % map->format.val_bytes) ||
	    !IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Flag async mode only around the actual write. */
	map->async = true;
	ret = _regmap_raw_write(map, reg, val, val_len, false);
	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2753
/*
 * Read @val_len raw bytes starting at @reg, handling page selection and
 * register formatting.  Returns 0 on success or a negative errno.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	if (!map->read)
		return -EINVAL;

	/* Indirect (windowed) registers need the right page selected;
	 * _regmap_select_page() may rewrite reg to be window-relative.
	 * (Fixes mis-encoded "&reg" that had been corrupted in-source.) */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->read(map->bus_context, map->work_buf,
			map->format.reg_bytes + map->format.pad_bytes,
			val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2785
_regmap_bus_reg_read(void * context,unsigned int reg,unsigned int * val)2786 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2787 unsigned int *val)
2788 {
2789 struct regmap *map = context;
2790 struct regmap_range_node *range;
2791 int ret;
2792
2793 range = _regmap_range_lookup(map, reg);
2794 if (range) {
2795 ret = _regmap_select_page(map, ®, range, 1);
2796 if (ret != 0)
2797 return ret;
2798 }
2799
2800 reg = regmap_reg_addr(map, reg);
2801 return map->bus->reg_read(map->bus_context, reg, val);
2802 }
2803
_regmap_bus_read(void * context,unsigned int reg,unsigned int * val)2804 static int _regmap_bus_read(void *context, unsigned int reg,
2805 unsigned int *val)
2806 {
2807 int ret;
2808 struct regmap *map = context;
2809 void *work_val = map->work_buf + map->format.reg_bytes +
2810 map->format.pad_bytes;
2811
2812 if (!map->format.parse_val)
2813 return -EINVAL;
2814
2815 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2816 if (ret == 0)
2817 *val = map->format.parse_val(work_val);
2818
2819 return ret;
2820 }
2821
/*
 * Core single-register read: try the cache first, then the device,
 * writing any value read from hardware back into the cache.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* A cache hit short-circuits the hardware access entirely. */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache-only maps must not touch the hardware. */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		/* Populate the cache so the next read can hit it. */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2853
2854 /**
2855 * regmap_read() - Read a value from a single register
2856 *
2857 * @map: Register map to read from
2858 * @reg: Register to be read from
2859 * @val: Pointer to store read value
2860 *
2861 * A value of zero will be returned on success, a negative errno will
2862 * be returned in error cases.
2863 */
regmap_read(struct regmap * map,unsigned int reg,unsigned int * val)2864 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2865 {
2866 int ret;
2867
2868 if (!IS_ALIGNED(reg, map->reg_stride))
2869 return -EINVAL;
2870
2871 map->lock(map->lock_arg);
2872
2873 ret = _regmap_read(map, reg, val);
2874
2875 map->unlock(map->lock_arg);
2876
2877 return ret;
2878 }
2879 EXPORT_SYMBOL_GPL(regmap_read);
2880
2881 /**
2882 * regmap_read_bypassed() - Read a value from a single register direct
2883 * from the device, bypassing the cache
2884 *
2885 * @map: Register map to read from
2886 * @reg: Register to be read from
2887 * @val: Pointer to store read value
2888 *
2889 * A value of zero will be returned on success, a negative errno will
2890 * be returned in error cases.
2891 */
regmap_read_bypassed(struct regmap * map,unsigned int reg,unsigned int * val)2892 int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
2893 {
2894 int ret;
2895 bool bypass, cache_only;
2896
2897 if (!IS_ALIGNED(reg, map->reg_stride))
2898 return -EINVAL;
2899
2900 map->lock(map->lock_arg);
2901
2902 bypass = map->cache_bypass;
2903 cache_only = map->cache_only;
2904 map->cache_bypass = true;
2905 map->cache_only = false;
2906
2907 ret = _regmap_read(map, reg, val);
2908
2909 map->cache_bypass = bypass;
2910 map->cache_only = cache_only;
2911
2912 map->unlock(map->lock_arg);
2913
2914 return ret;
2915 }
2916 EXPORT_SYMBOL_GPL(regmap_read_bypassed);
2917
2918 /**
2919 * regmap_raw_read() - Read raw data from the device
2920 *
2921 * @map: Register map to read from
2922 * @reg: First register to be read from
2923 * @val: Pointer to store read value
2924 * @val_len: Size of data to read
2925 *
2926 * A value of zero will be returned on success, a negative errno will
2927 * be returned in error cases.
2928 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Volatile or uncached ranges go straight to the hardware in raw
	 * chunks; everything else is served word by word from the cache. */
	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->cache_bypass && map->cache_only) {
			ret = -EBUSY;
			goto out;
		}

		if (!map->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		/* Bound each transfer like _regmap_raw_write() does. */
		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			/* Re-serialise the cached value into wire format. */
			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
3006
3007 /**
3008 * regmap_noinc_read(): Read data from a register without incrementing the
3009 * register number
3010 *
3011 * @map: Register map to read from
3012 * @reg: Register to read from
3013 * @val: Pointer to data buffer
3014 * @val_len: Length of output buffer in bytes.
3015 *
3016 * The regmap API usually assumes that bulk read operations will read a
3017 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
3019 *
3020 * The target register must be volatile but registers after it can be
3021 * completely unrelated cacheable registers.
3022 *
3023 * This will attempt multiple reads as required to read val_len bytes.
3024 *
3025 * A value of zero will be returned on success, a negative errno will be
3026 * returned in error cases.
3027 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->read)
		return -ENOTSUPP;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* The FIFO register itself must be volatile and noinc-readable. */
	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * We have not defined the FIFO semantics for cache, as the
	 * cache is just one value deep. Should we return the last
	 * written value? Just avoid this by always reading the FIFO
	 * even when using cache. Cache only will not work.
	 */
	if (!map->cache_bypass && map->cache_only) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Use the accelerated operation if we can.  Check map->bus first:
	 * only map->read was validated on entry, so bus-less maps would
	 * otherwise dereference a NULL map->bus here.
	 */
	if (map->bus && map->bus->reg_noinc_read) {
		ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
		goto out_unlock;
	}

	/* Fall back to chunked raw reads bounded by max_raw_read. */
	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
3085
3086 /**
 * regmap_field_read(): Read a value from a single register field
3088 *
3089 * @field: Register field to read from
3090 * @val: Pointer to store read value
3091 *
3092 * A value of zero will be returned on success, a negative errno will
3093 * be returned in error cases.
3094 */
regmap_field_read(struct regmap_field * field,unsigned int * val)3095 int regmap_field_read(struct regmap_field *field, unsigned int *val)
3096 {
3097 int ret;
3098 unsigned int reg_val;
3099 ret = regmap_read(field->regmap, field->reg, ®_val);
3100 if (ret != 0)
3101 return ret;
3102
3103 reg_val &= field->mask;
3104 reg_val >>= field->shift;
3105 *val = reg_val;
3106
3107 return ret;
3108 }
3109 EXPORT_SYMBOL_GPL(regmap_field_read);
3110
3111 /**
 * regmap_fields_read() - Read a value from a single register field with port ID
3113 *
3114 * @field: Register field to read from
3115 * @id: port ID
3116 * @val: Pointer to store read value
3117 *
3118 * A value of zero will be returned on success, a negative errno will
3119 * be returned in error cases.
3120 */
regmap_fields_read(struct regmap_field * field,unsigned int id,unsigned int * val)3121 int regmap_fields_read(struct regmap_field *field, unsigned int id,
3122 unsigned int *val)
3123 {
3124 int ret;
3125 unsigned int reg_val;
3126
3127 if (id >= field->id_size)
3128 return -EINVAL;
3129
3130 ret = regmap_read(field->regmap,
3131 field->reg + (field->id_offset * id),
3132 ®_val);
3133 if (ret != 0)
3134 return ret;
3135
3136 reg_val &= field->mask;
3137 reg_val >>= field->shift;
3138 *val = reg_val;
3139
3140 return ret;
3141 }
3142 EXPORT_SYMBOL_GPL(regmap_fields_read);
3143
/*
 * Read @val_count registers one at a time under the map lock, storing
 * each value into @val at the map's native value width.  Reads either a
 * sequential range starting at @reg, or (when @regs is non-NULL) the
 * explicit register list in @regs.
 *
 * Returns 0 on success (including val_count == 0) or a negative errno.
 */
static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
			     const unsigned int *regs, void *val, size_t val_count)
{
	u32 *u32 = val;
	u16 *u16 = val;
	u8 *u8 = val;
	/* Initialise ret: with val_count == 0 the loop never runs and an
	 * uninitialised ret would otherwise be returned. */
	int ret = 0, i;

	map->lock(map->lock_arg);

	for (i = 0; i < val_count; i++) {
		unsigned int ival;

		if (regs) {
			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
				ret = -EINVAL;
				goto out;
			}
			ret = _regmap_read(map, regs[i], &ival);
		} else {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
		}
		if (ret != 0)
			goto out;

		/* Store at the map's value width; anything else is invalid. */
		switch (map->format.val_bytes) {
		case 4:
			u32[i] = ival;
			break;
		case 2:
			u16[i] = ival;
			break;
		case 1:
			u8[i] = ival;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	map->unlock(map->lock_arg);
	return ret;
}
3188
3189 /**
3190 * regmap_bulk_read() - Read multiple sequential registers from the device
3191 *
3192 * @map: Register map to read from
3193 * @reg: First register to be read from
3194 * @val: Pointer to store read value, in native register size for device
3195 * @val_count: Number of registers to read
3196 *
3197 * A value of zero will be returned on success, a negative errno will
3198 * be returned in error cases.
3199 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);
	bool use_raw;
	size_t i;
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride) || val_count == 0)
		return -EINVAL;

	/*
	 * A single raw block read is only usable when values can be
	 * parsed in place and nothing needs to come from the cache.
	 */
	use_raw = map->read && map->format.parse_inplace &&
		  (vol || map->cache_type == REGCACHE_NONE);

	if (use_raw) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret)
			return ret;

		/* Convert each raw value to native endianness in place. */
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		/* Fall back to one register at a time. */
		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
	}

	if (!ret)
		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
3227
3228 /**
3229 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
3230 *
3231 * @map: Register map to read from
3232 * @regs: Array of registers to read from
3233 * @val: Pointer to store read value, in native register size for device
3234 * @val_count: Number of registers to read
3235 *
3236 * A value of zero will be returned on success, a negative errno will
3237 * be returned in error cases.
3238 */
int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
			  size_t val_count)
{
	/* An empty register list is a caller bug. */
	if (!val_count)
		return -EINVAL;

	/* The sequential base register is unused when regs is supplied. */
	return _regmap_bulk_read(map, 0, regs, val, val_count);
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
3248
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	unsigned int old, updated;
	int ret;

	if (change)
		*change = false;

	/* Volatile registers can't be read-modify-written via the cache;
	 * use the bus's native update_bits operation when one exists. */
	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		reg = regmap_reg_addr(map, reg);
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (!ret && change)
			*change = true;
		return ret;
	}

	ret = _regmap_read(map, reg, &old);
	if (ret)
		return ret;

	updated = (old & ~mask) | (val & mask);

	/* Skip the write when nothing would change, unless forced. */
	if (force_write || updated != old || map->force_write_field) {
		ret = _regmap_write(map, reg, updated);
		if (!ret && change)
			*change = true;
	}

	return ret;
}
3281
3282 /**
3283 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3284 *
3285 * @map: Register map to update
3286 * @reg: Register to update
3287 * @mask: Bitmask to change
3288 * @val: New value for bitmask
3289 * @change: Boolean indicating if a write was done
3290 * @async: Boolean indicating asynchronously
3291 * @force: Boolean indicating use force update
3292 *
3293 * Perform a read/modify/write cycle on a register map with change, async, force
3294 * options.
3295 *
3296 * If async is true:
3297 *
3298 * With most buses the read must be done synchronously so this is most useful
3299 * for devices with a cache which do not need to interact with the hardware to
3300 * determine the current register value.
3301 *
3302 * Returns zero for success, a negative number on error.
3303 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	/* Perform the whole read-modify-write under the map lock so the
	 * async flag only covers this one operation. */
	map->lock(map->lock_arg);
	map->async = async;
	ret = _regmap_update_bits(map, reg, mask, val, change, force);
	map->async = false;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
3323
3324 /**
3325 * regmap_test_bits() - Check if all specified bits are set in a register.
3326 *
3327 * @map: Register map to operate on
3328 * @reg: Register to read from
3329 * @bits: Bits to test
3330 *
3331 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3332 * bits are set and a negative error number if the underlying regmap_read()
3333 * fails.
3334 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int reg_val;
	int err;

	err = regmap_read(map, reg, &reg_val);
	if (err)
		return err;

	/* 1 only when every requested bit is set, 0 otherwise. */
	return (reg_val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
3347
regmap_async_complete_cb(struct regmap_async * async,int ret)3348 void regmap_async_complete_cb(struct regmap_async *async, int ret)
3349 {
3350 struct regmap *map = async->map;
3351 bool wake;
3352
3353 trace_regmap_async_io_complete(map);
3354
3355 spin_lock(&map->async_lock);
3356 list_move(&async->list, &map->async_free);
3357 wake = list_empty(&map->async_list);
3358
3359 if (ret != 0)
3360 map->async_ret = ret;
3361
3362 spin_unlock(&map->async_lock);
3363
3364 if (wake)
3365 wake_up(&map->async_waitq);
3366 }
3367 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
3368
regmap_async_is_done(struct regmap * map)3369 static int regmap_async_is_done(struct regmap *map)
3370 {
3371 unsigned long flags;
3372 int ret;
3373
3374 spin_lock_irqsave(&map->async_lock, flags);
3375 ret = list_empty(&map->async_list);
3376 spin_unlock_irqrestore(&map->async_lock, flags);
3377
3378 return ret;
3379 }
3380
3381 /**
3382 * regmap_async_complete - Ensure all asynchronous I/O has completed.
3383 *
3384 * @map: Map to operate on.
3385 *
3386 * Blocks until any pending asynchronous I/O has completed. Returns
3387 * an error code for any failed I/O operations.
3388 */
regmap_async_complete(struct regmap * map)3389 int regmap_async_complete(struct regmap *map)
3390 {
3391 unsigned long flags;
3392 int ret;
3393
3394 /* Nothing to do with no async support */
3395 if (!map->bus || !map->bus->async_write)
3396 return 0;
3397
3398 trace_regmap_async_complete_start(map);
3399
3400 wait_event(map->async_waitq, regmap_async_is_done(map));
3401
3402 spin_lock_irqsave(&map->async_lock, flags);
3403 ret = map->async_ret;
3404 map->async_ret = 0;
3405 spin_unlock_irqrestore(&map->async_lock, flags);
3406
3407 trace_regmap_async_complete_done(map);
3408
3409 return ret;
3410 }
3411 EXPORT_SYMBOL_GPL(regmap_async_complete);
3412
3413 /**
3414 * regmap_register_patch - Register and apply register updates to be applied
 * on device initialisation
3416 *
3417 * @map: Register map to apply updates to.
3418 * @regs: Values to update.
3419 * @num_regs: Number of entries in regs.
3420 *
3421 * Register a set of register updates to be applied to the device
3422 * whenever the device registers are synchronised with the cache and
3423 * apply them immediately. Typically this is used to apply
3424 * corrections to be applied to the device defaults on startup, such
3425 * as the updates some vendors provide to undocumented registers.
3426 *
3427 * The caller must ensure that this function cannot be called
3428 * concurrently with either itself or regcache_sync().
3429 */
regmap_register_patch(struct regmap * map,const struct reg_sequence * regs,int num_regs)3430 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3431 int num_regs)
3432 {
3433 struct reg_sequence *p;
3434 int ret;
3435 bool bypass;
3436
3437 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
3438 num_regs))
3439 return 0;
3440
3441 p = krealloc(map->patch,
3442 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3443 GFP_KERNEL);
3444 if (p) {
3445 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3446 map->patch = p;
3447 map->patch_regs += num_regs;
3448 } else {
3449 return -ENOMEM;
3450 }
3451
3452 map->lock(map->lock_arg);
3453
3454 bypass = map->cache_bypass;
3455
3456 map->cache_bypass = true;
3457 map->async = true;
3458
3459 ret = _regmap_multi_reg_write(map, regs, num_regs);
3460
3461 map->async = false;
3462 map->cache_bypass = bypass;
3463
3464 map->unlock(map->lock_arg);
3465
3466 regmap_async_complete(map);
3467
3468 return ret;
3469 }
3470 EXPORT_SYMBOL_GPL(regmap_register_patch);
3471
3472 /**
3473 * regmap_get_val_bytes() - Report the size of a register value
3474 *
3475 * @map: Register map to operate on.
3476 *
 * Report the size of a register value, mainly intended for use by
3478 * generic infrastructure built on top of regmap.
3479 */
regmap_get_val_bytes(struct regmap * map)3480 int regmap_get_val_bytes(struct regmap *map)
3481 {
3482 if (map->format.format_write)
3483 return -EINVAL;
3484
3485 return map->format.val_bytes;
3486 }
3487 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3488
3489 /**
3490 * regmap_get_max_register() - Report the max register value
3491 *
3492 * @map: Register map to operate on.
3493 *
 * Report the max register value, mainly intended for use by
3495 * generic infrastructure built on top of regmap.
3496 */
regmap_get_max_register(struct regmap * map)3497 int regmap_get_max_register(struct regmap *map)
3498 {
3499 return map->max_register_is_set ? map->max_register : -EINVAL;
3500 }
3501 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3502
3503 /**
3504 * regmap_get_reg_stride() - Report the register address stride
3505 *
3506 * @map: Register map to operate on.
3507 *
 * Report the register address stride, mainly intended for use by
3509 * generic infrastructure built on top of regmap.
3510 */
/* Trivial accessor: the stride is fixed at regmap creation time. */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3516
3517 /**
3518 * regmap_might_sleep() - Returns whether a regmap access might sleep.
3519 *
3520 * @map: Register map to operate on.
3521 *
3522 * Returns true if an access to the register might sleep, else false.
3523 */
/* Trivial accessor: can_sleep is fixed by the bus/config at creation. */
bool regmap_might_sleep(struct regmap *map)
{
	return map->can_sleep;
}
EXPORT_SYMBOL_GPL(regmap_might_sleep);
3529
/*
 * Decode one raw register value from @buf into @val using the map's
 * value parser.  Returns -EINVAL if the map has no parse_val helper
 * (e.g. formatted-write-only maps), zero on success.
 */
int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);
3541
/* One-time init: bring up regmap's debugfs support early in boot. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
3549