xref: /linux/drivers/base/regmap/regcache.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
};

/*
 * Populate map->reg_defaults from the raw default values, reading them
 * back from the hardware if the driver did not supply a raw defaults
 * table.  Registers whose raw default is zero are omitted so that the
 * resulting table stays sparse.
 */
static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	if (!map->reg_defaults_raw) {
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		ret = regmap_bulk_read(map, 0, tmp_buf,
				       map->num_reg_defaults_raw);
		if (ret < 0) {
			kfree(tmp_buf);
			return ret;
		}
		map->reg_defaults_raw = tmp_buf;
		map->cache_free = 1;
	}

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		if (!val)
			continue;
		count++;
	}

	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
				    GFP_KERNEL);
	if (!map->reg_defaults) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* fill the reg_defaults */
	map->num_reg_defaults = count;
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		if (!val)
			continue;
		map->reg_defaults[j].reg = i;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
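
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: how the compaction in regcache_hw_init() plays out for a
 * hypothetical device with 8-bit register values.  The names and values
 * below are made up; only struct reg_default is the real type.
 */
static const u8 example_defaults_raw[] = {
	0x00,	/* reg 0: zero, not stored */
	0x1a,	/* reg 1: stored */
	0x00,	/* reg 2: zero, not stored */
	0xff,	/* reg 3: stored */
};

/* regcache_hw_init() would build the equivalent of this sparse table: */
static const struct reg_default example_reg_defaults[] = {
	{ .reg = 1, .def = 0x1a },
	{ .reg = 3, .def = 0xff },
};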

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults provided by the
	 * driver won't vanish from under us, so make a copy of them.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults;
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
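
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: roughly how a driver asks for a cache when it sets up its regmap.
 * The register layout and defaults are invented; struct regmap_config,
 * REGCACHE_RBTREE and struct reg_default come from <linux/regmap.h>.  The
 * config would then be handed to regmap_init_i2c(), regmap_init_spi() or a
 * similar bus-specific init call.
 */
static struct reg_default example_config_defaults[] = {
	{ .reg = 0x00, .def = 0x0001 },
	{ .reg = 0x01, .def = 0x1fff },
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.max_register = 0x3f,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = example_config_defaults,
	.num_reg_defaults = ARRAY_SIZE(example_config_defaults),
};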

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to read from.
 * @reg: The register index.
 * @value: Pointer in which to return the cached value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	/* Volatile registers are never cached */
	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map->dev, reg, *value);

		return ret;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(regcache_read);

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to write to.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_writeable(map, reg))
		return -EIO;

	/* Volatile registers are never cached */
	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}
EXPORT_SYMBOL_GPL(regcache_write);
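
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: a simplified view of how a caller such as the regmap core can use
 * regcache_read()/regcache_write() around a bus access.  example_hw_read()
 * and example_cached_read() are hypothetical names standing in for the
 * real bus plumbing.
 */
static int __maybe_unused example_hw_read(struct regmap *map, unsigned int reg,
					  unsigned int *val)
{
	/* Stand-in for a real bus transaction; always fails here. */
	return -EIO;
}

static int __maybe_unused example_cached_read(struct regmap *map,
					      unsigned int reg,
					      unsigned int *val)
{
	int ret;

	/* Serve the read from the cache when possible. */
	ret = regcache_read(map, reg, val);
	if (ret == 0)
		return 0;

	/* Cache miss or volatile register: go out to the hardware. */
	ret = example_hw_read(map, reg, val);
	if (ret == 0)
		/* Fill the cache so the next read can be served locally. */
		regcache_write(map, reg, *val);

	return ret;
}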

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int val;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	mutex_lock(&map->lock);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");
	if (!map->cache_dirty)
		goto out;
	if (map->cache_ops->sync) {
		ret = map->cache_ops->sync(map);
	} else {
		/* Generic sync: write back every register with a default */
		for (i = 0; i < map->num_reg_defaults; i++) {
			unsigned int reg = map->reg_defaults[i].reg;

			ret = regcache_read(map, reg, &val);
			if (ret < 0)
				goto out;
			map->cache_bypass = 1;
			ret = _regmap_write(map, reg, val);
			map->cache_bypass = 0;
			if (ret < 0)
				goto out;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				reg, val);
		}
	}
out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	mutex_unlock(&map->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if writes should only update the cache and not the hardware
 *
 * When a register map is marked as cache only, writes through the
 * register map API will only update the register cache; they will not
 * cause any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	mutex_lock(&map->lock);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	mutex_lock(&map->lock);
	map->cache_dirty = true;
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
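
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: the usual suspend/resume pattern that ties regcache_cache_only(),
 * regcache_mark_dirty() and regcache_sync() together.  The function names
 * are hypothetical; the regcache_* calls are the real API from this file.
 */
static void __maybe_unused example_suspend(struct regmap *map)
{
	/*
	 * The device is about to lose power: keep writes in the cache only
	 * and remember that the hardware contents will be stale.
	 */
	regcache_cache_only(map, true);
	regcache_mark_dirty(map);
}

static int __maybe_unused example_resume(struct regmap *map)
{
	/*
	 * Power is back: allow hardware access again and replay the cached
	 * register values into the device.
	 */
	regcache_cache_only(map, false);
	return regcache_sync(map);
}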

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * through the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	mutex_lock(&map->lock);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
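
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: cache bypass suits one-off writes that must reach the hardware but
 * should not overwrite what the cache believes the register holds.  The
 * register address and example_apply_errata() are made up; regmap_write()
 * is the real consumer API from <linux/regmap.h>.
 */
static int __maybe_unused example_apply_errata(struct regmap *map)
{
	int ret;

	regcache_cache_bypass(map, true);
	/* Goes straight to the hardware, leaving the cache untouched. */
	ret = regmap_write(map, 0x7f, 0x0001);
	regcache_cache_bypass(map, false);

	return ret;
}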

/*
 * Store @val in the raw cache at index @idx.  Returns true if the cache
 * already held that value (so nothing changed), false after updating it.
 */
bool regcache_set_val(void *base, unsigned int idx,
		      unsigned int val, unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		u8 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	/* unreachable */
	return false;
}

/*
 * Fetch the raw cache entry at index @idx, interpreting the cache as an
 * array of @word_size byte values.
 */
unsigned int regcache_get_val(const void *base, unsigned int idx,
			      unsigned int word_size)
{
	if (!base)
		return -EINVAL;

	switch (word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}
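
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: the raw cache is simply an array packed to cache_word_size (one
 * byte when val_bits <= 8, two bytes when val_bits <= 16) and indexed by
 * register number.  example_val_helpers() is a made-up name.
 */
static void __maybe_unused example_val_helpers(void)
{
	u16 packed[4] = { 0 };

	/* Store 0x1234 at register index 2 of a 16-bit word cache... */
	regcache_set_val(packed, 2, 0x1234, sizeof(u16));

	/* ...and read it back; regcache_get_val() returns 0x1234. */
	WARN_ON(regcache_get_val(packed, 2, sizeof(u16)) != 0x1234);
}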

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

/*
 * Binary search the sorted reg_defaults table for @reg.  Returns the index
 * of the matching entry, or -ENOENT if the register has no default.
 */
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}
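
/*
 * Illustrative sketch, added for this listing and not part of the original
 * file: how a cache backend can turn the index returned by
 * regcache_lookup_reg() back into a default value.  example_get_default()
 * is a hypothetical name.
 */
static int __maybe_unused example_get_default(struct regmap *map,
					      unsigned int reg,
					      unsigned int *def)
{
	int idx;

	/* reg_defaults is sorted by register number, so bsearch() applies. */
	idx = regcache_lookup_reg(map, reg);
	if (idx < 0)
		return idx;	/* -ENOENT: no default known for this register */

	*def = map->reg_defaults[idx].def;
	return 0;
}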