xref: /linux/drivers/base/regmap/regcache-maple.c (revision afb923b8198aa71e5b8e65268e598026faf43f12)
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

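/*
 * The cache is held in a maple tree indexed by register address. Each
 * tree entry covers a contiguous run of registers and points to a
 * kmalloc()ed array of unsigned longs holding one value per register,
 * so register r in an entry spanning [mas.index, mas.last] lives at
 * entry[r - mas.index]. For example, if registers 0x10..0x13 have been
 * cached together, a read of 0x12 walks to that single entry and
 * returns entry[2].
 */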
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

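/*
 * Writes either update a value in place or, for an uncached register,
 * allocate a new block. Any blocks immediately below or above the new
 * register are folded into the allocation so that contiguous registers
 * stay in a single entry: for example, writing register 5 while blocks
 * [3..4] and [6..8] exist replaces both with one [3..8] block.
 */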
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret) {
		kfree(entry);
		return ret;
	}
	kfree(lower);
	kfree(upper);
	return 0;
}

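/*
 * Dropping a register range erases every block it touches. Where a
 * block only partially overlaps the range, the registers outside the
 * range are copied out first and stored back as new, smaller blocks:
 * dropping [4..5] from a block covering [2..7] leaves blocks [2..3]
 * and [6..7] behind.
 */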
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	/* initialized to work around false-positive -Wuninitialized warning */
	unsigned long lower_index = 0, lower_last = 0;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup_array(entry,
					      min - mas.index, sizeof(*lower),
					      map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup_array(&entry[max - mas.index + 1],
					      mas.last - max, sizeof(*upper),
					      map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

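/*
 * Write back one contiguous run of dirty registers, [min, max). If the
 * bus supports raw writes and more than one register is involved, the
 * values are rendered into a single raw buffer so the run goes out as
 * one transaction; otherwise each register is written individually.
 */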
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}

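/*
 * Sync walks every cached block in the requested range and batches
 * consecutive registers that need syncing into runs, which are then
 * pushed out via regcache_maple_sync_block(). The upper bound passed
 * to the block sync is exclusive, so a run ending at the last register
 * of a block is flushed after the inner loop finishes.
 */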
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

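/*
 * The maple tree is only ever locked to satisfy lockdep asserts (see
 * the comments in the write and drop paths), so when the regmap has
 * its own lock class the maple lock is tagged with that class and a
 * distinct subclass rather than the default key shared by all maple
 * trees.
 */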
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;

	mt = kmalloc(sizeof(*mt), map->alloc_flags);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!mt_external_lock(mt) && map->lock_key)
		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

	return 0;
}

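/*
 * Tear-down has to free every value block individually before
 * destroying the tree, since the tree itself only stores pointers to
 * the kmalloc()ed arrays.
 */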
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

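/*
 * Insert one block of default values covering reg_defaults[first..last],
 * which the caller guarantees describe consecutive register addresses.
 */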
static int regcache_maple_insert_block(struct regmap *map, int first,
					int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}

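/*
 * Seed the cache from the register defaults table, grouping runs of
 * consecutive register addresses into single blocks. A defaults table
 * covering registers 0x00..0x03 and 0x10..0x11, for instance, produces
 * two tree entries.
 */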
static int regcache_maple_populate(struct regmap *map)
{
	int i;
	int ret;
	int range_start;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				return ret;

			range_start = i;
		}
	}

	/* Add the last block */
	return regcache_maple_insert_block(map, range_start, map->num_reg_defaults - 1);
}

struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.populate = regcache_maple_populate,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};
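/*
 * Drivers opt in to this cache through the regmap core rather than by
 * calling these ops directly. A minimal sketch, assuming a hypothetical
 * I2C device with a couple of register defaults (device and register
 * layout are illustrative only):
 *
 *	static const struct reg_default example_defaults[] = {
 *		{ .reg = 0x00, .def = 0x01 },
 *		{ .reg = 0x01, .def = 0x80 },
 *	};
 *
 *	static const struct regmap_config example_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.reg_defaults = example_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(example_defaults),
 *		.cache_type = REGCACHE_MAPLE,
 *	};
 *
 * regmap_init_i2c(client, &example_config) then routes cached accesses
 * through regcache_maple_ops above.
 */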