// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

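// A driver typically selects this cache by setting .cache_type = REGCACHE_MAPLE
// in its struct regmap_config; regcache_init() then wires up the ops at the
// bottom of this file.
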
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

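/*
 * Each entry in the tree is a kmalloc()ed array of unsigned long
 * register values covering the registers mas.index..mas.last, so a
 * read is a walk to the containing entry plus an index into it.
 */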
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

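/*
 * Writes update an existing entry in place where possible; otherwise a
 * new entry is allocated, absorbing any blocks immediately adjacent to
 * the register so that contiguous registers stay in a single entry.
 */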
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

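/*
 * Dropping a register range may leave live data either side of it in
 * an existing entry, in which case the surviving values are copied out
 * and stored back as new, smaller entries after the erase.
 */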
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	unsigned long lower_index, lower_last;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup_array(entry,
					      min - mas.index, sizeof(*lower),
					      map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup_array(&entry[max - mas.index + 1],
					      mas.last - max, sizeof(*upper),
					      map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

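/*
 * Sync registers min..max - 1 (max is exclusive) from one cache entry
 * to the device.  The device write may sleep, so the RCU read lock is
 * dropped for its duration; mas_pause() lets the caller's tree walk
 * resume safely afterwards.
 */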
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}

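/*
 * Walk each block in the range, batching consecutive registers that
 * need syncing into single regcache_maple_sync_block() calls, with
 * cache_bypass set so the writes go straight to the device.
 */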
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

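/*
 * Tear down the cache: free every block, then the tree itself.  Safe
 * to call twice since map->cache is cleared on the way out; the init()
 * error path relies on this.
 */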
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

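/*
 * Insert one block covering the defaults first..last, where first and
 * last are indices into map->reg_defaults rather than register
 * addresses.
 */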
static int regcache_maple_insert_block(struct regmap *map, int first,
					int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}

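/*
 * Build the initial tree from the register defaults, grouping runs of
 * contiguous registers into single blocks.
 */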
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;
	int range_start;

	mt = kmalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!map->num_reg_defaults)
		return 0;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				goto err;

			range_start = i;
		}
	}

	/* Add the last block */
	ret = regcache_maple_insert_block(map, range_start,
					  map->num_reg_defaults - 1);
	if (ret != 0)
		goto err;

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};