// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

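/*
 * Cache lookups: each maple tree entry is an array of register values
 * covering a contiguous range of registers, indexed from the start of
 * that range (mas.index).
 */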
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

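/*
 * Cache updates: if the register is already covered by an entry we
 * update it in place, otherwise we allocate a new entry, merging with
 * any immediately adjacent entries so contiguous registers stay in a
 * single array.
 */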
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

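	/*
	 * Allocate one array covering the merged range index..last and
	 * fill it from the adjacent entries plus the new value.
	 */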
	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	} else {
		/* The store failed, the new entry was never inserted */
		kfree(entry);
	}

	return ret;
}

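/*
 * Drop all cached values between min and max. Entries that straddle the
 * boundaries are split: the portions outside the dropped range are
 * copied out and re-inserted as new, smaller entries.
 */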
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	unsigned long lower_index, lower_last;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup(entry, ((min - mas.index) *
						sizeof(unsigned long)),
					GFP_KERNEL);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup(&entry[max - mas.index + 1],
					((mas.last - max) *
					 sizeof(unsigned long)),
					GFP_KERNEL);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

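		/*
		 * Free and erase the original entry; any portions
		 * saved above are re-inserted below.
		 */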
		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

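/*
 * Write back all cached values between min and max to the device,
 * walking each entry that overlaps the requested range.
 */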
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r;
	int ret = 0;

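	/*
	 * Bypass the cache while syncing so the writes go straight to
	 * the device rather than back into the cache.
	 */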
	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			ret = regcache_sync_val(map, r, entry[r - mas.index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

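/*
 * Tear down the cache: free every value array stored in the tree, then
 * destroy and free the tree itself.
 */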
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

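/*
 * Create the maple tree and seed it with any register defaults provided
 * by the regmap.
 */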
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;

	mt = kmalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_maple_write(map,
					   map->reg_defaults[i].reg,
					   map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

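/* Hook the maple tree cache into the regcache core */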
struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};