/*-
 * Copyright (c) 2020 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/xarray.h>

#include <vm/vm_pageout.h>

/*
 * Linux's XArray allows storing a NULL pointer as a value. xa_load() would
 * return NULL for both an unused index and an index set to NULL. This
 * ambiguity impacts xa_alloc(), which needs to find the next available index.
 *
 * However, our implementation relies on a radix tree (see `linux_radix.c`)
 * which does not accept NULL pointers as values. Whether this is a
 * limitation or a feature is unclear, so to work around it, a NULL value is
 * replaced by `NULL_VALUE`, an unlikely address, when it is passed to
 * linux_radix.
 */
#define	NULL_VALUE	(void *)0x1

/*
 * This function removes the element at the given index and returns
 * the pointer to the removed element, if any.
 */
void *
__xa_erase(struct xarray *xa, uint32_t index)
{
	void *retval;

	XA_ASSERT_LOCKED(xa);

	retval = radix_tree_delete(&xa->root, index);
	if (retval == NULL_VALUE)
		retval = NULL;

	return (retval);
}

void *
xa_erase(struct xarray *xa, uint32_t index)
{
	void *retval;

	xa_lock(xa);
	retval = __xa_erase(xa, index);
	xa_unlock(xa);

	return (retval);
}

/*
 * This function returns the element pointer at the given index. A
 * value of NULL is returned if the element does not exist.
 */
void *
xa_load(struct xarray *xa, uint32_t index)
{
	void *retval;

	xa_lock(xa);
	retval = radix_tree_lookup(&xa->root, index);
	xa_unlock(xa);

	if (retval == NULL_VALUE)
		retval = NULL;

	return (retval);
}
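
/*
 * Illustrative sketch (not compiled): because of the NULL_VALUE
 * substitution above, an index explicitly set to NULL still occupies its
 * slot in the radix tree, yet xa_load() reports NULL for it, exactly as
 * it does for an index that was never used. The helper name below is
 * hypothetical.
 */
#if 0
static void
example_load_vs_erase(struct xarray *xa)
{

	/* Insert NULL at index 7; the slot is now considered occupied. */
	if (xa_insert(xa, 7, NULL, GFP_KERNEL) != 0)
		return;

	/* Both lookups return NULL, even though only index 7 is in use. */
	MPASS(xa_load(xa, 7) == NULL);
	MPASS(xa_load(xa, 8) == NULL);

	/* Erasing frees the slot; the returned old value is NULL here too. */
	xa_erase(xa, 7);
}
#endif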

/*
 * This is an internal function used to sleep until more memory
 * becomes available.
 */
static void
xa_vm_wait_locked(struct xarray *xa)
{
	xa_unlock(xa);
	vm_wait(NULL);
	xa_lock(xa);
}

/*
 * This function iterates the xarray until it finds a free slot where
 * it can insert the element pointed to by "ptr". It starts at the
 * lowest allowed index and stores the chosen index in the value
 * pointed to by "pindex" upon return. The "mask" argument defines the
 * maximum allowed index, inclusive, and must be a power of two minus
 * one. The "gfp" argument tells whether the function may wait for more
 * memory to become available. This function returns zero upon success
 * or a negative error code on failure. A typical error code is -ENOMEM,
 * which means either the xarray is full or there was not enough
 * internal memory available to complete the radix tree insertion.
 */
int
__xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
{
	int retval;

	XA_ASSERT_LOCKED(xa);

	/* the mask must allow allocating at least one item */
	MPASS(mask > ((xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));

	/* the mask must be a power of two minus one */
	MPASS((mask & (mask + 1)) == 0);

	*pindex = (xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_insert(&xa->root, *pindex, ptr);

	switch (retval) {
	case -EEXIST:
		if (likely(*pindex != mask)) {
			(*pindex)++;
			goto retry;
		}
		retval = -ENOMEM;
		break;
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		break;
	default:
		break;
	}
	return (retval);
}

int
xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
{
	int retval;

	if (ptr == NULL)
		ptr = NULL_VALUE;

	xa_lock(xa);
	retval = __xa_alloc(xa, pindex, ptr, mask, gfp);
	xa_unlock(xa);

	return (retval);
}
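
/*
 * Usage sketch (not compiled): allocate a free index for "ptr" within
 * the range allowed by the mask, which must be a power of two minus one.
 * A sleepable context is assumed, hence GFP_KERNEL; the function and
 * variable names are hypothetical.
 */
#if 0
static int
example_alloc_id(struct xarray *xa, void *ptr, uint32_t *idp)
{
	int error;

	/* Search indices 0..255 (or 1..255 with XA_FLAGS_ALLOC1). */
	error = xa_alloc(xa, idp, ptr, 255, GFP_KERNEL);
	if (error != 0)
		return (error);	/* typically -ENOMEM when the range is full */

	/* "*idp" now holds the index where "ptr" was inserted. */
	return (0);
}
#endif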

/*
 * This function works like the "xa_alloc" function, except that it
 * wraps the next index value to zero when the end of the xarray is
 * reached, continuing the search for a free slot from the beginning of
 * the array. If the xarray is full, -ENOMEM is returned.
 */
int
__xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
    uint32_t *pnext_index, gfp_t gfp)
{
	int retval;
	int timeout = 1;

	XA_ASSERT_LOCKED(xa);

	/* the mask must allow allocating at least one item */
	MPASS(mask > ((xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));

	/* the mask must be a power of two minus one */
	MPASS((mask & (mask + 1)) == 0);

	*pnext_index = (xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_insert(&xa->root, *pnext_index, ptr);

	switch (retval) {
	case -EEXIST:
		if (unlikely(*pnext_index == mask) && !timeout--) {
			retval = -ENOMEM;
			break;
		}
		(*pnext_index)++;
		(*pnext_index) &= mask;
		if (*pnext_index == 0 && (xa->flags & XA_FLAGS_ALLOC1) != 0)
			(*pnext_index)++;
		goto retry;
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		break;
	default:
		break;
	}
	*pindex = *pnext_index;

	return (retval);
}

int
xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
    uint32_t *pnext_index, gfp_t gfp)
{
	int retval;

	xa_lock(xa);
	retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp);
	xa_unlock(xa);

	return (retval);
}
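
/*
 * Usage sketch (not compiled): xa_alloc_cyclic() takes an extra
 * "pnext_index" argument which receives the index the function settled
 * on, and the search wraps back to the start of the range instead of
 * failing when the end is reached. The names below are hypothetical.
 */
#if 0
static int
example_alloc_cyclic_id(struct xarray *xa, void *ptr, uint32_t *idp)
{
	uint32_t next_index;
	int error;

	error = xa_alloc_cyclic(xa, idp, ptr, 255, &next_index, GFP_KERNEL);
	if (error != 0)
		return (error);	/* -ENOMEM when the range is exhausted */

	/* "*idp" and "next_index" both hold the index that was used. */
	return (0);
}
#endif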

/*
 * This function tries to insert an element at the given index. The
 * "gfp" argument decides whether this function can sleep while trying
 * to allocate internal memory for its radix tree. The function returns
 * an error code upon failure. Typical error codes are element exists
 * (-EEXIST) or out of memory (-ENOMEM).
 */
int
__xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	int retval;

	XA_ASSERT_LOCKED(xa);
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_insert(&xa->root, index, ptr);

	switch (retval) {
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		break;
	default:
		break;
	}
	return (retval);
}

int
xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	int retval;

	xa_lock(xa);
	retval = __xa_insert(xa, index, ptr, gfp);
	xa_unlock(xa);

	return (retval);
}
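
/*
 * Usage sketch (not compiled): insert "ptr" at a caller-chosen index and
 * handle the case where the index is already occupied. The names below
 * are hypothetical.
 */
#if 0
static int
example_insert_at(struct xarray *xa, uint32_t index, void *ptr)
{
	int error;

	error = xa_insert(xa, index, ptr, GFP_KERNEL);
	if (error == -EEXIST) {
		/* The index is already occupied; pick another one. */
	} else if (error == -ENOMEM) {
		/* Only reachable when the gfp flags do not allow sleeping. */
	}
	return (error);
}
#endif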

/*
 * This function updates the element at the given index and returns a
 * pointer to the old element. The "gfp" argument decides whether this
 * function can sleep while trying to allocate internal memory for its
 * radix tree. The function returns an XA_ERROR() pointer code upon
 * failure. Code using this function must always check whether the
 * return value is an XA_ERROR() code before using the returned value.
 */
void *
__xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	int retval;

	XA_ASSERT_LOCKED(xa);
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_store(&xa->root, index, &ptr);

	switch (retval) {
	case 0:
		if (ptr == NULL_VALUE)
			ptr = NULL;
		break;
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		ptr = XA_ERROR(retval);
		break;
	default:
		ptr = XA_ERROR(retval);
		break;
	}
	return (ptr);
}

void *
xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	void *retval;

	xa_lock(xa);
	retval = __xa_store(xa, index, ptr, gfp);
	xa_unlock(xa);

	return (retval);
}
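
/*
 * Usage sketch (not compiled): store "ptr" at an index and check the
 * returned pointer for an error encoding before trusting it. This
 * assumes the xa_is_err()/xa_err() helpers from the Linux XArray error
 * API that pairs with XA_ERROR(); the function name is hypothetical.
 */
#if 0
static int
example_store_at(struct xarray *xa, uint32_t index, void *ptr)
{
	void *old;

	old = xa_store(xa, index, ptr, GFP_KERNEL);
	if (xa_is_err(old))
		return (xa_err(old));	/* e.g. -ENOMEM */

	/* "old" is the previous entry at "index", or NULL if it was empty. */
	return (0);
}
#endif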

/*
 * This function initializes an xarray structure.
 */
void
xa_init_flags(struct xarray *xa, uint32_t flags)
{
	memset(xa, 0, sizeof(*xa));

	mtx_init(&xa->mtx, "lkpi-xarray", NULL, MTX_DEF | MTX_RECURSE);
	xa->root.gfp_mask = GFP_NOWAIT;
	xa->flags = flags;
}

/*
 * This function destroys an xarray structure and all its internal
 * memory and locks.
 */
void
xa_destroy(struct xarray *xa)
{
	struct radix_tree_iter iter;
	void **ppslot;

	radix_tree_for_each_slot(ppslot, &xa->root, &iter, 0)
		radix_tree_iter_delete(&xa->root, &iter, ppslot);
	mtx_destroy(&xa->mtx);
}
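
/*
 * Lifecycle sketch (not compiled): a typical pairing of xa_init_flags()
 * and xa_destroy(). XA_FLAGS_ALLOC1 reserves index 0, so xa_alloc()
 * starts handing out indices at 1. The function name is hypothetical.
 */
#if 0
static void
example_lifecycle(void)
{
	struct xarray xa;

	xa_init_flags(&xa, XA_FLAGS_ALLOC1);

	/* ... use xa_alloc()/xa_store()/xa_erase() on &xa ... */

	/* Release all remaining entries and the backing lock. */
	xa_destroy(&xa);
}
#endif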

/*
 * This function checks whether an xarray is empty.
 * It returns true if empty, else false.
 */
bool
__xa_empty(struct xarray *xa)
{
	struct radix_tree_iter iter = {};
	void **temp;

	XA_ASSERT_LOCKED(xa);

	return (!radix_tree_iter_find(&xa->root, &iter, &temp));
}

bool
xa_empty(struct xarray *xa)
{
	bool retval;

	xa_lock(xa);
	retval = __xa_empty(xa);
	xa_unlock(xa);

	return (retval);
}

/*
 * This function returns the next valid xarray entry based on the
 * index given by "pindex". The value pointed to by "pindex" is
 * updated before return.
 */
void *
__xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
{
	struct radix_tree_iter iter = { .index = *pindex };
	void **ppslot;
	void *retval;
	bool found;

	XA_ASSERT_LOCKED(xa);

	if (not_first) {
		/* advance to next index, if any */
		iter.index++;
		if (iter.index == 0)
			return (NULL);
	}

	found = radix_tree_iter_find(&xa->root, &iter, &ppslot);
	if (likely(found)) {
		retval = *ppslot;
		if (retval == NULL_VALUE)
			retval = NULL;
		*pindex = iter.index;
	} else {
		retval = NULL;
	}
	return (retval);
}

void *
xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
{
	void *retval;

	xa_lock(xa);
	retval = __xa_next(xa, pindex, not_first);
	xa_unlock(xa);

	return (retval);
}
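
/*
 * Iteration sketch (not compiled): walk the entries by calling xa_next()
 * repeatedly, passing "false" on the first call so the entry at the
 * start index itself is considered. Note that the loop ends at the first
 * NULL return, which also stops it at an entry explicitly stored as
 * NULL. The names below are hypothetical.
 */
#if 0
static void
example_iterate(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_next(xa, &index, false); entry != NULL;
	    entry = xa_next(xa, &index, true)) {
		/* "index" holds the position of "entry" here. */
	}
}
#endif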