/*-
 * Copyright (c) 2020 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <linux/xarray.h>

#include <vm/vm_pageout.h>

/*
 * Linux's XArray allows a NULL pointer to be stored as a value, so xa_load()
 * returns NULL both for an unused index and for an index explicitly set to
 * NULL. This mainly affects xa_alloc(), which needs to find the next
 * available index.
 *
 * However, our implementation relies on a radix tree (see `linux_radix.c`)
 * which does not accept NULL pointers as values. Whether that is a limitation
 * or a feature is unclear, so to work around it a NULL value is replaced by
 * `NULL_VALUE`, an unlikely address, before it is passed to linux_radix.
 */
#define	NULL_VALUE	(void *)0x1
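
/*
 * Illustrative sketch (assuming GFP_KERNEL from the LinuxKPI headers): once a
 * NULL value has been stored, xa_load() cannot distinguish it from an index
 * that was never used:
 *
 *	xa_store(xa, 7, NULL, GFP_KERNEL);	// index 7 now "holds" NULL
 *	ptr = xa_load(xa, 7);			// returns NULL
 *	ptr = xa_load(xa, 8);			// also NULL (unused index)
 *
 * Internally, index 7 stores NULL_VALUE so the radix tree still sees a
 * non-NULL value and xa_alloc() will skip the slot.
 */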

/*
 * This function removes the element at the given index and returns
 * the pointer to the removed element, if any.
 */
void *
__xa_erase(struct xarray *xa, uint32_t index)
{
	void *retval;

	XA_ASSERT_LOCKED(xa);

	retval = radix_tree_delete(&xa->root, index);
	if (retval == NULL_VALUE)
		retval = NULL;

	return (retval);
}

void *
xa_erase(struct xarray *xa, uint32_t index)
{
	void *retval;

	xa_lock(xa);
	retval = __xa_erase(xa, index);
	xa_unlock(xa);

	return (retval);
}
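
/*
 * Usage sketch: xa_erase() hands back whatever was stored at the index, or
 * NULL when the index was empty (or explicitly held NULL), e.g.:
 *
 *	old = xa_erase(xa, 7);		// returns the removed pointer
 *	old = xa_erase(xa, 7);		// second call returns NULL
 */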

/*
 * This function returns the element pointer at the given index. A
 * value of NULL is returned if the element does not exist.
 */
void *
xa_load(struct xarray *xa, uint32_t index)
{
	void *retval;

	xa_lock(xa);
	retval = radix_tree_lookup(&xa->root, index);
	xa_unlock(xa);

	if (retval == NULL_VALUE)
		retval = NULL;

	return (retval);
}

/*
 * This is an internal function used to sleep until more memory
 * becomes available.
 */
static void
xa_vm_wait_locked(struct xarray *xa)
{
	xa_unlock(xa);
	vm_wait(NULL);
	xa_lock(xa);
}

/*
 * This function iterates the xarray until it finds a free slot where
 * it can insert the element pointed to by "ptr". It starts at the
 * index pointed to by "pindex" and updates this value upon return. The
 * "mask" argument defines the maximum index allowed, inclusively, and
 * must be a power of two minus one. The "gfp" argument tells whether
 * we can wait for more memory to become available or not. This
 * function returns zero upon success or a negative error code on
 * failure. A typical error code is -ENOMEM, which means either the
 * xarray is full or there was not enough internal memory available to
 * complete the radix tree insertion.
 */
int
__xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
{
	int retval;

	XA_ASSERT_LOCKED(xa);

	/* mask should allow allocating at least one item */
	MPASS(mask > ((xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));

	/* mask can be any power of two minus one */
	MPASS((mask & (mask + 1)) == 0);

	*pindex = (xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_insert(&xa->root, *pindex, ptr);

	switch (retval) {
	case -EEXIST:
		if (likely(*pindex != mask)) {
			(*pindex)++;
			goto retry;
		}
		retval = -ENOMEM;
		break;
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		break;
	default:
		break;
	}
	return (retval);
}

int
xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask, gfp_t gfp)
{
	int retval;

	if (ptr == NULL)
		ptr = NULL_VALUE;

	xa_lock(xa);
	retval = __xa_alloc(xa, pindex, ptr, mask, gfp);
	xa_unlock(xa);

	return (retval);
}
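
/*
 * Usage sketch (the 0xFFFF limit and the error handling are illustrative
 * only): the caller provides a power-of-two-minus-one mask and receives the
 * chosen index through "pindex":
 *
 *	uint32_t id;
 *
 *	if (xa_alloc(xa, &id, ptr, 0xFFFF, GFP_KERNEL) != 0)
 *		return (-ENOMEM);	// xarray full or out of memory
 *	// "ptr" is now reachable via xa_load(xa, id)
 */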

/*
 * This function works like the "xa_alloc" function, except that it
 * wraps the next index value around to zero when the end of the
 * xarray is reached, continuing the search for a free slot from the
 * beginning of the array. If the xarray is full, -ENOMEM is returned.
 */
int
__xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
    uint32_t *pnext_index, gfp_t gfp)
{
	int retval;
	int timeout = 1;

	XA_ASSERT_LOCKED(xa);

	/* mask should allow allocating at least one item */
	MPASS(mask > ((xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0));

	/* mask can be any power of two minus one */
	MPASS((mask & (mask + 1)) == 0);

	*pnext_index = (xa->flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_insert(&xa->root, *pnext_index, ptr);

	switch (retval) {
	case -EEXIST:
		if (unlikely(*pnext_index == mask) && !timeout--) {
			retval = -ENOMEM;
			break;
		}
		(*pnext_index)++;
		(*pnext_index) &= mask;
		if (*pnext_index == 0 && (xa->flags & XA_FLAGS_ALLOC1) != 0)
			(*pnext_index)++;
		goto retry;
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		break;
	default:
		break;
	}
	*pindex = *pnext_index;

	return (retval);
}

int
xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, uint32_t mask,
    uint32_t *pnext_index, gfp_t gfp)
{
	int retval;

	xa_lock(xa);
	retval = __xa_alloc_cyclic(xa, pindex, ptr, mask, pnext_index, gfp);
	xa_unlock(xa);

	return (retval);
}
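
/*
 * Usage sketch (illustrative values): the search starts at the lowest allowed
 * index and wraps at "mask"; on success both "pindex" and "pnext_index" are
 * set to the slot that was picked:
 *
 *	uint32_t id, next;
 *
 *	error = xa_alloc_cyclic(xa, &id, ptr, 0xFF, &next, GFP_KERNEL);
 *	// on success error == 0, id == next, and xa_load(xa, id) returns "ptr"
 */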

/*
 * This function tries to insert an element at the given index. The
 * "gfp" argument decides whether this function can sleep while trying
 * to allocate internal memory for its radix tree. The function
 * returns an error code upon failure. Typical error codes are
 * element exists (-EEXIST) or out of memory (-ENOMEM).
 */
int
__xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	int retval;

	XA_ASSERT_LOCKED(xa);
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_insert(&xa->root, index, ptr);

	switch (retval) {
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		break;
	default:
		break;
	}
	return (retval);
}

int
xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	int retval;

	xa_lock(xa);
	retval = __xa_insert(xa, index, ptr, gfp);
	xa_unlock(xa);

	return (retval);
}
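
/*
 * Usage sketch: unlike xa_store(), xa_insert() refuses to overwrite an
 * existing entry and reports it with -EEXIST:
 *
 *	error = xa_insert(xa, 10, ptr, GFP_KERNEL);	// 0 on success
 *	error = xa_insert(xa, 10, ptr, GFP_KERNEL);	// now -EEXIST
 */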

/*
 * This function updates the element at the given index and returns a
 * pointer to the old element. The "gfp" argument decides whether this
 * function can sleep while trying to allocate internal memory for its
 * radix tree. The function returns an XA_ERROR() pointer code upon
 * failure. Code using this function must always check if the return
 * value is an XA_ERROR() code before using the returned value.
 */
void *
__xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	int retval;

	XA_ASSERT_LOCKED(xa);
	if (ptr == NULL)
		ptr = NULL_VALUE;
retry:
	retval = radix_tree_store(&xa->root, index, &ptr);

	switch (retval) {
	case 0:
		if (ptr == NULL_VALUE)
			ptr = NULL;
		break;
	case -ENOMEM:
		if (likely(gfp & M_WAITOK)) {
			xa_vm_wait_locked(xa);
			goto retry;
		}
		ptr = XA_ERROR(retval);
		break;
	default:
		ptr = XA_ERROR(retval);
		break;
	}
	return (ptr);
}

void *
xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
{
	void *retval;

	xa_lock(xa);
	retval = __xa_store(xa, index, ptr, gfp);
	xa_unlock(xa);

	return (retval);
}
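
/*
 * Usage sketch (assuming the usual Linux xa_is_err()/xa_err() helpers are
 * available from <linux/xarray.h> in this compat layer): the old entry is
 * returned on success, so the result must be checked for an error pointer
 * before it is used:
 *
 *	old = xa_store(xa, 10, new_ptr, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return (xa_err(old));	// e.g. -ENOMEM
 */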

/*
 * This function initializes an xarray structure.
 */
void
xa_init_flags(struct xarray *xa, uint32_t flags)
{
	memset(xa, 0, sizeof(*xa));

	mtx_init(&xa->mtx, "lkpi-xarray", NULL, MTX_DEF | MTX_RECURSE);
	xa->root.gfp_mask = GFP_NOWAIT;
	xa->flags = flags;
}

/*
 * This function destroys an xarray structure and all its internal
 * memory and locks.
 */
void
xa_destroy(struct xarray *xa)
{
	struct radix_tree_iter iter;
	void **ppslot;

	radix_tree_for_each_slot(ppslot, &xa->root, &iter, 0)
		radix_tree_iter_delete(&xa->root, &iter, ppslot);
	mtx_destroy(&xa->mtx);
}
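
/*
 * Lifecycle sketch: an xarray is set up with xa_init_flags() before use and
 * torn down with xa_destroy(), which releases the remaining slots and the
 * embedded mutex (the stored objects themselves are not freed):
 *
 *	struct xarray xa;
 *
 *	xa_init_flags(&xa, XA_FLAGS_ALLOC1);	// allocations start at index 1
 *	// store, load and alloc calls go here
 *	xa_destroy(&xa);
 */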

/*
 * This function checks whether an xarray is empty.
 * It returns true if empty, false otherwise.
 */
bool
__xa_empty(struct xarray *xa)
{
	struct radix_tree_iter iter = {};
	void **temp;

	XA_ASSERT_LOCKED(xa);

	return (!radix_tree_iter_find(&xa->root, &iter, &temp));
}

bool
xa_empty(struct xarray *xa)
{
	bool retval;

	xa_lock(xa);
	retval = __xa_empty(xa);
	xa_unlock(xa);

	return (retval);
}

/*
 * This function returns the next valid xarray entry, starting the
 * search at the index given by "pindex" (or just after it when
 * "not_first" is true). The value pointed to by "pindex" is updated
 * before return.
 */
void *
__xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
{
	struct radix_tree_iter iter = { .index = *pindex };
	void **ppslot;
	void *retval;
	bool found;

	XA_ASSERT_LOCKED(xa);

	if (not_first) {
		/* advance to next index, if any */
		iter.index++;
		if (iter.index == 0)
			return (NULL);
	}

	found = radix_tree_iter_find(&xa->root, &iter, &ppslot);
	if (likely(found)) {
		retval = *ppslot;
		if (retval == NULL_VALUE)
			retval = NULL;
		*pindex = iter.index;
	} else {
		retval = NULL;
	}
	return (retval);
}

void *
xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
{
	void *retval;

	xa_lock(xa);
	retval = __xa_next(xa, pindex, not_first);
	xa_unlock(xa);

	return (retval);
}
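
/*
 * Iteration sketch: xa_next() walks the populated indices in ascending
 * order; passing "not_first" as true resumes just after the previous hit:
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = xa_next(xa, &index, false); entry != NULL;
 *	    entry = xa_next(xa, &index, true))
 *		handle(index, entry);	// "handle" is a placeholder
 *
 * Note that an entry stored as NULL is returned as NULL and therefore ends
 * the loop early, matching the NULL_VALUE discussion at the top of this file.
 */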
429