1 /*-
2 * Copyright (c) 2020 Mellanox Technologies, Ltd.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 #include <linux/xarray.h>
29
30 #include <vm/vm_pageout.h>
31
/*
 * Linux' XArray allows storing a NULL pointer as a value. xa_load() would
 * return NULL for both an unused index and an index explicitly set to NULL.
 * But it impacts xa_alloc() which needs to find the next available index.
 *
 * However, our implementation relies on a radix tree (see `linux_radix.c`)
 * which does not accept NULL pointers as values. It is unclear whether this
 * is a limitation or a feature, so to work around it, a NULL value is
 * replaced by `NULL_VALUE`, an unlikely address, when we pass it to
 * linux_radix.
 */
42 #define NULL_VALUE (void *)0x1
43
44 /*
45 * This function removes the element at the given index and returns
46 * the pointer to the removed element, if any.
47 */
48 void *
__xa_erase(struct xarray * xa,uint32_t index)49 __xa_erase(struct xarray *xa, uint32_t index)
50 {
51 void *retval;
52
53 XA_ASSERT_LOCKED(xa);
54
55 retval = radix_tree_delete(&xa->xa_head, index);
56 if (retval == NULL_VALUE)
57 retval = NULL;
58
59 return (retval);
60 }
61
62 void *
xa_erase(struct xarray * xa,uint32_t index)63 xa_erase(struct xarray *xa, uint32_t index)
64 {
65 void *retval;
66
67 xa_lock(xa);
68 retval = __xa_erase(xa, index);
69 xa_unlock(xa);
70
71 return (retval);
72 }
73
74 /*
75 * This function returns the element pointer at the given index. A
76 * value of NULL is returned if the element does not exist.
77 */
78 void *
xa_load(struct xarray * xa,uint32_t index)79 xa_load(struct xarray *xa, uint32_t index)
80 {
81 void *retval;
82
83 xa_lock(xa);
84 retval = radix_tree_lookup(&xa->xa_head, index);
85 xa_unlock(xa);
86
87 if (retval == NULL_VALUE)
88 retval = NULL;
89
90 return (retval);
91 }
92
/*
 * This is an internal function used to sleep until more memory
 * becomes available.
 *
 * The xarray lock must be held on entry; it is dropped around the
 * vm_wait() sleep and re-acquired before returning. Because the lock
 * is released, callers must re-validate any xarray state (hence the
 * "goto retry" pattern at all call sites).
 */
static void
xa_vm_wait_locked(struct xarray *xa)
{
	xa_unlock(xa);
	vm_wait(NULL);
	xa_lock(xa);
}
104
105 /*
106 * This function iterates the xarray until it finds a free slot where
107 * it can insert the element pointer to by "ptr". It starts at the
108 * index pointed to by "pindex" and updates this value at return. The
109 * "mask" argument defines the maximum index allowed, inclusivly, and
110 * must be a power of two minus one value. The "gfp" argument
111 * basically tells if we can wait for more memory to become available
112 * or not. This function returns zero upon success or a negative error
113 * code on failure. A typical error code is -ENOMEM which means either
114 * the xarray is full, or there was not enough internal memory
115 * available to complete the radix tree insertion.
116 */
117 int
__xa_alloc(struct xarray * xa,uint32_t * pindex,void * ptr,struct xa_limit limit,gfp_t gfp)118 __xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit, gfp_t gfp)
119 {
120 int retval;
121
122 XA_ASSERT_LOCKED(xa);
123
124 MPASS(limit.max > limit.min);
125
126 *pindex = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
127 *pindex = MAX(*pindex, limit.min);
128 if (ptr == NULL)
129 ptr = NULL_VALUE;
130 retry:
131 retval = radix_tree_insert(&xa->xa_head, *pindex, ptr);
132
133 switch (retval) {
134 case -EEXIST:
135 if (likely(*pindex < limit.max)) {
136 (*pindex)++;
137 goto retry;
138 }
139 retval = -ENOMEM;
140 break;
141 case -ENOMEM:
142 if (likely(gfp & M_WAITOK)) {
143 xa_vm_wait_locked(xa);
144 goto retry;
145 }
146 break;
147 default:
148 break;
149 }
150 return (retval);
151 }
152
153 int
xa_alloc(struct xarray * xa,uint32_t * pindex,void * ptr,struct xa_limit limit,gfp_t gfp)154 xa_alloc(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit, gfp_t gfp)
155 {
156 int retval;
157
158 if (ptr == NULL)
159 ptr = NULL_VALUE;
160
161 xa_lock(xa);
162 retval = __xa_alloc(xa, pindex, ptr, limit, gfp);
163 xa_unlock(xa);
164
165 return (retval);
166 }
167
168 /*
169 * This function works the same like the "xa_alloc" function, except
170 * it wraps the next index value to zero when there are no entries
171 * left at the end of the xarray searching for a free slot from the
172 * beginning of the array. If the xarray is full -ENOMEM is returned.
173 */
174 int
__xa_alloc_cyclic(struct xarray * xa,uint32_t * pindex,void * ptr,struct xa_limit limit,uint32_t * pnext_index,gfp_t gfp)175 __xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit,
176 uint32_t *pnext_index, gfp_t gfp)
177 {
178 int retval;
179 int timeout = 1;
180
181 XA_ASSERT_LOCKED(xa);
182
183 MPASS(limit.max > limit.min);
184
185 *pnext_index = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
186 *pnext_index = MAX(*pnext_index, limit.min);
187 if (ptr == NULL)
188 ptr = NULL_VALUE;
189 retry:
190 retval = radix_tree_insert(&xa->xa_head, *pnext_index, ptr);
191
192 switch (retval) {
193 case -EEXIST:
194 if (unlikely(*pnext_index == limit.max) && !timeout--) {
195 retval = -ENOMEM;
196 break;
197 }
198 (*pnext_index)++;
199 if (*pnext_index > limit.max) {
200 *pnext_index = (xa->xa_flags & XA_FLAGS_ALLOC1) != 0 ? 1 : 0;
201 *pnext_index = MAX(*pnext_index, limit.min);
202 }
203 goto retry;
204 case -ENOMEM:
205 if (likely(gfp & M_WAITOK)) {
206 xa_vm_wait_locked(xa);
207 goto retry;
208 }
209 break;
210 default:
211 break;
212 }
213 *pindex = *pnext_index;
214
215 return (retval);
216 }
217
218 int
xa_alloc_cyclic(struct xarray * xa,uint32_t * pindex,void * ptr,struct xa_limit limit,uint32_t * pnext_index,gfp_t gfp)219 xa_alloc_cyclic(struct xarray *xa, uint32_t *pindex, void *ptr, struct xa_limit limit,
220 uint32_t *pnext_index, gfp_t gfp)
221 {
222 int retval;
223
224 xa_lock(xa);
225 retval = __xa_alloc_cyclic(xa, pindex, ptr, limit, pnext_index, gfp);
226 xa_unlock(xa);
227
228 return (retval);
229 }
230
231 int
xa_alloc_cyclic_irq(struct xarray * xa,uint32_t * pindex,void * ptr,struct xa_limit limit,uint32_t * pnext_index,gfp_t gfp)232 xa_alloc_cyclic_irq(struct xarray *xa, uint32_t *pindex, void *ptr,
233 struct xa_limit limit, uint32_t *pnext_index, gfp_t gfp)
234 {
235 int retval;
236
237 xa_lock_irq(xa);
238 retval = __xa_alloc_cyclic(xa, pindex, ptr, limit, pnext_index, gfp);
239 xa_unlock_irq(xa);
240
241 return (retval);
242 }
243
244 /*
245 * This function tries to insert an element at the given index. The
246 * "gfp" argument basically decides of this function can sleep or not
247 * trying to allocate internal memory for its radix tree. The
248 * function returns an error code upon failure. Typical error codes
249 * are element exists (-EEXIST) or out of memory (-ENOMEM).
250 */
251 int
__xa_insert(struct xarray * xa,uint32_t index,void * ptr,gfp_t gfp)252 __xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
253 {
254 int retval;
255
256 XA_ASSERT_LOCKED(xa);
257 if (ptr == NULL)
258 ptr = NULL_VALUE;
259 retry:
260 retval = radix_tree_insert(&xa->xa_head, index, ptr);
261
262 switch (retval) {
263 case -ENOMEM:
264 if (likely(gfp & M_WAITOK)) {
265 xa_vm_wait_locked(xa);
266 goto retry;
267 }
268 break;
269 default:
270 break;
271 }
272 return (retval);
273 }
274
275 int
xa_insert(struct xarray * xa,uint32_t index,void * ptr,gfp_t gfp)276 xa_insert(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
277 {
278 int retval;
279
280 xa_lock(xa);
281 retval = __xa_insert(xa, index, ptr, gfp);
282 xa_unlock(xa);
283
284 return (retval);
285 }
286
287 /*
288 * This function updates the element at the given index and returns a
289 * pointer to the old element. The "gfp" argument basically decides of
290 * this function can sleep or not trying to allocate internal memory
291 * for its radix tree. The function returns an XA_ERROR() pointer code
292 * upon failure. Code using this function must always check if the
293 * return value is an XA_ERROR() code before using the returned value.
294 */
295 void *
__xa_store(struct xarray * xa,uint32_t index,void * ptr,gfp_t gfp)296 __xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
297 {
298 int retval;
299
300 XA_ASSERT_LOCKED(xa);
301 if (ptr == NULL)
302 ptr = NULL_VALUE;
303 retry:
304 retval = radix_tree_store(&xa->xa_head, index, &ptr);
305
306 switch (retval) {
307 case 0:
308 if (ptr == NULL_VALUE)
309 ptr = NULL;
310 break;
311 case -ENOMEM:
312 if (likely(gfp & M_WAITOK)) {
313 xa_vm_wait_locked(xa);
314 goto retry;
315 }
316 ptr = XA_ERROR(retval);
317 break;
318 default:
319 ptr = XA_ERROR(retval);
320 break;
321 }
322 return (ptr);
323 }
324
325 void *
xa_store(struct xarray * xa,uint32_t index,void * ptr,gfp_t gfp)326 xa_store(struct xarray *xa, uint32_t index, void *ptr, gfp_t gfp)
327 {
328 void *retval;
329
330 xa_lock(xa);
331 retval = __xa_store(xa, index, ptr, gfp);
332 xa_unlock(xa);
333
334 return (retval);
335 }
336
/*
 * This function initializes an xarray structure.
 */
void
xa_init_flags(struct xarray *xa, uint32_t flags)
{
	memset(xa, 0, sizeof(*xa));

	/*
	 * NOTE(review): MTX_RECURSE is presumably needed because some
	 * LinuxKPI consumers re-enter xarray operations while already
	 * holding the lock -- confirm before changing.
	 */
	mtx_init(&xa->xa_lock, "lkpi-xarray", NULL, MTX_DEF | MTX_RECURSE);
	/* The radix tree itself must not sleep; sleeping is handled here. */
	xa->xa_head.gfp_mask = GFP_NOWAIT;
	xa->xa_flags = flags;
}
349
/*
 * This function destroys an xarray structure and all its internal
 * memory and locks.
 */
void
xa_destroy(struct xarray *xa)
{
	struct radix_tree_iter iter;
	void **ppslot;

	/* Delete every occupied slot while holding the lock. */
	xa_lock(xa);
	radix_tree_for_each_slot(ppslot, &xa->xa_head, &iter, 0)
		radix_tree_iter_delete(&xa->xa_head, &iter, ppslot);
	xa_unlock(xa);

	/*
	 * The mutex initialized in `xa_init_flags()` is not destroyed here on
	 * purpose. The reason is that on Linux, the xarray remains usable
	 * after a call to `xa_destroy()`. For instance the i915 DRM driver
	 * relies on that during the initialization of its GuC. Basically,
	 * `xa_destroy()` "resets" the structure to zero but doesn't really
	 * destroy it.
	 */
}
374
375 /*
376 * This function checks if an xarray is empty or not.
377 * It returns true if empty, else false.
378 */
379 bool
__xa_empty(struct xarray * xa)380 __xa_empty(struct xarray *xa)
381 {
382 struct radix_tree_iter iter = {};
383 void **temp;
384
385 XA_ASSERT_LOCKED(xa);
386
387 return (!radix_tree_iter_find(&xa->xa_head, &iter, &temp, 0));
388 }
389
390 bool
xa_empty(struct xarray * xa)391 xa_empty(struct xarray *xa)
392 {
393 bool retval;
394
395 xa_lock(xa);
396 retval = __xa_empty(xa);
397 xa_unlock(xa);
398
399 return (retval);
400 }
401
402 /*
403 * This function returns the next valid xarray entry based on the
404 * index given by "pindex". The valued pointed to by "pindex" is
405 * updated before return.
406 */
407 void *
__xa_next(struct xarray * xa,unsigned long * pindex,bool not_first)408 __xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
409 {
410 struct radix_tree_iter iter = { .index = *pindex };
411 void **ppslot;
412 void *retval;
413 bool found;
414
415 XA_ASSERT_LOCKED(xa);
416
417 if (not_first) {
418 /* advance to next index, if any */
419 iter.index++;
420 if (iter.index == 0)
421 return (NULL);
422 }
423
424 found = radix_tree_iter_find(&xa->xa_head, &iter, &ppslot, 0);
425 if (likely(found)) {
426 retval = *ppslot;
427 if (retval == NULL_VALUE)
428 retval = NULL;
429 *pindex = iter.index;
430 } else {
431 retval = NULL;
432 }
433 return (retval);
434 }
435
436 void *
xa_next(struct xarray * xa,unsigned long * pindex,bool not_first)437 xa_next(struct xarray *xa, unsigned long *pindex, bool not_first)
438 {
439 void *retval;
440
441 xa_lock(xa);
442 retval = __xa_next(xa, pindex, not_first);
443 xa_unlock(xa);
444
445 return (retval);
446 }
447