/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitops.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in free-slot bitmap in each layer to speed up allocation.
 */
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}
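
/*
 * A worked example of the radix math above.  This is an illustrative
 * sketch only; it assumes IDR_BITS == 5 (IDR_SIZE == 32), which may not
 * match the actual build configuration, and idr_pos_example() is a
 * hypothetical function that is not part of this file.
 */
#if 0
static void
idr_pos_example(struct idr *idr)
{
	/* With two layers the tree addresses 1 << (2 * 5) == 1024 ids. */
	idr->layers = 2;
	MPASS(idr_max(idr) == 1023);

	/* Id 37 descends through slot 1 at layer 1, slot 5 at layer 0. */
	MPASS(idr_pos(37, 1) == 1);	/* 37 >> 5 == 1 */
	MPASS(idr_pos(37, 0) == 5);	/* 37 & 31 == 5 */
}
#endif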

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees the layers cached on the free list. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}
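
/*
 * A minimal lifecycle sketch for idr_init()/idr_destroy() above.
 * "example_softc", "example_attach" and "example_detach" are
 * hypothetical names, not part of this file.
 */
#if 0
struct example_softc {
	struct idr unit_ids;
};

static void
example_attach(struct example_softc *sc)
{
	idr_init(&sc->unit_ids);
}

static void
example_detach(struct example_softc *sc)
{
	/* Frees the tree and any layers cached on the free list. */
	idr_destroy(&sc->unit_ids);
}
#endif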

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
	/* Free the interior node itself once its children are gone. */
	free(il, M_IDR);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return;
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * as we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * trace and a warning here, so I don't think that's necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;
}

void
idr_remove(struct idr *idr, int id)
{
	mtx_lock(&idr->lock);
	idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
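
/*
 * A minimal error-handling sketch for idr_replace() above; IS_ERR() and
 * PTR_ERR() come from <linux/err.h>, included earlier.  "example_swap"
 * and its arguments are hypothetical names, not part of this file.
 */
#if 0
static int
example_swap(struct idr *idr, int id, void *new_ptr)
{
	void *old_ptr;

	old_ptr = idr_replace(idr, new_ptr, id);
	if (IS_ERR(old_ptr))
		return (PTR_ERR(old_ptr));	/* -ENOENT: id not allocated. */
	/* The caller is now responsible for old_ptr again. */
	return (0);
}
#endif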

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
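
/*
 * Sketch of walking all allocated ids with idr_get_next(), similar to
 * Linux's idr_for_each_entry() pattern.  "example_walk" is a
 * hypothetical function, not part of this file.
 */
#if 0
static void
example_walk(struct idr *idr)
{
	void *p;
	int id;

	/* Visits every allocated id in ascending order. */
	for (id = 0; (p = idr_get_next(idr, &id)) != NULL; id++)
		printf("id %d -> %p\n", id, p);
}
#endif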

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}
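
/*
 * The classic pre-get/retry pattern this function exists to support, as
 * on Linux: preload the layer free list, then retry the allocation as
 * long as it reports -EAGAIN.  A minimal sketch; "example_insert" is a
 * hypothetical function, not part of this file.
 */
#if 0
static int
example_insert(struct idr *idr, void *ptr, int *idp)
{
	int error;

	do {
		if (idr_pre_get(idr, GFP_KERNEL) == 0)
			return (-ENOMEM);
		error = idr_get_new(idr, ptr, idp);
	} while (error == -EAGAIN);
	return (error);
}
#endif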

static inline struct idr_layer *
idr_get(struct idr *idr)
{
	struct idr_layer *il;

	il = idr->free;
	if (il) {
		idr->free = il->ary[0];
		il->ary[0] = NULL;
		return (il);
	}
	il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT);
	if (il != NULL)
		bitmap_fill(&il->bitmap, IDR_SIZE);
	return (il);
}

/*
 * Could be implemented as idr_get_new_above(idr, ptr, 0, idp), but
 * written separately for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns an index numbered from 0, or IDR_SIZE if none is set. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new_above: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times, but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}
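
/*
 * Sketch of reserving ids at or above a floor, e.g. to keep low ids for
 * statically assigned units.  The returned id is >= starting_id.
 * "example_insert_above" and the floor of 100 are hypothetical, not
 * part of this file.
 */
#if 0
static int
example_insert_above(struct idr *idr, void *ptr, int *idp)
{
	int error;

	do {
		if (idr_pre_get(idr, GFP_KERNEL) == 0)
			return (-ENOMEM);
		error = idr_get_new_above(idr, ptr, 100, idp);
	} while (error == -EAGAIN);
	return (error);
}
#endif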

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}
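
/*
 * Range-allocation sketch for idr_alloc() above.  Note that in this
 * implementation an internal allocation failure surfaces as -EAGAIN
 * (the layer cache is refilled with M_NOWAIT), not -ENOMEM as on
 * Linux.  "example_alloc_range" and the [16, 64) range are
 * hypothetical, not part of this file.
 */
#if 0
static int
example_alloc_range(struct idr *idr, void *ptr)
{
	int id;

	/* Request an id in [16, 64); returns the id or a negative errno. */
	id = idr_alloc(idr, ptr, 16, 64, GFP_KERNEL);
	if (id < 0)
		return (id);	/* -EINVAL, -ENOSPC or -EAGAIN. */
	return (id);
}
#endif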

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}
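
/*
 * Design note: cyclic allocation hands out increasing ids until the
 * range is exhausted and only then wraps back to start, so recently
 * freed ids are not reused immediately (useful for catching stale
 * handles).  A minimal sketch assuming an unbounded range (end == 0);
 * "example_alloc_handle" is hypothetical, not part of this file.
 */
#if 0
static int
example_alloc_handle(struct idr *idr, void *ptr)
{
	/* Ids come out as 0, 1, 2, ... even if 0 is freed in between. */
	return (idr_alloc_cyclic(idr, ptr, 0, 0, GFP_KERNEL));
}
#endif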

static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			/* Include the path above this leaf in the id. */
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: The IDR tree may not be modified while it is being iterated. */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}
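
/*
 * Callback sketch for idr_for_each() above: count the allocated ids.
 * A non-zero return from the callback stops the walk.  "example_count"
 * and "example_count_cb" are hypothetical names, not part of this file.
 */
#if 0
static int
example_count_cb(int id, void *p, void *data)
{
	int *count = data;

	(*count)++;
	return (0);	/* Non-zero would abort the iteration. */
}

static int
example_count(struct idr *idr)
{
	int count = 0;

	idr_for_each(idr, example_count_cb, &count);
	return (count);
}
#endif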

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);
	MPASS((int)end >= 0);

	if (end == 0)
		max = 0x80000000;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}
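
/*
 * Unit-number sketch for ida_simple_get() above.  A minimal example
 * assuming unit numbers live in [0, 256); "example_unit" is a
 * hypothetical function, not part of this file.
 */
#if 0
static int
example_unit(struct ida *ida)
{
	int unit;

	/* Grab the lowest free unit number in [0, 256). */
	unit = ida_simple_get(ida, 0, 256, GFP_KERNEL);
	if (unit < 0)
		return (unit);	/* -ENOMEM or -ENOSPC. */
	/* ... use the unit number ... */
	ida_simple_remove(ida, unit);
	return (0);
}
#endif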

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
}
675