xref: /freebsd/sys/compat/linuxkpi/common/src/linux_idr.c (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in bitmap for allocation.
 */
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

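/*
 * Illustrative consumer usage of the IDR API implemented in this file
 * (a minimal sketch only, excluded from compilation; "struct foo",
 * "foo_table" and the callers below are hypothetical names, not part
 * of this file or of linuxkpi).
 */
#if 0
struct foo {
	int	id;
	/* ... consumer-specific state ... */
};

static struct idr foo_table;	/* initialized elsewhere with idr_init() */

static int
foo_register(struct foo *fp)
{
	int id;

	/* Allocate an unused id in [1, 256) and bind it to fp. */
	id = idr_alloc(&foo_table, fp, 1, 256, GFP_KERNEL);
	if (id < 0)
		return (id);	/* negative errno, e.g. -ENOSPC */
	fp->id = id;
	return (0);
}

static struct foo *
foo_lookup(int id)
{
	/* Returns the pointer bound to "id", or NULL. */
	return (idr_find(&foo_table, id));
}

static void
foo_unregister(struct foo *fp)
{
	idr_remove(&foo_table, fp->id);
}
#endif
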
static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock.m) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up the per-CPU cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
	/* NOTE: the cache lock is left held; idr_preload_end() releases it. */
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}

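/*
 * Illustrative use of the preload protocol implemented above (a sketch
 * only, excluded from compilation; "foo_alloc_id" is hypothetical).
 * idr_preload() pins the thread, pre-fills the per-CPU layer cache and
 * returns with that cache locked, so the subsequent allocation can use
 * GFP_NOWAIT and still find spare layers; idr_preload_end() drops the
 * lock and unpins the thread.
 */
#if 0
static int
foo_alloc_id(struct idr *foo_table, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	id = idr_alloc(foo_table, ptr, 0, 0, GFP_NOWAIT);
	idr_preload_end();

	return (id);	/* allocated id, or a negative errno */
}
#endif
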
static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Frees the layer tree and the cached free list; stored pointers are not freed. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
	/* Free the interior layer itself, not only the leaves. */
	free(il, M_IDR);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void *
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * as we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * trace and a warning, so it doesn't seem necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	res = il->ary[idx];
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;

	return (res);
}

void *
idr_remove(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);

	return (res);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	/*
	 * Try the IDR's own free list first, then malloc(), and finally
	 * the per-CPU preload cache (usable only by a preloading thread).
	 */
	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but was written
 * separately for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns an index numbered from 0, or IDR_SIZE if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new_above: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit, but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times, but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}

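/*
 * Illustrative contrast between idr_alloc() and idr_alloc_cyclic() as
 * implemented above (a sketch only, excluded from compilation; "tbl"
 * and the caller are hypothetical).  Both allocate in [start, end),
 * where end <= 0 means no upper bound.
 */
#if 0
static void
foo_cyclic_example(struct idr *tbl, void *a, void *b)
{
	int id1, id2;

	id1 = idr_alloc_cyclic(tbl, a, 0, 128, GFP_KERNEL);
	idr_remove(tbl, id1);
	id2 = idr_alloc_cyclic(tbl, b, 0, 128, GFP_KERNEL);
	/*
	 * Unlike idr_alloc(), which would hand out the lowest free id
	 * (typically id1 again), the cyclic variant continues from
	 * idr->next_cyclic_id, i.e. around id1 + 1, and only falls back
	 * to "start" once the [start, end) range is exhausted.
	 */
}
#endif
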
static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated. */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}

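/*
 * Illustrative idr_for_each() consumer (a sketch only, excluded from
 * compilation; "foo_count" and its callback are hypothetical).  The
 * callback is invoked once per allocated id with the bound pointer; a
 * non-zero return value stops the iteration and is propagated back.
 */
#if 0
static int
foo_count_cb(int id, void *p, void *data)
{
	int *count = data;

	(*count)++;
	return (0);		/* continue iterating */
}

static int
foo_count(struct idr *tbl)
{
	int count = 0;

	idr_for_each(tbl, foo_count_cb, &count);
	return (count);
}
#endif
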
static int
idr_has_entry(int id, void *p, void *data)
{

	return (1);
}

bool
idr_is_empty(struct idr *idp)
{

	return (idr_for_each(idp, idr_has_entry, NULL) == 0);
}

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);

	if ((int)end <= 0)
		max = INT_MAX;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}

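/*
 * Illustrative IDA usage (a sketch only, excluded from compilation;
 * "foo_ida" and the wrappers are hypothetical).  ida_simple_get()
 * returns an unused id in [start, end), or a negative errno; end <= 0
 * means no upper bound.  Ids are returned with ida_simple_remove().
 */
#if 0
static struct ida foo_ida;	/* initialized elsewhere with ida_init() */

static int
foo_get_unit(void)
{
	return (ida_simple_get(&foo_ida, 0, 0, GFP_KERNEL));
}

static void
foo_put_unit(int unit)
{
	ida_simple_remove(&foo_ida, unit);
}
#endif
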
void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
}