/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a builtin bitmap for allocation.
 */
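
/*
 * Illustrative usage sketch (not part of this implementation): a typical
 * LinuxKPI consumer pairs idr_init()/idr_destroy() around idr_alloc(),
 * idr_find() and idr_remove().  The names "my_idr" and "some_object" and
 * the ID range below are hypothetical:
 *
 *	static struct idr my_idr;
 *	void *obj;
 *	int id;
 *
 *	idr_init(&my_idr);
 *	id = idr_alloc(&my_idr, some_object, 1, 0, GFP_KERNEL);
 *	if (id >= 0) {
 *		obj = idr_find(&my_idr, id);
 *		idr_remove(&my_idr, id);
 *	}
 *	idr_destroy(&my_idr);
 */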
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if the wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}
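
/*
 * Illustrative usage sketch (not part of this implementation): idr_preload()
 * pins the current thread and returns with the per-CPU cache lock held, so
 * allocations done before idr_preload_end() should use GFP_NOWAIT, mirroring
 * the Linux idr_preload() contract.  "my_idr" and "obj" are hypothetical
 * consumer state:
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *	idr_preload_end();
 *	if (id < 0)
 *		return (id);
 */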

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees cached pages. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	/*
	 * This idr can be reused, and this function might be called multiple
	 * times without an intervening idr_init().  Check for that case: if
	 * we did not, the mutex code would panic while asserting that the
	 * lock is valid.
	 */
	if (mtx_initialized(&idr->lock) == 0)
		return;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void *
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * as we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free-space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * and a warning here, so that does not seem necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	res = il->ary[idx];
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;

	return (res);
}

void *
idr_remove(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);

	return (res);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
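
/*
 * Illustrative usage sketch (not part of this implementation): idr_replace()
 * returns the previous pointer on success and ERR_PTR(-ENOENT) if the ID was
 * never allocated, so callers are expected to check the result with IS_ERR().
 * "my_idr", "new_obj" and "id" are hypothetical:
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return (PTR_ERR(old));
 */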

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
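
/*
 * Illustrative usage sketch (not part of this implementation): idr_get_next()
 * can be used to walk every allocated ID in ascending order, much like
 * Linux's idr_for_each_entry().  "my_idr" is a hypothetical consumer IDR:
 *
 *	void *obj;
 *	int id;
 *
 *	for (id = 0; (obj = idr_get_next(&my_idr, &id)) != NULL; id++)
 *		printf("id %d -> %p\n", id, obj);
 */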

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
 * first for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns index numbered from 0 or size if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}
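
/*
 * Illustrative usage sketch (not part of this implementation): the two-step
 * idr_pre_get()/idr_get_new_above() API returns -EAGAIN when no preallocated
 * layers are available, so the classic calling pattern retries after another
 * idr_pre_get().  "my_idr" and "obj" are hypothetical:
 *
 *	int error, id;
 *
 *	do {
 *		if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *			return (-ENOMEM);
 *		error = idr_get_new_above(&my_idr, obj, 1, &id);
 *	} while (error == -EAGAIN);
 *	if (error != 0)
 *		return (error);
 */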

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}
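
/*
 * Illustrative usage sketch (not part of this implementation): the "end"
 * argument is exclusive and end <= 0 means no upper bound, so a caller that
 * wants IDs in the range [1, 99] passes start = 1 and end = 100.  "my_idr"
 * and "obj" are hypothetical:
 *
 *	int id;
 *
 *	id = idr_alloc(&my_idr, obj, 1, 100, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 */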

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}
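
/*
 * Illustrative usage sketch (not part of this implementation):
 * idr_alloc_cyclic() hands out IDs starting after the most recently
 * allocated one and wraps back to "start" when the upper end is exhausted,
 * which avoids immediately reusing freed IDs.  "my_idr" and "obj" are
 * hypothetical:
 *
 *	int id;
 *
 *	id = idr_alloc_cyclic(&my_idr, obj, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 */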

static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}
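
/*
 * Illustrative usage sketch (not part of this implementation): idr_for_each()
 * invokes the callback once for every allocated ID and stops early if the
 * callback returns non-zero.  The callback and "my_idr" below are
 * hypothetical:
 *
 *	static int
 *	dump_one(int id, void *p, void *data)
 *	{
 *		printf("id %d -> %p\n", id, p);
 *		return (0);
 *	}
 *
 *	(void)idr_for_each(&my_idr, dump_one, NULL);
 */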

static int
idr_has_entry(int id, void *p, void *data)
{

	return (1);
}

bool
idr_is_empty(struct idr *idp)
{

	return (idr_for_each(idp, idr_has_entry, NULL) == 0);
}

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);

	if ((int)end <= 0)
		max = INT_MAX;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}
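
/*
 * Illustrative usage sketch (not part of this implementation):
 * ida_simple_get() returns a free ID in [start, end) directly (end == 0
 * meaning no upper bound) and retries internally on -EAGAIN, so callers
 * only need to check for a negative return.  "my_ida" is a hypothetical
 * consumer IDA:
 *
 *	int id;
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 *	ida_simple_remove(&my_ida, id);
 */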

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
	ida->free_bitmap = NULL;
}