/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in bitmap for allocation.
 */
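/*
 * A minimal usage sketch (illustrative only, not compiled here;
 * "example_ids", "example_softc" and "sc" are hypothetical names):
 *
 *	struct idr example_ids;
 *	struct example_softc *sc = ...;
 *	int id;
 *
 *	idr_init(&example_ids);
 *	id = idr_alloc(&example_ids, sc, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 *	sc = idr_find(&example_ids, id);
 *	idr_remove(&example_ids, id);
 *	idr_destroy(&example_ids);
 */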
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock.m) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}
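
/*
 * The preload API mirrors Linux: idr_preload() pins the current CPU,
 * stocks the per-CPU cache and leaves its lock held; idr_preload_end()
 * releases the lock and unpins.  A sketch of the intended calling
 * pattern (illustrative only; "my_idr" and "ptr" are hypothetical):
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	idr_preload_end();
 *
 * Between the two calls idr_get() may fall back to the per-CPU cache,
 * which is what makes the GFP_NOWAIT allocation reliable.
 */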

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}
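
/*
 * Worked example, assuming IDR_BITS is 5 (so IDR_SIZE is 32 and
 * IDR_MASK is 31): for id 1000 in a two-layer tree, idr_pos(1000, 1) is
 * (1000 >> 5) & 31 == 31 and idr_pos(1000, 0) is 1000 & 31 == 8, so the
 * item lives in slot 8 of the leaf hanging off slot 31 of the root.
 */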

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees cached pages. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	/*
	 * This idr can be reused, and this function might be called
	 * multiple times without an intervening idr_init().  Check for
	 * that case; if we do not, the mutex code will panic while
	 * asserting that the lock is valid.
	 */
	if (mtx_initialized(&idr->lock) == 0)
		return;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void *
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * as we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free-space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * trace and a warning in this case, so it does not seem necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	res = il->ary[idx];
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;

	return (res);
}

void *
idr_remove(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);

	return (res);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}
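
/*
 * idr_pre_get() implements the legacy Linux two-step allocation
 * protocol: pre-allocate layers, then call idr_get_new() and retry on
 * -EAGAIN.  A sketch of the usual calling pattern (illustrative only):
 *
 *	do {
 *		if (idr_pre_get(idr, GFP_KERNEL) == 0)
 *			return (-ENOMEM);
 *		error = idr_get_new(idr, ptr, &id);
 *	} while (error == -EAGAIN);
 */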

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

/*
 * Get a free layer, trying in order: the idr's own free list, a
 * non-sleeping malloc() and finally the per-CPU preload cache (only
 * available while the current thread holds the cache lock, i.e. between
 * idr_preload() and idr_preload_end()).
 */
static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

/*
 * Could be implemented in terms of idr_get_new_above_locked(idr, ptr, 0,
 * idp), but it is kept separate for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns index numbered from 0 or size if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}
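
/*
 * Range semantics follow Linux: "end" is exclusive and a non-positive
 * "end" means no upper limit.  For example (sketch only):
 *
 *	id = idr_alloc(idr, ptr, 1, 100, GFP_KERNEL);
 *
 * returns an id in [1, 99], -ENOSPC once that range is exhausted, or
 * another negative errno if a tree layer cannot be allocated.
 */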

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated. */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}
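
/*
 * Example callback (sketch only; "count_cb" and "counter" are
 * hypothetical names):
 *
 *	static int
 *	count_cb(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return (0);
 *	}
 *
 *	int counter = 0;
 *	idr_for_each(idp, count_cb, &counter);
 *
 * A non-zero return value from the callback aborts the iteration and is
 * propagated back to the caller of idr_for_each().
 */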

static int
idr_has_entry(int id, void *p, void *data)
{

	return (1);
}

bool
idr_is_empty(struct idr *idp)
{

	return (idr_for_each(idp, idr_has_entry, NULL) == 0);
}

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);

	if ((int)end <= 0)
		max = INT_MAX;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}
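
/*
 * Example (sketch only; "example_ida" and "unit" are hypothetical):
 *
 *	static struct ida example_ida;
 *	int unit;
 *
 *	ida_init(&example_ida);
 *	unit = ida_simple_get(&example_ida, 0, 100, GFP_KERNEL);
 *	if (unit < 0)
 *		return (unit);
 *	...
 *	ida_simple_remove(&example_ida, unit);
 *	ida_destroy(&example_ida);
 */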

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
	ida->free_bitmap = NULL;
}