/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user to pass back at a later time.  You then pass that id
 * to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
 * so we don't need to go to the memory "store" during an id allocation,
 * and so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
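/*
 * Usage sketch (illustrative only; "my_idr", "my_lock" and "struct my_obj"
 * are hypothetical caller-side names): the protocol described above pairs
 * idr_pre_get() outside the lock with idr_get_new() inside it, retrying
 * on -EAGAIN because another caller may have drained the preallocated
 * layers in between.
 *
 *	int allocate_id(struct my_obj *obj)
 *	{
 *		int id, err;
 *
 *		do {
 *			if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *				return -ENOMEM;
 *			spin_lock(&my_lock);
 *			err = idr_get_new(&my_idr, obj, &id);
 *			spin_unlock(&my_lock);
 *		} while (err == -EAGAIN);
 *
 *		return err ? err : id;
 *	}
 */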
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = p->layer;
	while (1) {
		/*
		 * We run around this loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* did id go over the limit? */
			if (id >= (1 << (idp->layers * IDR_BITS))) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}
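/*
 * Index arithmetic sketch (illustrative): sub_alloc() above walks the
 * tree by peeling IDR_BITS of the id off per layer, topmost layer first.
 * For a layer at height l, the slot index within that layer is:
 *
 *	n = (id >> (IDR_BITS * l)) & IDR_MASK;
 *
 * so for a two-layer tree an id decomposes as
 * id == (top_slot << IDR_BITS) | leaf_slot, and a tree of "layers"
 * levels covers all ids below (1 << (layers * IDR_BITS)).
 */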
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);
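/*
 * Variant sketch (illustrative; "my_idr", "my_lock" and "obj" are
 * hypothetical): passing a non-zero floor to idr_get_new_above() keeps
 * low ids free, e.g. reserving id 0 as an "invalid" sentinel:
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, obj, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */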
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
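/*
 * Clean-up sketch for the sequence described above (illustrative;
 * "free_one", "my_teardown" and "my_idr" are hypothetical):
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	void my_teardown(void)
 *	{
 *		idr_for_each(&my_idr, free_one, NULL);
 *		idr_remove_all(&my_idr);
 *		idr_destroy(&my_idr);
 *	}
 */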
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
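/*
 * Lock-free lookup sketch (illustrative; "my_idr" and "do_something" are
 * hypothetical, and this assumes the stored objects are themselves freed
 * only after an RCU grace period, as the idr_find() kerneldoc requires):
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 */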
/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id that
 * is greater than or equal to *@nextidp, updating *@nextidp to that id,
 * or %NULL if there is no such object.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * ida_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
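/*
 * ida usage sketch (illustrative; "my_ida" and "my_lock" are
 * hypothetical): the protocol mirrors the idr one - preallocate outside
 * the lock, allocate inside it, retry on -EAGAIN:
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */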
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
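/*
 * Mapping sketch (illustrative): internally an ida id splits into an idr
 * slot and a bit offset within that slot's bitmap leaf:
 *
 *	idr_id = id / IDA_BITMAP_BITS;
 *	offset = id % IDA_BITMAP_BITS;
 *
 * ida_get_new_above() above allocates with this decomposition, and
 * ida_remove() below walks the idr tree with idr_id, then clears bit
 * "offset" in the leaf bitmap.
 */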
/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);
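/*
 * Static initialization sketch (illustrative; assumes the
 * DEFINE_IDR()/DEFINE_IDA() macros from <linux/idr.h>): handles that
 * live for the whole lifetime of the kernel can be set up at build time
 * instead of calling idr_init()/ida_init() at run time:
 *
 *	static DEFINE_IDR(my_idr);
 *	static DEFINE_IDA(my_ida);
 */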