#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);

/**
 * idr_alloc - allocate an id
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Allocates an unused ID in the range [start, end). Returns -ENOSPC
 * if there are no unused IDs in that range.
 *
 * Note that @end is treated as max when <= 0. This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * Simultaneous modifications to the @idr are not allowed and should be
 * prevented by the user, usually with a lock. idr_alloc() may be called
 * concurrently with read-only accesses to the @idr, such as idr_find() and
 * idr_for_each_entry().
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	void __rcu **slot;
	struct radix_tree_iter iter;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return -EINVAL;

	radix_tree_iter_init(&iter, start);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
	return iter.index;
}
EXPORT_SYMBOL_GPL(idr_alloc);

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Allocates an ID larger than the last ID allocated if one is available.
 * If not, it will attempt to allocate the smallest ID that is larger or
 * equal to @start.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	int id, curr = idr->idr_next;

	if (curr < start)
		curr = start;

	id = idr_alloc(idr, ptr, curr, end, gfp);
	if ((id == -ENOSPC) && (curr > start))
		id = idr_alloc(idr, ptr, start, curr, gfp);

	if (id >= 0)
		idr->idr_next = id + 1U;

	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

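/*
 * Example (illustrative sketch; my_idr, my_lock and struct my_obj are
 * hypothetical names): allocating an ID for an object under a caller-held
 * lock, as idr_alloc() requires. Readers such as idr_find() may run
 * concurrently under RCU. Removal would use idr_remove() under the same
 * lock.
 *
 *	static DEFINE_IDR(my_idr);
 *	static DEFINE_MUTEX(my_lock);
 *
 *	int my_obj_register(struct my_obj *obj)
 *	{
 *		int id;
 *
 *		mutex_lock(&my_lock);
 *		id = idr_alloc(&my_idr, obj, 1, 0, GFP_KERNEL);
 *		mutex_unlock(&my_lock);
 *		if (id < 0)
 *			return id;
 *		obj->id = id;
 *		return 0;
 *	}
 */
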
/**
 * idr_for_each - iterate through all stored pointers
 * @idr: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed to callback function
 *
 * The callback function will be called for each entry in @idr, passing
 * the id, the pointer and the data pointer passed to this function.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU. Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - Find next populated entry
 * @idr: idr handle
 * @nextid: Pointer to lowest possible ID to return
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);

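/*
 * Example (illustrative sketch; my_idr and my_handle_entry are hypothetical
 * names): walking all populated entries with idr_get_next(). As noted above,
 * the caller must advance the ID past each returned entry itself.
 *
 *	int id = 0;
 *	void *entry;
 *
 *	while ((entry = idr_get_next(&my_idr, &id)) != NULL) {
 *		my_handle_entry(id, entry);
 *		id++;
 *	}
 */
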
/**
 * idr_replace - replace pointer for given id
 * @idr: idr handle
 * @ptr: New pointer to associate with the ID
 * @id: Lookup key
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success. %-ENOENT indicates that @id was not
 * found. %-EINVAL indicates that @id or @ptr were not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, int id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	if (WARN_ON_ONCE(id < 0))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return ERR_PTR(-EINVAL);

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);

	return entry;
}
EXPORT_SYMBOL(idr_replace);

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer. As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()). To allocate a new ID, call
 * ida_simple_get(). To free an ID, call ida_simple_remove().
 *
 * If you have more complex locking requirements, use a loop around
 * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
 * ida_remove() to free an ID. You must make sure that ida_get_new() and
 * ida_remove() cannot be called at the same time as each other for the
 * same IDA.
 *
 * You can also use ida_get_new_above() if you need an ID to be allocated
 * above a particular number. ida_destroy() can be used to dispose of an
 * IDA without needing to free the individual IDs in it. You can use
 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
 *
 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */

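/*
 * Example (illustrative sketch; my_ida and the surrounding functions are
 * hypothetical names): the simple API described above needs no caller
 * locking.
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	int my_get_instance(void)
 *	{
 *		return ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	}
 *
 *	void my_put_instance(int id)
 *	{
 *		ida_simple_remove(&my_ida, id);
 *	}
 */
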
/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the IDR & radix tree to store
 * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
 * free, unlike the IDR where it means at least one entry is free.
 *
 * I considered telling the radix tree that each slot is an order-10 node
 * and storing the bit numbers in the radix tree, but the radix tree can't
 * allow a single multiorder entry at index 0, which would significantly
 * increase memory consumption for the IDA. So instead we divide the index
 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
 * directly in the entry. By being really tricksy, we could store
 * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
 * for 0-3 allocated IDs.
 *
 * We allow the radix tree 'exceptional' count to get out of date. Nothing
 * in the IDA nor the radix tree code checks it. If it becomes important
 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
 * calls to radix_tree_iter_replace() which will correct the exceptional
 * count.
 *
 * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
 * equivalent, it will still need locking. Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @start: id to start search at
 * @id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @start. It should be called
 * with any required locks to ensure that concurrent calls to
 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
 * Consider using ida_simple_get() if you do not have complex locking
 * requirements.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call. If the ida is full, it will
 * return %-ENOSPC. On success, it will return 0.
 *
 * @id returns a value in the range @start ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int start, int *id)
{
	struct radix_tree_root *root = &ida->ida_rt;
	void __rcu **slot;
	struct radix_tree_iter iter;
	struct ida_bitmap *bitmap;
	unsigned long index;
	unsigned bit, ebit;
	int new;

	index = start / IDA_BITMAP_BITS;
	bit = start % IDA_BITMAP_BITS;
	ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;

	slot = radix_tree_iter_init(&iter, index);
	for (;;) {
		if (slot)
			slot = radix_tree_next_slot(slot, &iter,
						RADIX_TREE_ITER_TAGGED);
		if (!slot) {
			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
			if (IS_ERR(slot)) {
				if (slot == ERR_PTR(-ENOMEM))
					return -EAGAIN;
				return PTR_ERR(slot);
			}
		}
		if (iter.index > index) {
			bit = 0;
			ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
		}
		new = iter.index * IDA_BITMAP_BITS;
		bitmap = rcu_dereference_raw(*slot);
		if (radix_tree_exception(bitmap)) {
			unsigned long tmp = (unsigned long)bitmap;
			ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
			if (ebit < BITS_PER_LONG) {
				tmp |= 1UL << ebit;
				rcu_assign_pointer(*slot, (void *)tmp);
				*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
			rcu_assign_pointer(*slot, bitmap);
		}

		if (bitmap) {
			bit = find_next_zero_bit(bitmap->bitmap,
							IDA_BITMAP_BITS, bit);
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (bit == IDA_BITMAP_BITS)
				continue;

			__set_bit(bit, bitmap->bitmap);
			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
				radix_tree_iter_tag_clear(root, &iter,
								IDR_FREE);
		} else {
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (ebit < BITS_PER_LONG) {
				bitmap = (void *)((1UL << ebit) |
						RADIX_TREE_EXCEPTIONAL_ENTRY);
				radix_tree_iter_replace(root, &iter, slot,
						bitmap);
				*id = new;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			__set_bit(bit, bitmap->bitmap);
			radix_tree_iter_replace(root, &iter, slot, bitmap);
		}

		*id = new;
		return 0;
	}
}
EXPORT_SYMBOL(ida_get_new_above);

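/*
 * Example (illustrative sketch; my_ida and my_ida_lock are hypothetical
 * names): the ida_pre_get()/ida_get_new_above() retry loop described above,
 * with the caller providing its own lock. This mirrors what ida_simple_get()
 * does below.
 *
 *	static DEFINE_IDA(my_ida);
 *	static DEFINE_SPINLOCK(my_ida_lock);
 *
 *	int my_alloc_id(void)
 *	{
 *		int id, err;
 *
 *		do {
 *			if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *				return -ENOMEM;
 *			spin_lock(&my_ida_lock);
 *			err = ida_get_new_above(&my_ida, 0, &id);
 *			spin_unlock(&my_ida_lock);
 *		} while (err == -EAGAIN);
 *
 *		return err ? err : id;
 *	}
 */
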
/**
 * ida_remove - Free the given ID
 * @ida: ida handle
 * @id: ID to free
 *
 * This function should not be called at the same time as ida_get_new_above().
 */
void ida_remove(struct ida *ida, int id)
{
	unsigned long index = id / IDA_BITMAP_BITS;
	unsigned offset = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long *btmp;
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
	if (!slot)
		goto err;

	bitmap = rcu_dereference_raw(*slot);
	if (radix_tree_exception(bitmap)) {
		btmp = (unsigned long *)slot;
		offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
		if (offset >= BITS_PER_LONG)
			goto err;
	} else {
		btmp = bitmap->bitmap;
	}
	if (!test_bit(offset, btmp))
		goto err;

	__clear_bit(offset, btmp);
	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
	if (radix_tree_exception(bitmap)) {
		if (rcu_dereference_raw(*slot) ==
				(void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
		kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
	return;
 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - Free the contents of an ida
 * @ida: ida handle
 *
 * Calling this function releases all resources associated with an IDA. When
 * this call returns, the IDA is empty and can be reused or freed. The caller
 * should not allow ida_remove() or ida_get_new_above() to be called at the
 * same time.
 */
void ida_destroy(struct ida *ida)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
		if (!radix_tree_exception(bitmap))
			kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000, or 0 for no limit)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Compared to ida_get_new_above() this function does its own locking, and
 * should be used unless there are special requirements.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

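/*
 * Example (illustrative sketch; my_ida is a hypothetical name): @end is
 * exclusive, so this call only ever returns IDs 1 through 15, or -ENOSPC
 * once all of them are in use.
 *
 *	id = ida_simple_get(&my_ida, 1, 16, GFP_KERNEL);
 */
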
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 *
 * Use to release an id allocated with ida_simple_get().
 *
 * Compared to ida_remove() this function does its own locking, and should be
 * used unless there are special requirements.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);