// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive. The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found. If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;
	if (max < base)
		return -ENOSPC;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
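
/*
 * Example: allocate the ID directly into the new object, so concurrent
 * RCU readers never observe an uninitialised ID (an illustrative sketch;
 * struct foo, foo_idr and foo_lock are assumed names, not part of this
 * file):
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	int err;
 *
 *	if (!foo)
 *		return -ENOMEM;
 *	spin_lock(&foo_lock);
 *	err = idr_alloc_u32(&foo_idr, foo, &foo->id, UINT_MAX, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 *	if (err)
 *		kfree(foo);
 */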

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end. If
 * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
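
/*
 * Example: the common preload pattern, which performs the GFP_KERNEL
 * allocation before taking the lock (an illustrative sketch; foo_idr,
 * foo_lock and foo are assumed names):
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	id = idr_alloc(&foo_idr, foo, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */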

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end. If
 * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
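
/*
 * Example: cyclic allocation avoids promptly reusing a just-freed ID
 * (an illustrative sketch; foo_idr and foo_lock are assumed names):
 *
 *	spin_lock(&foo_lock);
 *	id = idr_alloc_cyclic(&foo_idr, foo, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 *	if (id < 0)
 *		return id;
 */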

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR. If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);
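
/*
 * Example: removal under the writer lock; the object must not be freed
 * until RCU readers can no longer see it (an illustrative sketch which
 * assumes struct foo embeds a struct rcu_head named "rcu"):
 *
 *	spin_lock(&foo_lock);
 *	foo = idr_remove(&foo_idr, id);
 *	spin_unlock(&foo_lock);
 *	if (foo)
 *		kfree_rcu(foo, rcu);
 */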

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID. A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
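
/*
 * Example: an RCU-protected lookup that takes a reference before the
 * read lock is dropped (an illustrative sketch; assumes struct foo has
 * a refcount_t member named "ref"):
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !refcount_inc_not_zero(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 */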

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU. Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
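
/*
 * Example: a callback that prints every entry (an illustrative sketch;
 * foo_show and the seq_file argument are assumed for this example):
 *
 *	static int foo_show(int id, void *p, void *data)
 *	{
 *		struct seq_file *m = data;
 *
 *		seq_printf(m, "%d: %px\n", id, p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&foo_idr, foo_show, m);
 */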

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
		entry = rcu_dereference_raw(*slot);
		if (!entry)
			continue;
		if (!xa_is_internal(entry))
			break;
		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
			break;
		slot = radix_tree_iter_retry(&iter);
	}
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
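
/*
 * Example: walking all entries with idr_get_next_ul(); note the manual
 * increment that the interface requires (an illustrative sketch;
 * do_something() is an assumed helper):
 *
 *	unsigned long id = 0;
 *	void *entry;
 *
 *	while ((entry = idr_get_next_ul(&foo_idr, &id)) != NULL) {
 *		do_something(entry, id);
 *		id++;
 *	}
 */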

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	unsigned long id = *nextid;
	void *entry = idr_get_next_ul(idr, &id);

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;
	*nextid = id;
	return entry;
}
EXPORT_SYMBOL(idr_get_next);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Return: The old value on success. %-ENOENT indicates that @id was not
 * found. %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
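
/*
 * Example: swapping in a new object for an existing ID (an illustrative
 * sketch; new_foo is an assumed name, and the kfree_rcu() assumes a
 * struct rcu_head member, since readers may still hold the old pointer):
 *
 *	struct foo *old;
 *
 *	old = idr_replace(&foo_idr, new_foo, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree_rcu(old, rcu);
 */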

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer. As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()). To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it. You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking. It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
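
/*
 * Example: a minimal IDA user (an illustrative sketch; foo_ida and the
 * two helpers are assumed names, not part of this file):
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	int foo_get_id(void)
 *	{
 *		return ida_alloc(&foo_ida, GFP_KERNEL);
 *	}
 *
 *	void foo_put_id(int id)
 *	{
 *		ida_free(&foo_ida, id);
 *	}
 */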

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA. So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry. Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries. We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme. I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do use more than the 8192 IDs that the 512 bytes would
 * provide.
 *
 * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
 * equivalent, it will still need locking. Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive. The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		if (bit < BITS_PER_XA_VALUE) {
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	kfree(alloc);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
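
/*
 * Example: carving IDs out of a fixed range, e.g. minor numbers in
 * [1, 255] (an illustrative sketch; foo_ida is an assumed name):
 *
 *	int minor = ida_alloc_range(&foo_ida, 1, 255, GFP_KERNEL);
 *
 *	if (minor < 0)
 *		return minor;
 */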

/**
 * ida_find_first_range() - Get the lowest used ID.
 * @ida: IDA handle.
 * @min: Lowest ID to get.
 * @max: Highest ID to get.
 *
 * Get the lowest used ID between @min and @max, inclusive. The returned
 * ID will not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * Return: The lowest used ID, or errno if no used ID is found.
 */
int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max)
{
	unsigned long index = min / IDA_BITMAP_BITS;
	unsigned int offset = min % IDA_BITMAP_BITS;
	unsigned long *addr, size, bit;
	unsigned long tmp = 0;
	unsigned long flags;
	void *entry;
	int ret;

	if ((int)min < 0)
		return -EINVAL;
	if ((int)max < 0)
		max = INT_MAX;

	xa_lock_irqsave(&ida->xa, flags);

	entry = xa_find(&ida->xa, &index, max / IDA_BITMAP_BITS, XA_PRESENT);
	if (!entry) {
		ret = -ENOENT;
		goto err_unlock;
	}

	if (index > min / IDA_BITMAP_BITS)
		offset = 0;
	if (index * IDA_BITMAP_BITS + offset > max) {
		ret = -ENOENT;
		goto err_unlock;
	}

	if (xa_is_value(entry)) {
		tmp = xa_to_value(entry);
		addr = &tmp;
		size = BITS_PER_XA_VALUE;
	} else {
		addr = ((struct ida_bitmap *)entry)->bitmap;
		size = IDA_BITMAP_BITS;
	}

	bit = find_next_bit(addr, size, offset);

	xa_unlock_irqrestore(&ida->xa, flags);

	if (bit == size ||
	    index * IDA_BITMAP_BITS + bit > max)
		return -ENOENT;

	return index * IDA_BITMAP_BITS + bit;

err_unlock:
	xa_unlock_irqrestore(&ida->xa, flags);
	return ret;
}
EXPORT_SYMBOL(ida_find_first_range);
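
/*
 * Example: checking the lowest ID currently in use (an illustrative
 * sketch; foo_ida is an assumed name):
 *
 *	int id = ida_find_first_range(&foo_ida, 0, INT_MAX);
 *
 *	if (id < 0)
 *		return id;	// -ENOENT: nothing allocated in range
 */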

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	if ((int)id < 0)
		return;

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		unsigned long v = xa_to_value(bitmap);

		if (bit >= BITS_PER_XA_VALUE)
			goto err;
		if (!(v & (1UL << bit)))
			goto err;
		v &= ~(1UL << bit);
		if (!v)
			goto delete;
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!bitmap || !test_bit(bit, bitmap->bitmap))
			goto err;
		__clear_bit(bit, bitmap->bitmap);
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA. When this call returns, the IDA is empty and can be reused
 * or freed. If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_destroy(struct ida *ida)
{
	XA_STATE(xas, &ida->xa, 0);
	struct ida_bitmap *bitmap;
	unsigned long flags;

	xas_lock_irqsave(&xas, flags);
	xas_for_each(&xas, bitmap, ULONG_MAX) {
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		xas_store(&xas, NULL);
	}
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
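
/*
 * Example: tearing down an IDA wholesale, e.g. at module exit (an
 * illustrative sketch; foo_ida and foo_exit are assumed names):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		ida_destroy(&foo_ida);
 *	}
 */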

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct xarray *xa = &ida->xa;

	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
			xa->xa_flags >> ROOT_TAG_SHIFT);
	ida_dump_entry(xa->xa_head, 0);
}
#endif