// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>

/*
 * Radix tree node cache.
 */
struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)

/*
 * Per-cpu pool of preloaded nodes
 */
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
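
/*
 * Illustrative sketch (not part of the original file): internal entries
 * are distinguished from data pointers by setting low bit(s) of the
 * pointer, relying on nodes being at least word-aligned.  Assuming the
 * RADIX_TREE_INTERNAL_NODE definition from <linux/radix-tree.h>:
 *
 *	struct radix_tree_node *node = ...;
 *	void *entry = node_to_entry(node);
 *	// radix_tree_is_internal_node(entry) is now true
 *	// entry_to_node(entry) recovers the original pointer, == node
 */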

#define RADIX_TREE_RETRY	XA_RETRY_ENTRY

static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return parent ? slot - parent->slots : 0;
}

static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->xa_flags & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}

/**
 * radix_tree_find_next_bit - find the next set bit in a node's tag bitmap
 *
 * @node: The node whose tag bitmap is searched
 * @tag: The tag index of the bitmap to search
 * @offset: The bit number to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits from RADIX_TREE_MAP_SIZE up to the next BITS_PER_LONG
 * boundary must be zero.
 * Returns the next bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return iter->index & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}
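
/*
 * Worked example (illustrative): with RADIX_TREE_MAP_SHIFT == 6, the
 * usual value when CONFIG_BASE_SMALL is not set, RADIX_TREE_MAP_SIZE
 * is 64, so:
 *
 *	shift_maxindex(0)  == (64 << 0)  - 1 == 63
 *	shift_maxindex(6)  == (64 << 6)  - 1 == 4095
 *	shift_maxindex(12) == (64 << 12) - 1 == 262143
 *
 * i.e. each additional tree level multiplies the index space by 64.
 */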

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}

void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	local_lock(&radix_tree_preloads.lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		local_unlock(&radix_tree_preloads.lock);
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		local_lock(&radix_tree_preloads.lock);
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on nonsensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
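
/*
 * Typical usage sketch (illustrative; 'mytree', 'mylock', 'index' and
 * 'item' are hypothetical): preload outside the spinlock so the
 * allocation inside the lock is satisfied from the per-cpu pool:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&mylock);
 *	error = radix_tree_insert(&mytree, index, item);
 *	spin_unlock(&mylock);
 *	radix_tree_preload_end();
 */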

/*
 * The same as radix_tree_preload(), except preloading is not guaranteed:
 * we preload only if we decide it would help. On success, return zero with
 * preemption disabled. On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	local_lock(&radix_tree_preloads.lock);
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 *	Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->xa_head);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (xa_is_value(entry)) {
			/* Moving a value entry root->xa_head to a node */
			node->nr_values = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->xa_head, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root:		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;

		/*
		 * For an IDR, we must not shrink entry 0 into the root in
		 * case somebody calls idr_replace() with a pointer that
		 * appears to be an internal entry.
		 */
		if (!node->shift && is_idr(root))
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->xa_head) as far as dependent read barriers go.
		 */
		root->xa_head = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing the direct root node with an indirect
		 * pointer also results in a stale slot). So tag the slot as
		 * indirect to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->xa_head))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->xa_head = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
static int __radix_tree_create(struct radix_tree_root *root,
		unsigned long index, struct radix_tree_node **nodep,
		void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index;
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}

	while (shift > 0) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (xa_is_node(entry) && child->shift) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (xa_is_value(item))
			node->nr_values++;
	}
	return 1;
}

/**
 *	radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(radix_tree_insert);
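
/*
 * Illustrative sketch (hypothetical 'mytree', 'index' and 'item'): a
 * caller that tolerates a racing inserter can treat -EEXIST as "someone
 * else got there first" and look up the winner:
 *
 *	error = radix_tree_insert(&mytree, index, item);
 *	if (error == -EEXIST)
 *		item = radix_tree_lookup(&mytree, index);
 *	else if (error)
 *		return error;
 */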

/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(const struct radix_tree_root *root,
			  unsigned long index, struct radix_tree_node **nodep,
			  void __rcu ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void __rcu **slot;

 restart:
	parent = NULL;
	slot = (void __rcu **)&root->xa_head;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
		if (node == RADIX_TREE_RETRY)
			goto restart;
		if (parent->shift == 0)
			break;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock only if the slot is
 *	not modified by radix_tree_replace_slot; otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
				unsigned long index)
{
	void __rcu **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (e.g. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
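
/*
 * Illustrative RCU lookup sketch (hypothetical 'mytree'); the caller is
 * responsible for item lifetimes, e.g. by freeing items only after an
 * RCU grace period:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&mytree, index);
 *	if (item)
 *		...	// use the item; it cannot be freed before
 *			// rcu_read_unlock() if items are RCU-freed
 *	rcu_read_unlock();
 */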

static void replace_slot(void __rcu **slot, void *item,
		struct radix_tree_node *node, int count, int values)
{
	if (node && (count || values)) {
		node->count += count;
		node->nr_values += values;
	}

	rcu_assign_pointer(*slot, item);
}

static bool node_tag_get(const struct radix_tree_root *root,
				const struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	if (node)
		return tag_get(node, tag, offset);
	return root_tag_get(root, tag);
}

/*
 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL.  For the IDA, we mark slots as being IDR_FREE while they still
 * have empty bits, but the IDA only stores NULL in slots when they're being
 * deleted.
 */
static int calculate_count(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot,
				void *item, void *old)
{
	if (is_idr(root)) {
		unsigned offset = get_slot_offset(node, slot);
		bool free = node_tag_get(root, node, IDR_FREE, offset);
		if (!free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}

/**
 * __radix_tree_replace		- replace item in a slot
 * @root:		radix tree root
 * @node:		pointer to tree node
 * @slot:		pointer to slot in @node
 * @item:		new item to store in the slot.
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void __rcu **slot, void *item)
{
	void *old = rcu_dereference_raw(*slot);
	int values = !!xa_is_value(item) - !!xa_is_value(old);
	int count = calculate_count(root, node, slot, item, old);

	/*
	 * This function supports replacing value entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->xa_head.
	 */
	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
			(count || values));
	replace_slot(slot, item, node, count, values);

	if (!node)
		return;

	delete_node(root, node);
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot() and
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and value entries, as that requires accounting
 * inside the radix tree node. When switching from one type of entry to
 * another, or when deleting, use __radix_tree_lookup() and
 * __radix_tree_replace() or radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void __rcu **slot, void *item)
{
	__radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);

/**
 * radix_tree_iter_replace - replace item in a slot
 * @root:	radix tree root
 * @iter:	iterator state
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_for_each_slot().
 * Caller must hold tree write locked.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
				const struct radix_tree_iter *iter,
				void __rcu **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item);
}

static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}

/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree, from the root
 *	all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);
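
/*
 * Illustrative sketch (hypothetical 'mytree' and 'results'; tag 0 is
 * used here the way the page cache historically used its dirty tag):
 * mark an entry, then collect up to 16 tagged entries in one pass:
 *
 *	radix_tree_tag_set(&mytree, index, 0);
 *	...
 *	nr = radix_tree_gang_lookup_tag(&mytree, results, 0, 16, 0);
 */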

static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}

/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL; i.e. it
 *	has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int offset;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
 * @root: radix tree root
 * @iter: iterator state
 * @tag: tag to clear
 */
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_clear(root, iter->node, tag, iter_offset(iter));
}

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(const struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit  = offset % BITS_PER_LONG;

	if (!node) {
		iter->tags = 1;
		return;
	}

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter)
{
	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		return (void __rcu **)&root->xa_head;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = rcu_dereference_raw(
							node->slots[offset]);
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (node->shift && radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index & ~node_maxindex(node)) | offset;
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
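
/*
 * Illustrative iteration sketch (hypothetical 'mytree'): callers do not
 * normally call radix_tree_next_chunk() directly; they use the iterator
 * macros from <linux/radix-tree.h>, which wrap it:
 *
 *	void __rcu **slot;
 *	struct radix_tree_iter iter;
 *
 *	radix_tree_for_each_slot(slot, &mytree, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		// entry is a valid item here, at index iter.index
 *	}
 */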

/**
 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	them at *@results and returns the number of items which were placed at
 *	*@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 *	rcu_read_lock. In this case, rather than the returned results being
 *	an atomic snapshot of the tree at a single point in time, the
 *	semantics of an RCU protected gang lookup are as though multiple
 *	radix_tree_lookups have been issued in individual locks, and results
 *	stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
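
/*
 * Illustrative batched-lookup sketch (hypothetical 'mytree' and
 * 'process'): fetch up to 16 items starting at 'first_index':
 *
 *	void *batch[16];
 *	unsigned int i, nr;
 *
 *	nr = radix_tree_gang_lookup(&mytree, batch, first_index, 16);
 *	for (i = 0; i < nr; i++)
 *		process(batch[i]);
 *
 * Note that the returned items carry no indices; callers that need to
 * resume a scan usually use the iterator macros instead.
 */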

/**
 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *	                             based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
 *	returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

/**
 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					  radix tree based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the slots at *@results and
 *	returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);

static bool __radix_tree_delete(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot)
{
	void *old = rcu_dereference_raw(*slot);
	int values = xa_is_value(old) ? -1 : 0;
	unsigned offset = get_slot_offset(node, slot);
	int tag;

	if (is_idr(root))
		node_tag_set(root, node, IDR_FREE, offset);
	else
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);

	replace_slot(slot, NULL, node, -1, values);
	return node && delete_node(root, node);
}

/**
 * radix_tree_iter_delete - delete the entry at this iterator position
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 *
 * Delete the entry at the position currently pointed to by the iterator.
 * This may result in the current node being freed; if it is, the iterator
 * is advanced so that it will not reference the freed memory.  This
 * function may be called without any locking if there are no other threads
 * which can access this tree.
 */
void radix_tree_iter_delete(struct radix_tree_root *root,
				struct radix_tree_iter *iter, void __rcu **slot)
{
	if (__radix_tree_delete(root, iter->node, slot))
		iter->index = iter->next_index;
}
EXPORT_SYMBOL(radix_tree_iter_delete);

/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Return: the deleted entry, or %NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node = NULL;
	void __rcu **slot = NULL;
	void *entry;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!slot)
		return NULL;
	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
						get_slot_offset(node, slot))))
		return NULL;

	if (item && entry != item)
		return NULL;

	__radix_tree_delete(root, node, slot);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);

/**
 * radix_tree_delete - delete an entry from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the entry at @index from the radix tree rooted at @root.
 *
 * Return: The deleted entry, or %NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
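
/*
 * Illustrative sketch (hypothetical 'mytree', 'mylock' and 'free_it'):
 * radix_tree_delete_item() makes conditional removal race-free under
 * the caller's lock, removing the entry only if it is still 'item':
 *
 *	spin_lock(&mylock);
 *	old = radix_tree_delete_item(&mytree, index, item);
 *	spin_unlock(&mylock);
 *	if (old == item)
 *		free_it(item);
 */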

/**
 *	radix_tree_tagged - test whether any items in the tree are tagged
 *	@root:		radix tree root
 *	@tag:		tag to test
 */
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preallocate memory to use for the next call to idr_alloc().  This function
 * returns with preemption disabled.  It will be enabled by idr_preload_end().
 */
void idr_preload(gfp_t gfp_mask)
{
	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
		local_lock(&radix_tree_preloads.lock);
}
EXPORT_SYMBOL(idr_preload);
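
/*
 * Usage sketch for the preload pair (hypothetical 'myidr' and 'mylock');
 * note the GFP_NOWAIT allocation inside the lock, which is satisfied
 * from the pool filled by idr_preload():
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&mylock);
 *	id = idr_alloc(&myidr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&mylock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */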

void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex, start = iter->next_index;
	unsigned int shift, offset = 0;

 grow:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (!radix_tree_tagged(root, IDR_FREE))
		start = max(start, maxindex + 1);
	if (start > max)
		return ERR_PTR(-ENOSPC);

	if (start > maxindex) {
		int error = radix_tree_extend(root, gfp, start, shift);
		if (error < 0)
			return ERR_PTR(error);
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}
	if (start == 0 && shift == 0)
		shift = RADIX_TREE_MAP_SHIFT;

	while (shift) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return ERR_PTR(-ENOMEM);
			all_tag_set(child, IDR_FREE);
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, start);
		if (!tag_get(node, IDR_FREE, offset)) {
			offset = radix_tree_find_next_bit(node, IDR_FREE,
							offset + 1);
			start = next_index(start, node, offset);
			if (start > max || start == 0)
				return ERR_PTR(-ENOSPC);
			while (offset == RADIX_TREE_MAP_SIZE) {
				offset = node->offset + 1;
				node = node->parent;
				if (!node)
					goto grow;
				shift = node->shift;
			}
			child = rcu_dereference_raw(node->slots[offset]);
		}
		slot = &node->slots[offset];
	}

	iter->index = start;
	if (node)
		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
	else
		iter->next_index = 1;
	iter->node = node;
	set_iter_tags(iter, node, offset, IDR_FREE);

	return slot;
}

/**
 * idr_destroy - release all internal memory from an IDR
 * @idr: idr handle
 *
 * After this function is called, the IDR is empty, and may be reused or
 * the data structure containing it may be freed.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free the memory used to keep track of those objects.
 */
void idr_destroy(struct idr *idr)
{
	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
	if (radix_tree_is_internal_node(node))
		radix_tree_free_nodes(node);
	idr->idr_rt.xa_head = NULL;
	root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->parent;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	return 0;
}

void __init radix_tree_init(void)
{
	int ret;

	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
	BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
	BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}