xref: /linux/lib/maple_tree.c (revision f6e0a4984c2e7244689ea87b62b433bed9d07e94)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Maple Tree implementation
4  * Copyright (c) 2018-2022 Oracle Corporation
5  * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6  *	    Matthew Wilcox <willy@infradead.org>
7  * Copyright (c) 2023 ByteDance
8  * Author: Peng Zhang <zhangpeng.00@bytedance.com>
9  */
10 
11 /*
12  * DOC: Interesting implementation details of the Maple Tree
13  *
14  * Each node type has a number of slots for entries and a number of slots for
15  * pivots.  In the case of dense nodes, the pivots are implied by the position
16  * and are simply the slot index + the minimum of the node.
17  *
18  * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
19  * indicate that the tree is specifying ranges.  Pivots may appear in the
20  * subtree with an entry attached to the value whereas keys are unique to a
21  * specific position of a B-tree.  Pivot values are inclusive of the slot with
22  * the same index.
23  *
24  *
25  * The following illustrates the layout of a range64 nodes slots and pivots.
26  *
27  *
28  *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
29  *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
30  *           │   │   │   │     │    │    │    │    └─ Implied maximum
31  *           │   │   │   │     │    │    │    └─ Pivot 14
32  *           │   │   │   │     │    │    └─ Pivot 13
33  *           │   │   │   │     │    └─ Pivot 12
34  *           │   │   │   │     └─ Pivot 11
35  *           │   │   │   └─ Pivot 2
36  *           │   │   └─ Pivot 1
37  *           │   └─ Pivot 0
38  *           └─  Implied minimum
39  *
40  * Slot contents:
41  *  Internal (non-leaf) nodes contain pointers to other nodes.
42  *  Leaf nodes contain entries.
43  *
44  * The location of interest is often referred to as an offset.  All offsets have
45  * a slot, but the last offset has an implied pivot from the node above (or
46  * UINT_MAX for the root node.
47  *
48  * Ranges complicate certain write activities.  When modifying any of
49  * the B-tree variants, it is known that one entry will either be added or
50  * deleted.  When modifying the Maple Tree, one store operation may overwrite
51  * the entire data set, or one half of the tree, or the middle half of the tree.
52  *
53  */
54 
55 
56 #include <linux/maple_tree.h>
57 #include <linux/xarray.h>
58 #include <linux/types.h>
59 #include <linux/export.h>
60 #include <linux/slab.h>
61 #include <linux/limits.h>
62 #include <asm/barrier.h>
63 
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/maple_tree.h>
66 
67 #define MA_ROOT_PARENT 1
68 
69 /*
70  * Maple state flags
71  * * MA_STATE_BULK		- Bulk insert mode
72  * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
73  * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
74  */
75 #define MA_STATE_BULK		1
76 #define MA_STATE_REBALANCE	2
77 #define MA_STATE_PREALLOC	4
78 
79 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
80 #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
81 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
82 #define ma_enode_ptr(x) ((struct maple_enode *)(x))
83 static struct kmem_cache *maple_node_cache;
84 
85 #ifdef CONFIG_DEBUG_MAPLE_TREE
86 static const unsigned long mt_max[] = {
87 	[maple_dense]		= MAPLE_NODE_SLOTS,
88 	[maple_leaf_64]		= ULONG_MAX,
89 	[maple_range_64]	= ULONG_MAX,
90 	[maple_arange_64]	= ULONG_MAX,
91 };
92 #define mt_node_max(x) mt_max[mte_node_type(x)]
93 #endif
94 
95 static const unsigned char mt_slots[] = {
96 	[maple_dense]		= MAPLE_NODE_SLOTS,
97 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
98 	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
99 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
100 };
101 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
102 
103 static const unsigned char mt_pivots[] = {
104 	[maple_dense]		= 0,
105 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
106 	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
107 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
108 };
109 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
110 
111 static const unsigned char mt_min_slots[] = {
112 	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
113 	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
114 	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
115 	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
116 };
117 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
118 
119 #define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
120 #define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)
121 
122 struct maple_big_node {
123 	struct maple_pnode *parent;
124 	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
125 	union {
126 		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
127 		struct {
128 			unsigned long padding[MAPLE_BIG_NODE_GAPS];
129 			unsigned long gap[MAPLE_BIG_NODE_GAPS];
130 		};
131 	};
132 	unsigned char b_end;
133 	enum maple_type type;
134 };
135 
136 /*
137  * The maple_subtree_state is used to build a tree to replace a segment of an
138  * existing tree in a more atomic way.  Any walkers of the older tree will hit a
139  * dead node and restart on updates.
140  */
141 struct maple_subtree_state {
142 	struct ma_state *orig_l;	/* Original left side of subtree */
143 	struct ma_state *orig_r;	/* Original right side of subtree */
144 	struct ma_state *l;		/* New left side of subtree */
145 	struct ma_state *m;		/* New middle of subtree (rare) */
146 	struct ma_state *r;		/* New right side of subtree */
147 	struct ma_topiary *free;	/* nodes to be freed */
148 	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
149 	struct maple_big_node *bn;
150 };
151 
152 #ifdef CONFIG_KASAN_STACK
153 /* Prevent mas_wr_bnode() from exceeding the stack frame limit */
154 #define noinline_for_kasan noinline_for_stack
155 #else
156 #define noinline_for_kasan inline
157 #endif
158 
159 /* Functions */
160 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
161 {
162 	return kmem_cache_alloc(maple_node_cache, gfp);
163 }
164 
165 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
166 {
167 	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
168 }
169 
170 static inline void mt_free_one(struct maple_node *node)
171 {
172 	kmem_cache_free(maple_node_cache, node);
173 }
174 
175 static inline void mt_free_bulk(size_t size, void __rcu **nodes)
176 {
177 	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
178 }
179 
180 static void mt_free_rcu(struct rcu_head *head)
181 {
182 	struct maple_node *node = container_of(head, struct maple_node, rcu);
183 
184 	kmem_cache_free(maple_node_cache, node);
185 }
186 
187 /*
188  * ma_free_rcu() - Use rcu callback to free a maple node
189  * @node: The node to free
190  *
191  * The maple tree uses the parent pointer to indicate this node is no longer in
192  * use and will be freed.
193  */
194 static void ma_free_rcu(struct maple_node *node)
195 {
196 	WARN_ON(node->parent != ma_parent_ptr(node));
197 	call_rcu(&node->rcu, mt_free_rcu);
198 }
199 
200 static void mas_set_height(struct ma_state *mas)
201 {
202 	unsigned int new_flags = mas->tree->ma_flags;
203 
204 	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
205 	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
206 	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
207 	mas->tree->ma_flags = new_flags;
208 }
209 
210 static unsigned int mas_mt_height(struct ma_state *mas)
211 {
212 	return mt_height(mas->tree);
213 }
214 
215 static inline unsigned int mt_attr(struct maple_tree *mt)
216 {
217 	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
218 }
219 
220 static __always_inline enum maple_type mte_node_type(
221 		const struct maple_enode *entry)
222 {
223 	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
224 		MAPLE_NODE_TYPE_MASK;
225 }
226 
227 static __always_inline bool ma_is_dense(const enum maple_type type)
228 {
229 	return type < maple_leaf_64;
230 }
231 
232 static __always_inline bool ma_is_leaf(const enum maple_type type)
233 {
234 	return type < maple_range_64;
235 }
236 
237 static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
238 {
239 	return ma_is_leaf(mte_node_type(entry));
240 }
241 
242 /*
243  * We also reserve values with the bottom two bits set to '10' which are
244  * below 4096
245  */
246 static __always_inline bool mt_is_reserved(const void *entry)
247 {
248 	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
249 		xa_is_internal(entry);
250 }
251 
252 static __always_inline void mas_set_err(struct ma_state *mas, long err)
253 {
254 	mas->node = MA_ERROR(err);
255 	mas->status = ma_error;
256 }
257 
258 static __always_inline bool mas_is_ptr(const struct ma_state *mas)
259 {
260 	return mas->status == ma_root;
261 }
262 
263 static __always_inline bool mas_is_start(const struct ma_state *mas)
264 {
265 	return mas->status == ma_start;
266 }
267 
268 static __always_inline bool mas_is_none(const struct ma_state *mas)
269 {
270 	return mas->status == ma_none;
271 }
272 
273 static __always_inline bool mas_is_paused(const struct ma_state *mas)
274 {
275 	return mas->status == ma_pause;
276 }
277 
278 static __always_inline bool mas_is_overflow(struct ma_state *mas)
279 {
280 	return mas->status == ma_overflow;
281 }
282 
283 static inline bool mas_is_underflow(struct ma_state *mas)
284 {
285 	return mas->status == ma_underflow;
286 }
287 
288 static __always_inline struct maple_node *mte_to_node(
289 		const struct maple_enode *entry)
290 {
291 	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
292 }
293 
294 /*
295  * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
296  * @entry: The maple encoded node
297  *
298  * Return: a maple topiary pointer
299  */
300 static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
301 {
302 	return (struct maple_topiary *)
303 		((unsigned long)entry & ~MAPLE_NODE_MASK);
304 }
305 
306 /*
307  * mas_mn() - Get the maple state node.
308  * @mas: The maple state
309  *
310  * Return: the maple node (not encoded - bare pointer).
311  */
312 static inline struct maple_node *mas_mn(const struct ma_state *mas)
313 {
314 	return mte_to_node(mas->node);
315 }
316 
317 /*
318  * mte_set_node_dead() - Set a maple encoded node as dead.
319  * @mn: The maple encoded node.
320  */
321 static inline void mte_set_node_dead(struct maple_enode *mn)
322 {
323 	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
324 	smp_wmb(); /* Needed for RCU */
325 }
326 
327 /* Bit 1 indicates the root is a node */
328 #define MAPLE_ROOT_NODE			0x02
329 /* maple_type stored bit 3-6 */
330 #define MAPLE_ENODE_TYPE_SHIFT		0x03
331 /* Bit 2 means a NULL somewhere below */
332 #define MAPLE_ENODE_NULL		0x04
333 
334 static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
335 					     enum maple_type type)
336 {
337 	return (void *)((unsigned long)node |
338 			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
339 }
340 
341 static inline void *mte_mk_root(const struct maple_enode *node)
342 {
343 	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
344 }
345 
346 static inline void *mte_safe_root(const struct maple_enode *node)
347 {
348 	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
349 }
350 
351 static inline void *mte_set_full(const struct maple_enode *node)
352 {
353 	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
354 }
355 
356 static inline void *mte_clear_full(const struct maple_enode *node)
357 {
358 	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
359 }
360 
361 static inline bool mte_has_null(const struct maple_enode *node)
362 {
363 	return (unsigned long)node & MAPLE_ENODE_NULL;
364 }
365 
366 static __always_inline bool ma_is_root(struct maple_node *node)
367 {
368 	return ((unsigned long)node->parent & MA_ROOT_PARENT);
369 }
370 
371 static __always_inline bool mte_is_root(const struct maple_enode *node)
372 {
373 	return ma_is_root(mte_to_node(node));
374 }
375 
376 static inline bool mas_is_root_limits(const struct ma_state *mas)
377 {
378 	return !mas->min && mas->max == ULONG_MAX;
379 }
380 
381 static __always_inline bool mt_is_alloc(struct maple_tree *mt)
382 {
383 	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
384 }
385 
386 /*
387  * The Parent Pointer
388  * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
389  * When storing a 32 or 64 bit values, the offset can fit into 5 bits.  The 16
390  * bit values need an extra bit to store the offset.  This extra bit comes from
391  * a reuse of the last bit in the node type.  This is possible by using bit 1 to
392  * indicate if bit 2 is part of the type or the slot.
393  *
394  * Note types:
395  *  0x??1 = Root
396  *  0x?00 = 16 bit nodes
397  *  0x010 = 32 bit nodes
398  *  0x110 = 64 bit nodes
399  *
400  * Slot size and alignment
401  *  0b??1 : Root
402  *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
403  *  0b010 : 32 bit values, type in 0-2, slot in 3-7
404  *  0b110 : 64 bit values, type in 0-2, slot in 3-7
405  */
406 
407 #define MAPLE_PARENT_ROOT		0x01
408 
409 #define MAPLE_PARENT_SLOT_SHIFT		0x03
410 #define MAPLE_PARENT_SLOT_MASK		0xF8
411 
412 #define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
413 #define MAPLE_PARENT_16B_SLOT_MASK	0xFC
414 
415 #define MAPLE_PARENT_RANGE64		0x06
416 #define MAPLE_PARENT_RANGE32		0x04
417 #define MAPLE_PARENT_NOT_RANGE16	0x02
418 
419 /*
420  * mte_parent_shift() - Get the parent shift for the slot storage.
421  * @parent: The parent pointer cast as an unsigned long
422  * Return: The shift into that pointer to the star to of the slot
423  */
424 static inline unsigned long mte_parent_shift(unsigned long parent)
425 {
426 	/* Note bit 1 == 0 means 16B */
427 	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
428 		return MAPLE_PARENT_SLOT_SHIFT;
429 
430 	return MAPLE_PARENT_16B_SLOT_SHIFT;
431 }
432 
433 /*
434  * mte_parent_slot_mask() - Get the slot mask for the parent.
435  * @parent: The parent pointer cast as an unsigned long.
436  * Return: The slot mask for that parent.
437  */
438 static inline unsigned long mte_parent_slot_mask(unsigned long parent)
439 {
440 	/* Note bit 1 == 0 means 16B */
441 	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
442 		return MAPLE_PARENT_SLOT_MASK;
443 
444 	return MAPLE_PARENT_16B_SLOT_MASK;
445 }
446 
447 /*
448  * mas_parent_type() - Return the maple_type of the parent from the stored
449  * parent type.
450  * @mas: The maple state
451  * @enode: The maple_enode to extract the parent's enum
452  * Return: The node->parent maple_type
453  */
454 static inline
455 enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
456 {
457 	unsigned long p_type;
458 
459 	p_type = (unsigned long)mte_to_node(enode)->parent;
460 	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
461 		return 0;
462 
463 	p_type &= MAPLE_NODE_MASK;
464 	p_type &= ~mte_parent_slot_mask(p_type);
465 	switch (p_type) {
466 	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
467 		if (mt_is_alloc(mas->tree))
468 			return maple_arange_64;
469 		return maple_range_64;
470 	}
471 
472 	return 0;
473 }
474 
475 /*
476  * mas_set_parent() - Set the parent node and encode the slot
477  * @enode: The encoded maple node.
478  * @parent: The encoded maple node that is the parent of @enode.
479  * @slot: The slot that @enode resides in @parent.
480  *
481  * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
482  * parent type.
483  */
484 static inline
485 void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
486 		    const struct maple_enode *parent, unsigned char slot)
487 {
488 	unsigned long val = (unsigned long)parent;
489 	unsigned long shift;
490 	unsigned long type;
491 	enum maple_type p_type = mte_node_type(parent);
492 
493 	MAS_BUG_ON(mas, p_type == maple_dense);
494 	MAS_BUG_ON(mas, p_type == maple_leaf_64);
495 
496 	switch (p_type) {
497 	case maple_range_64:
498 	case maple_arange_64:
499 		shift = MAPLE_PARENT_SLOT_SHIFT;
500 		type = MAPLE_PARENT_RANGE64;
501 		break;
502 	default:
503 	case maple_dense:
504 	case maple_leaf_64:
505 		shift = type = 0;
506 		break;
507 	}
508 
509 	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
510 	val |= (slot << shift) | type;
511 	mte_to_node(enode)->parent = ma_parent_ptr(val);
512 }
513 
514 /*
515  * mte_parent_slot() - get the parent slot of @enode.
516  * @enode: The encoded maple node.
517  *
518  * Return: The slot in the parent node where @enode resides.
519  */
520 static __always_inline
521 unsigned int mte_parent_slot(const struct maple_enode *enode)
522 {
523 	unsigned long val = (unsigned long)mte_to_node(enode)->parent;
524 
525 	if (unlikely(val & MA_ROOT_PARENT))
526 		return 0;
527 
528 	/*
529 	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
530 	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
531 	 */
532 	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
533 }
534 
535 /*
536  * mte_parent() - Get the parent of @node.
537  * @node: The encoded maple node.
538  *
539  * Return: The parent maple node.
540  */
541 static __always_inline
542 struct maple_node *mte_parent(const struct maple_enode *enode)
543 {
544 	return (void *)((unsigned long)
545 			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
546 }
547 
548 /*
549  * ma_dead_node() - check if the @enode is dead.
550  * @enode: The encoded maple node
551  *
552  * Return: true if dead, false otherwise.
553  */
554 static __always_inline bool ma_dead_node(const struct maple_node *node)
555 {
556 	struct maple_node *parent;
557 
558 	/* Do not reorder reads from the node prior to the parent check */
559 	smp_rmb();
560 	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
561 	return (parent == node);
562 }
563 
564 /*
565  * mte_dead_node() - check if the @enode is dead.
566  * @enode: The encoded maple node
567  *
568  * Return: true if dead, false otherwise.
569  */
570 static __always_inline bool mte_dead_node(const struct maple_enode *enode)
571 {
572 	struct maple_node *parent, *node;
573 
574 	node = mte_to_node(enode);
575 	/* Do not reorder reads from the node prior to the parent check */
576 	smp_rmb();
577 	parent = mte_parent(enode);
578 	return (parent == node);
579 }
580 
581 /*
582  * mas_allocated() - Get the number of nodes allocated in a maple state.
583  * @mas: The maple state
584  *
585  * The ma_state alloc member is overloaded to hold a pointer to the first
586  * allocated node or to the number of requested nodes to allocate.  If bit 0 is
587  * set, then the alloc contains the number of requested nodes.  If there is an
588  * allocated node, then the total allocated nodes is in that node.
589  *
590  * Return: The total number of nodes allocated
591  */
592 static inline unsigned long mas_allocated(const struct ma_state *mas)
593 {
594 	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
595 		return 0;
596 
597 	return mas->alloc->total;
598 }
599 
600 /*
601  * mas_set_alloc_req() - Set the requested number of allocations.
602  * @mas: the maple state
603  * @count: the number of allocations.
604  *
605  * The requested number of allocations is either in the first allocated node,
606  * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
607  * no allocated node.  Set the request either in the node or do the necessary
608  * encoding to store in @mas->alloc directly.
609  */
610 static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
611 {
612 	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
613 		if (!count)
614 			mas->alloc = NULL;
615 		else
616 			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
617 		return;
618 	}
619 
620 	mas->alloc->request_count = count;
621 }
622 
623 /*
624  * mas_alloc_req() - get the requested number of allocations.
625  * @mas: The maple state
626  *
627  * The alloc count is either stored directly in @mas, or in
628  * @mas->alloc->request_count if there is at least one node allocated.  Decode
629  * the request count if it's stored directly in @mas->alloc.
630  *
631  * Return: The allocation request count.
632  */
633 static inline unsigned int mas_alloc_req(const struct ma_state *mas)
634 {
635 	if ((unsigned long)mas->alloc & 0x1)
636 		return (unsigned long)(mas->alloc) >> 1;
637 	else if (mas->alloc)
638 		return mas->alloc->request_count;
639 	return 0;
640 }
641 
642 /*
643  * ma_pivots() - Get a pointer to the maple node pivots.
644  * @node - the maple node
645  * @type - the node type
646  *
647  * In the event of a dead node, this array may be %NULL
648  *
649  * Return: A pointer to the maple node pivots
650  */
651 static inline unsigned long *ma_pivots(struct maple_node *node,
652 					   enum maple_type type)
653 {
654 	switch (type) {
655 	case maple_arange_64:
656 		return node->ma64.pivot;
657 	case maple_range_64:
658 	case maple_leaf_64:
659 		return node->mr64.pivot;
660 	case maple_dense:
661 		return NULL;
662 	}
663 	return NULL;
664 }
665 
666 /*
667  * ma_gaps() - Get a pointer to the maple node gaps.
668  * @node - the maple node
669  * @type - the node type
670  *
671  * Return: A pointer to the maple node gaps
672  */
673 static inline unsigned long *ma_gaps(struct maple_node *node,
674 				     enum maple_type type)
675 {
676 	switch (type) {
677 	case maple_arange_64:
678 		return node->ma64.gap;
679 	case maple_range_64:
680 	case maple_leaf_64:
681 	case maple_dense:
682 		return NULL;
683 	}
684 	return NULL;
685 }
686 
687 /*
688  * mas_safe_pivot() - get the pivot at @piv or mas->max.
689  * @mas: The maple state
690  * @pivots: The pointer to the maple node pivots
691  * @piv: The pivot to fetch
692  * @type: The maple node type
693  *
694  * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
695  * otherwise.
696  */
697 static __always_inline unsigned long
698 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
699 	       unsigned char piv, enum maple_type type)
700 {
701 	if (piv >= mt_pivots[type])
702 		return mas->max;
703 
704 	return pivots[piv];
705 }
706 
707 /*
708  * mas_safe_min() - Return the minimum for a given offset.
709  * @mas: The maple state
710  * @pivots: The pointer to the maple node pivots
711  * @offset: The offset into the pivot array
712  *
713  * Return: The minimum range value that is contained in @offset.
714  */
715 static inline unsigned long
716 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
717 {
718 	if (likely(offset))
719 		return pivots[offset - 1] + 1;
720 
721 	return mas->min;
722 }
723 
724 /*
725  * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
726  * @mn: The encoded maple node
727  * @piv: The pivot offset
728  * @val: The value of the pivot
729  */
730 static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
731 				unsigned long val)
732 {
733 	struct maple_node *node = mte_to_node(mn);
734 	enum maple_type type = mte_node_type(mn);
735 
736 	BUG_ON(piv >= mt_pivots[type]);
737 	switch (type) {
738 	case maple_range_64:
739 	case maple_leaf_64:
740 		node->mr64.pivot[piv] = val;
741 		break;
742 	case maple_arange_64:
743 		node->ma64.pivot[piv] = val;
744 		break;
745 	case maple_dense:
746 		break;
747 	}
748 
749 }
750 
751 /*
752  * ma_slots() - Get a pointer to the maple node slots.
753  * @mn: The maple node
754  * @mt: The maple node type
755  *
756  * Return: A pointer to the maple node slots
757  */
758 static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
759 {
760 	switch (mt) {
761 	case maple_arange_64:
762 		return mn->ma64.slot;
763 	case maple_range_64:
764 	case maple_leaf_64:
765 		return mn->mr64.slot;
766 	case maple_dense:
767 		return mn->slot;
768 	}
769 
770 	return NULL;
771 }
772 
773 static inline bool mt_write_locked(const struct maple_tree *mt)
774 {
775 	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
776 		lockdep_is_held(&mt->ma_lock);
777 }
778 
779 static __always_inline bool mt_locked(const struct maple_tree *mt)
780 {
781 	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
782 		lockdep_is_held(&mt->ma_lock);
783 }
784 
785 static __always_inline void *mt_slot(const struct maple_tree *mt,
786 		void __rcu **slots, unsigned char offset)
787 {
788 	return rcu_dereference_check(slots[offset], mt_locked(mt));
789 }
790 
791 static __always_inline void *mt_slot_locked(struct maple_tree *mt,
792 		void __rcu **slots, unsigned char offset)
793 {
794 	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
795 }
796 /*
797  * mas_slot_locked() - Get the slot value when holding the maple tree lock.
798  * @mas: The maple state
799  * @slots: The pointer to the slots
800  * @offset: The offset into the slots array to fetch
801  *
802  * Return: The entry stored in @slots at the @offset.
803  */
804 static __always_inline void *mas_slot_locked(struct ma_state *mas,
805 		void __rcu **slots, unsigned char offset)
806 {
807 	return mt_slot_locked(mas->tree, slots, offset);
808 }
809 
810 /*
811  * mas_slot() - Get the slot value when not holding the maple tree lock.
812  * @mas: The maple state
813  * @slots: The pointer to the slots
814  * @offset: The offset into the slots array to fetch
815  *
816  * Return: The entry stored in @slots at the @offset
817  */
818 static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
819 		unsigned char offset)
820 {
821 	return mt_slot(mas->tree, slots, offset);
822 }
823 
824 /*
825  * mas_root() - Get the maple tree root.
826  * @mas: The maple state.
827  *
828  * Return: The pointer to the root of the tree
829  */
830 static __always_inline void *mas_root(struct ma_state *mas)
831 {
832 	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
833 }
834 
835 static inline void *mt_root_locked(struct maple_tree *mt)
836 {
837 	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
838 }
839 
840 /*
841  * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
842  * @mas: The maple state.
843  *
844  * Return: The pointer to the root of the tree
845  */
846 static inline void *mas_root_locked(struct ma_state *mas)
847 {
848 	return mt_root_locked(mas->tree);
849 }
850 
851 static inline struct maple_metadata *ma_meta(struct maple_node *mn,
852 					     enum maple_type mt)
853 {
854 	switch (mt) {
855 	case maple_arange_64:
856 		return &mn->ma64.meta;
857 	default:
858 		return &mn->mr64.meta;
859 	}
860 }
861 
862 /*
863  * ma_set_meta() - Set the metadata information of a node.
864  * @mn: The maple node
865  * @mt: The maple node type
866  * @offset: The offset of the highest sub-gap in this node.
867  * @end: The end of the data in this node.
868  */
869 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
870 			       unsigned char offset, unsigned char end)
871 {
872 	struct maple_metadata *meta = ma_meta(mn, mt);
873 
874 	meta->gap = offset;
875 	meta->end = end;
876 }
877 
878 /*
879  * mt_clear_meta() - clear the metadata information of a node, if it exists
880  * @mt: The maple tree
881  * @mn: The maple node
882  * @type: The maple node type
883  * @offset: The offset of the highest sub-gap in this node.
884  * @end: The end of the data in this node.
885  */
886 static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
887 				  enum maple_type type)
888 {
889 	struct maple_metadata *meta;
890 	unsigned long *pivots;
891 	void __rcu **slots;
892 	void *next;
893 
894 	switch (type) {
895 	case maple_range_64:
896 		pivots = mn->mr64.pivot;
897 		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
898 			slots = mn->mr64.slot;
899 			next = mt_slot_locked(mt, slots,
900 					      MAPLE_RANGE64_SLOTS - 1);
901 			if (unlikely((mte_to_node(next) &&
902 				      mte_node_type(next))))
903 				return; /* no metadata, could be node */
904 		}
905 		fallthrough;
906 	case maple_arange_64:
907 		meta = ma_meta(mn, type);
908 		break;
909 	default:
910 		return;
911 	}
912 
913 	meta->gap = 0;
914 	meta->end = 0;
915 }
916 
917 /*
918  * ma_meta_end() - Get the data end of a node from the metadata
919  * @mn: The maple node
920  * @mt: The maple node type
921  */
922 static inline unsigned char ma_meta_end(struct maple_node *mn,
923 					enum maple_type mt)
924 {
925 	struct maple_metadata *meta = ma_meta(mn, mt);
926 
927 	return meta->end;
928 }
929 
930 /*
931  * ma_meta_gap() - Get the largest gap location of a node from the metadata
932  * @mn: The maple node
933  */
934 static inline unsigned char ma_meta_gap(struct maple_node *mn)
935 {
936 	return mn->ma64.meta.gap;
937 }
938 
939 /*
940  * ma_set_meta_gap() - Set the largest gap location in a nodes metadata
941  * @mn: The maple node
942  * @mn: The maple node type
943  * @offset: The location of the largest gap.
944  */
945 static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
946 				   unsigned char offset)
947 {
948 
949 	struct maple_metadata *meta = ma_meta(mn, mt);
950 
951 	meta->gap = offset;
952 }
953 
954 /*
955  * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
956  * @mat - the ma_topiary, a linked list of dead nodes.
957  * @dead_enode - the node to be marked as dead and added to the tail of the list
958  *
959  * Add the @dead_enode to the linked list in @mat.
960  */
961 static inline void mat_add(struct ma_topiary *mat,
962 			   struct maple_enode *dead_enode)
963 {
964 	mte_set_node_dead(dead_enode);
965 	mte_to_mat(dead_enode)->next = NULL;
966 	if (!mat->tail) {
967 		mat->tail = mat->head = dead_enode;
968 		return;
969 	}
970 
971 	mte_to_mat(mat->tail)->next = dead_enode;
972 	mat->tail = dead_enode;
973 }
974 
975 static void mt_free_walk(struct rcu_head *head);
976 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
977 			    bool free);
978 /*
979  * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
980  * @mas - the maple state
981  * @mat - the ma_topiary linked list of dead nodes to free.
982  *
983  * Destroy walk a dead list.
984  */
985 static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
986 {
987 	struct maple_enode *next;
988 	struct maple_node *node;
989 	bool in_rcu = mt_in_rcu(mas->tree);
990 
991 	while (mat->head) {
992 		next = mte_to_mat(mat->head)->next;
993 		node = mte_to_node(mat->head);
994 		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
995 		if (in_rcu)
996 			call_rcu(&node->rcu, mt_free_walk);
997 		mat->head = next;
998 	}
999 }
1000 /*
1001  * mas_descend() - Descend into the slot stored in the ma_state.
1002  * @mas - the maple state.
1003  *
1004  * Note: Not RCU safe, only use in write side or debug code.
1005  */
1006 static inline void mas_descend(struct ma_state *mas)
1007 {
1008 	enum maple_type type;
1009 	unsigned long *pivots;
1010 	struct maple_node *node;
1011 	void __rcu **slots;
1012 
1013 	node = mas_mn(mas);
1014 	type = mte_node_type(mas->node);
1015 	pivots = ma_pivots(node, type);
1016 	slots = ma_slots(node, type);
1017 
1018 	if (mas->offset)
1019 		mas->min = pivots[mas->offset - 1] + 1;
1020 	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1021 	mas->node = mas_slot(mas, slots, mas->offset);
1022 }
1023 
1024 /*
1025  * mte_set_gap() - Set a maple node gap.
1026  * @mn: The encoded maple node
1027  * @gap: The offset of the gap to set
1028  * @val: The gap value
1029  */
1030 static inline void mte_set_gap(const struct maple_enode *mn,
1031 				 unsigned char gap, unsigned long val)
1032 {
1033 	switch (mte_node_type(mn)) {
1034 	default:
1035 		break;
1036 	case maple_arange_64:
1037 		mte_to_node(mn)->ma64.gap[gap] = val;
1038 		break;
1039 	}
1040 }
1041 
1042 /*
1043  * mas_ascend() - Walk up a level of the tree.
1044  * @mas: The maple state
1045  *
1046  * Sets the @mas->max and @mas->min to the correct values when walking up.  This
1047  * may cause several levels of walking up to find the correct min and max.
1048  * May find a dead node which will cause a premature return.
1049  * Return: 1 on dead node, 0 otherwise
1050  */
1051 static int mas_ascend(struct ma_state *mas)
1052 {
1053 	struct maple_enode *p_enode; /* parent enode. */
1054 	struct maple_enode *a_enode; /* ancestor enode. */
1055 	struct maple_node *a_node; /* ancestor node. */
1056 	struct maple_node *p_node; /* parent node. */
1057 	unsigned char a_slot;
1058 	enum maple_type a_type;
1059 	unsigned long min, max;
1060 	unsigned long *pivots;
1061 	bool set_max = false, set_min = false;
1062 
1063 	a_node = mas_mn(mas);
1064 	if (ma_is_root(a_node)) {
1065 		mas->offset = 0;
1066 		return 0;
1067 	}
1068 
1069 	p_node = mte_parent(mas->node);
1070 	if (unlikely(a_node == p_node))
1071 		return 1;
1072 
1073 	a_type = mas_parent_type(mas, mas->node);
1074 	mas->offset = mte_parent_slot(mas->node);
1075 	a_enode = mt_mk_node(p_node, a_type);
1076 
1077 	/* Check to make sure all parent information is still accurate */
1078 	if (p_node != mte_parent(mas->node))
1079 		return 1;
1080 
1081 	mas->node = a_enode;
1082 
1083 	if (mte_is_root(a_enode)) {
1084 		mas->max = ULONG_MAX;
1085 		mas->min = 0;
1086 		return 0;
1087 	}
1088 
1089 	min = 0;
1090 	max = ULONG_MAX;
1091 	if (!mas->offset) {
1092 		min = mas->min;
1093 		set_min = true;
1094 	}
1095 
1096 	if (mas->max == ULONG_MAX)
1097 		set_max = true;
1098 
1099 	do {
1100 		p_enode = a_enode;
1101 		a_type = mas_parent_type(mas, p_enode);
1102 		a_node = mte_parent(p_enode);
1103 		a_slot = mte_parent_slot(p_enode);
1104 		a_enode = mt_mk_node(a_node, a_type);
1105 		pivots = ma_pivots(a_node, a_type);
1106 
1107 		if (unlikely(ma_dead_node(a_node)))
1108 			return 1;
1109 
1110 		if (!set_min && a_slot) {
1111 			set_min = true;
1112 			min = pivots[a_slot - 1] + 1;
1113 		}
1114 
1115 		if (!set_max && a_slot < mt_pivots[a_type]) {
1116 			set_max = true;
1117 			max = pivots[a_slot];
1118 		}
1119 
1120 		if (unlikely(ma_dead_node(a_node)))
1121 			return 1;
1122 
1123 		if (unlikely(ma_is_root(a_node)))
1124 			break;
1125 
1126 	} while (!set_min || !set_max);
1127 
1128 	mas->max = max;
1129 	mas->min = min;
1130 	return 0;
1131 }
1132 
1133 /*
1134  * mas_pop_node() - Get a previously allocated maple node from the maple state.
1135  * @mas: The maple state
1136  *
1137  * Return: A pointer to a maple node.
1138  */
1139 static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1140 {
1141 	struct maple_alloc *ret, *node = mas->alloc;
1142 	unsigned long total = mas_allocated(mas);
1143 	unsigned int req = mas_alloc_req(mas);
1144 
1145 	/* nothing or a request pending. */
1146 	if (WARN_ON(!total))
1147 		return NULL;
1148 
1149 	if (total == 1) {
1150 		/* single allocation in this ma_state */
1151 		mas->alloc = NULL;
1152 		ret = node;
1153 		goto single_node;
1154 	}
1155 
1156 	if (node->node_count == 1) {
1157 		/* Single allocation in this node. */
1158 		mas->alloc = node->slot[0];
1159 		mas->alloc->total = node->total - 1;
1160 		ret = node;
1161 		goto new_head;
1162 	}
1163 	node->total--;
1164 	ret = node->slot[--node->node_count];
1165 	node->slot[node->node_count] = NULL;
1166 
1167 single_node:
1168 new_head:
1169 	if (req) {
1170 		req++;
1171 		mas_set_alloc_req(mas, req);
1172 	}
1173 
1174 	memset(ret, 0, sizeof(*ret));
1175 	return (struct maple_node *)ret;
1176 }
1177 
1178 /*
1179  * mas_push_node() - Push a node back on the maple state allocation.
1180  * @mas: The maple state
1181  * @used: The used maple node
1182  *
1183  * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
1184  * requested node count as necessary.
1185  */
1186 static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1187 {
1188 	struct maple_alloc *reuse = (struct maple_alloc *)used;
1189 	struct maple_alloc *head = mas->alloc;
1190 	unsigned long count;
1191 	unsigned int requested = mas_alloc_req(mas);
1192 
1193 	count = mas_allocated(mas);
1194 
1195 	reuse->request_count = 0;
1196 	reuse->node_count = 0;
1197 	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
1198 		head->slot[head->node_count++] = reuse;
1199 		head->total++;
1200 		goto done;
1201 	}
1202 
1203 	reuse->total = 1;
1204 	if ((head) && !((unsigned long)head & 0x1)) {
1205 		reuse->slot[0] = head;
1206 		reuse->node_count = 1;
1207 		reuse->total += head->total;
1208 	}
1209 
1210 	mas->alloc = reuse;
1211 done:
1212 	if (requested > 1)
1213 		mas_set_alloc_req(mas, requested - 1);
1214 }
1215 
1216 /*
1217  * mas_alloc_nodes() - Allocate nodes into a maple state
1218  * @mas: The maple state
1219  * @gfp: The GFP Flags
1220  */
1221 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1222 {
1223 	struct maple_alloc *node;
1224 	unsigned long allocated = mas_allocated(mas);
1225 	unsigned int requested = mas_alloc_req(mas);
1226 	unsigned int count;
1227 	void **slots = NULL;
1228 	unsigned int max_req = 0;
1229 
1230 	if (!requested)
1231 		return;
1232 
1233 	mas_set_alloc_req(mas, 0);
1234 	if (mas->mas_flags & MA_STATE_PREALLOC) {
1235 		if (allocated)
1236 			return;
1237 		BUG_ON(!allocated);
1238 		WARN_ON(!allocated);
1239 	}
1240 
1241 	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
1242 		node = (struct maple_alloc *)mt_alloc_one(gfp);
1243 		if (!node)
1244 			goto nomem_one;
1245 
1246 		if (allocated) {
1247 			node->slot[0] = mas->alloc;
1248 			node->node_count = 1;
1249 		} else {
1250 			node->node_count = 0;
1251 		}
1252 
1253 		mas->alloc = node;
1254 		node->total = ++allocated;
1255 		requested--;
1256 	}
1257 
1258 	node = mas->alloc;
1259 	node->request_count = 0;
1260 	while (requested) {
1261 		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
1262 		slots = (void **)&node->slot[node->node_count];
1263 		max_req = min(requested, max_req);
1264 		count = mt_alloc_bulk(gfp, max_req, slots);
1265 		if (!count)
1266 			goto nomem_bulk;
1267 
1268 		if (node->node_count == 0) {
1269 			node->slot[0]->node_count = 0;
1270 			node->slot[0]->request_count = 0;
1271 		}
1272 
1273 		node->node_count += count;
1274 		allocated += count;
1275 		node = node->slot[0];
1276 		requested -= count;
1277 	}
1278 	mas->alloc->total = allocated;
1279 	return;
1280 
1281 nomem_bulk:
1282 	/* Clean up potential freed allocations on bulk failure */
1283 	memset(slots, 0, max_req * sizeof(unsigned long));
1284 nomem_one:
1285 	mas_set_alloc_req(mas, requested);
1286 	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1287 		mas->alloc->total = allocated;
1288 	mas_set_err(mas, -ENOMEM);
1289 }
1290 
1291 /*
1292  * mas_free() - Free an encoded maple node
1293  * @mas: The maple state
1294  * @used: The encoded maple node to free.
1295  *
1296  * Uses rcu free if necessary, pushes @used back on the maple state allocations
1297  * otherwise.
1298  */
1299 static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1300 {
1301 	struct maple_node *tmp = mte_to_node(used);
1302 
1303 	if (mt_in_rcu(mas->tree))
1304 		ma_free_rcu(tmp);
1305 	else
1306 		mas_push_node(mas, tmp);
1307 }
1308 
1309 /*
1310  * mas_node_count() - Check if enough nodes are allocated and request more if
1311  * there is not enough nodes.
1312  * @mas: The maple state
1313  * @count: The number of nodes needed
1314  * @gfp: the gfp flags
1315  */
1316 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1317 {
1318 	unsigned long allocated = mas_allocated(mas);
1319 
1320 	if (allocated < count) {
1321 		mas_set_alloc_req(mas, count - allocated);
1322 		mas_alloc_nodes(mas, gfp);
1323 	}
1324 }
1325 
1326 /*
1327  * mas_node_count() - Check if enough nodes are allocated and request more if
1328  * there is not enough nodes.
1329  * @mas: The maple state
1330  * @count: The number of nodes needed
1331  *
1332  * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1333  */
1334 static void mas_node_count(struct ma_state *mas, int count)
1335 {
1336 	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
1337 }
1338 
1339 /*
1340  * mas_start() - Sets up maple state for operations.
1341  * @mas: The maple state.
1342  *
1343  * If mas->status == mas_start, then set the min, max and depth to
1344  * defaults.
1345  *
1346  * Return:
1347  * - If mas->node is an error or not mas_start, return NULL.
1348  * - If it's an empty tree:     NULL & mas->status == ma_none
1349  * - If it's a single entry:    The entry & mas->status == mas_root
1350  * - If it's a tree:            NULL & mas->status == safe root node.
1351  */
1352 static inline struct maple_enode *mas_start(struct ma_state *mas)
1353 {
1354 	if (likely(mas_is_start(mas))) {
1355 		struct maple_enode *root;
1356 
1357 		mas->min = 0;
1358 		mas->max = ULONG_MAX;
1359 
1360 retry:
1361 		mas->depth = 0;
1362 		root = mas_root(mas);
1363 		/* Tree with nodes */
1364 		if (likely(xa_is_node(root))) {
1365 			mas->depth = 1;
1366 			mas->status = ma_active;
1367 			mas->node = mte_safe_root(root);
1368 			mas->offset = 0;
1369 			if (mte_dead_node(mas->node))
1370 				goto retry;
1371 
1372 			return NULL;
1373 		}
1374 
1375 		/* empty tree */
1376 		if (unlikely(!root)) {
1377 			mas->node = NULL;
1378 			mas->status = ma_none;
1379 			mas->offset = MAPLE_NODE_SLOTS;
1380 			return NULL;
1381 		}
1382 
1383 		/* Single entry tree */
1384 		mas->status = ma_root;
1385 		mas->offset = MAPLE_NODE_SLOTS;
1386 
1387 		/* Single entry tree. */
1388 		if (mas->index > 0)
1389 			return NULL;
1390 
1391 		return root;
1392 	}
1393 
1394 	return NULL;
1395 }
1396 
1397 /*
1398  * ma_data_end() - Find the end of the data in a node.
1399  * @node: The maple node
1400  * @type: The maple node type
1401  * @pivots: The array of pivots in the node
1402  * @max: The maximum value in the node
1403  *
1404  * Uses metadata to find the end of the data when possible.
1405  * Return: The zero indexed last slot with data (may be null).
1406  */
1407 static __always_inline unsigned char ma_data_end(struct maple_node *node,
1408 		enum maple_type type, unsigned long *pivots, unsigned long max)
1409 {
1410 	unsigned char offset;
1411 
1412 	if (!pivots)
1413 		return 0;
1414 
1415 	if (type == maple_arange_64)
1416 		return ma_meta_end(node, type);
1417 
1418 	offset = mt_pivots[type] - 1;
1419 	if (likely(!pivots[offset]))
1420 		return ma_meta_end(node, type);
1421 
1422 	if (likely(pivots[offset] == max))
1423 		return offset;
1424 
1425 	return mt_pivots[type];
1426 }
1427 
1428 /*
1429  * mas_data_end() - Find the end of the data (slot).
1430  * @mas: the maple state
1431  *
1432  * This method is optimized to check the metadata of a node if the node type
1433  * supports data end metadata.
1434  *
1435  * Return: The zero indexed last slot with data (may be null).
1436  */
1437 static inline unsigned char mas_data_end(struct ma_state *mas)
1438 {
1439 	enum maple_type type;
1440 	struct maple_node *node;
1441 	unsigned char offset;
1442 	unsigned long *pivots;
1443 
1444 	type = mte_node_type(mas->node);
1445 	node = mas_mn(mas);
1446 	if (type == maple_arange_64)
1447 		return ma_meta_end(node, type);
1448 
1449 	pivots = ma_pivots(node, type);
1450 	if (unlikely(ma_dead_node(node)))
1451 		return 0;
1452 
1453 	offset = mt_pivots[type] - 1;
1454 	if (likely(!pivots[offset]))
1455 		return ma_meta_end(node, type);
1456 
1457 	if (likely(pivots[offset] == mas->max))
1458 		return offset;
1459 
1460 	return mt_pivots[type];
1461 }
1462 
1463 /*
1464  * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1465  * @mas - the maple state
1466  *
1467  * Return: The maximum gap in the leaf.
1468  */
1469 static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1470 {
1471 	enum maple_type mt;
1472 	unsigned long pstart, gap, max_gap;
1473 	struct maple_node *mn;
1474 	unsigned long *pivots;
1475 	void __rcu **slots;
1476 	unsigned char i;
1477 	unsigned char max_piv;
1478 
1479 	mt = mte_node_type(mas->node);
1480 	mn = mas_mn(mas);
1481 	slots = ma_slots(mn, mt);
1482 	max_gap = 0;
1483 	if (unlikely(ma_is_dense(mt))) {
1484 		gap = 0;
1485 		for (i = 0; i < mt_slots[mt]; i++) {
1486 			if (slots[i]) {
1487 				if (gap > max_gap)
1488 					max_gap = gap;
1489 				gap = 0;
1490 			} else {
1491 				gap++;
1492 			}
1493 		}
1494 		if (gap > max_gap)
1495 			max_gap = gap;
1496 		return max_gap;
1497 	}
1498 
1499 	/*
1500 	 * Check the first implied pivot optimizes the loop below and slot 1 may
1501 	 * be skipped if there is a gap in slot 0.
1502 	 */
1503 	pivots = ma_pivots(mn, mt);
1504 	if (likely(!slots[0])) {
1505 		max_gap = pivots[0] - mas->min + 1;
1506 		i = 2;
1507 	} else {
1508 		i = 1;
1509 	}
1510 
1511 	/* reduce max_piv as the special case is checked before the loop */
1512 	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1513 	/*
1514 	 * Check end implied pivot which can only be a gap on the right most
1515 	 * node.
1516 	 */
1517 	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1518 		gap = ULONG_MAX - pivots[max_piv];
1519 		if (gap > max_gap)
1520 			max_gap = gap;
1521 
1522 		if (max_gap > pivots[max_piv] - mas->min)
1523 			return max_gap;
1524 	}
1525 
1526 	for (; i <= max_piv; i++) {
1527 		/* data == no gap. */
1528 		if (likely(slots[i]))
1529 			continue;
1530 
1531 		pstart = pivots[i - 1];
1532 		gap = pivots[i] - pstart;
1533 		if (gap > max_gap)
1534 			max_gap = gap;
1535 
1536 		/* There cannot be two gaps in a row. */
1537 		i++;
1538 	}
1539 	return max_gap;
1540 }
1541 
1542 /*
1543  * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1544  * @node: The maple node
1545  * @gaps: The pointer to the gaps
1546  * @mt: The maple node type
1547  * @*off: Pointer to store the offset location of the gap.
1548  *
1549  * Uses the metadata data end to scan backwards across set gaps.
1550  *
1551  * Return: The maximum gap value
1552  */
1553 static inline unsigned long
1554 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1555 	    unsigned char *off)
1556 {
1557 	unsigned char offset, i;
1558 	unsigned long max_gap = 0;
1559 
1560 	i = offset = ma_meta_end(node, mt);
1561 	do {
1562 		if (gaps[i] > max_gap) {
1563 			max_gap = gaps[i];
1564 			offset = i;
1565 		}
1566 	} while (i--);
1567 
1568 	*off = offset;
1569 	return max_gap;
1570 }
1571 
1572 /*
1573  * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1574  * @mas: The maple state.
1575  *
1576  * Return: The gap value.
1577  */
1578 static inline unsigned long mas_max_gap(struct ma_state *mas)
1579 {
1580 	unsigned long *gaps;
1581 	unsigned char offset;
1582 	enum maple_type mt;
1583 	struct maple_node *node;
1584 
1585 	mt = mte_node_type(mas->node);
1586 	if (ma_is_leaf(mt))
1587 		return mas_leaf_max_gap(mas);
1588 
1589 	node = mas_mn(mas);
1590 	MAS_BUG_ON(mas, mt != maple_arange_64);
1591 	offset = ma_meta_gap(node);
1592 	gaps = ma_gaps(node, mt);
1593 	return gaps[offset];
1594 }
1595 
1596 /*
1597  * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1598  * @mas: The maple state
1599  * @offset: The gap offset in the parent to set
1600  * @new: The new gap value.
1601  *
1602  * Set the parent gap then continue to set the gap upwards, using the metadata
1603  * of the parent to see if it is necessary to check the node above.
1604  */
1605 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1606 		unsigned long new)
1607 {
1608 	unsigned long meta_gap = 0;
1609 	struct maple_node *pnode;
1610 	struct maple_enode *penode;
1611 	unsigned long *pgaps;
1612 	unsigned char meta_offset;
1613 	enum maple_type pmt;
1614 
1615 	pnode = mte_parent(mas->node);
1616 	pmt = mas_parent_type(mas, mas->node);
1617 	penode = mt_mk_node(pnode, pmt);
1618 	pgaps = ma_gaps(pnode, pmt);
1619 
1620 ascend:
1621 	MAS_BUG_ON(mas, pmt != maple_arange_64);
1622 	meta_offset = ma_meta_gap(pnode);
1623 	meta_gap = pgaps[meta_offset];
1624 
1625 	pgaps[offset] = new;
1626 
1627 	if (meta_gap == new)
1628 		return;
1629 
1630 	if (offset != meta_offset) {
1631 		if (meta_gap > new)
1632 			return;
1633 
1634 		ma_set_meta_gap(pnode, pmt, offset);
1635 	} else if (new < meta_gap) {
1636 		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1637 		ma_set_meta_gap(pnode, pmt, meta_offset);
1638 	}
1639 
1640 	if (ma_is_root(pnode))
1641 		return;
1642 
1643 	/* Go to the parent node. */
1644 	pnode = mte_parent(penode);
1645 	pmt = mas_parent_type(mas, penode);
1646 	pgaps = ma_gaps(pnode, pmt);
1647 	offset = mte_parent_slot(penode);
1648 	penode = mt_mk_node(pnode, pmt);
1649 	goto ascend;
1650 }
1651 
1652 /*
1653  * mas_update_gap() - Update a nodes gaps and propagate up if necessary.
1654  * @mas - the maple state.
1655  */
1656 static inline void mas_update_gap(struct ma_state *mas)
1657 {
1658 	unsigned char pslot;
1659 	unsigned long p_gap;
1660 	unsigned long max_gap;
1661 
1662 	if (!mt_is_alloc(mas->tree))
1663 		return;
1664 
1665 	if (mte_is_root(mas->node))
1666 		return;
1667 
1668 	max_gap = mas_max_gap(mas);
1669 
1670 	pslot = mte_parent_slot(mas->node);
1671 	p_gap = ma_gaps(mte_parent(mas->node),
1672 			mas_parent_type(mas, mas->node))[pslot];
1673 
1674 	if (p_gap != max_gap)
1675 		mas_parent_gap(mas, pslot, max_gap);
1676 }
1677 
1678 /*
1679  * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1680  * @parent with the slot encoded.
1681  * @mas - the maple state (for the tree)
1682  * @parent - the maple encoded node containing the children.
1683  */
1684 static inline void mas_adopt_children(struct ma_state *mas,
1685 		struct maple_enode *parent)
1686 {
1687 	enum maple_type type = mte_node_type(parent);
1688 	struct maple_node *node = mte_to_node(parent);
1689 	void __rcu **slots = ma_slots(node, type);
1690 	unsigned long *pivots = ma_pivots(node, type);
1691 	struct maple_enode *child;
1692 	unsigned char offset;
1693 
1694 	offset = ma_data_end(node, type, pivots, mas->max);
1695 	do {
1696 		child = mas_slot_locked(mas, slots, offset);
1697 		mas_set_parent(mas, child, parent, offset);
1698 	} while (offset--);
1699 }
1700 
1701 /*
1702  * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
1703  * node as dead.
1704  * @mas - the maple state with the new node
1705  * @old_enode - The old maple encoded node to replace.
1706  */
1707 static inline void mas_put_in_tree(struct ma_state *mas,
1708 		struct maple_enode *old_enode)
1709 	__must_hold(mas->tree->ma_lock)
1710 {
1711 	unsigned char offset;
1712 	void __rcu **slots;
1713 
1714 	if (mte_is_root(mas->node)) {
1715 		mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
1716 		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1717 		mas_set_height(mas);
1718 	} else {
1719 
1720 		offset = mte_parent_slot(mas->node);
1721 		slots = ma_slots(mte_parent(mas->node),
1722 				 mas_parent_type(mas, mas->node));
1723 		rcu_assign_pointer(slots[offset], mas->node);
1724 	}
1725 
1726 	mte_set_node_dead(old_enode);
1727 }
1728 
1729 /*
1730  * mas_replace_node() - Replace a node by putting it in the tree, marking it
1731  * dead, and freeing it.
1732  * the parent encoding to locate the maple node in the tree.
1733  * @mas - the ma_state with @mas->node pointing to the new node.
1734  * @old_enode - The old maple encoded node.
1735  */
1736 static inline void mas_replace_node(struct ma_state *mas,
1737 		struct maple_enode *old_enode)
1738 	__must_hold(mas->tree->ma_lock)
1739 {
1740 	mas_put_in_tree(mas, old_enode);
1741 	mas_free(mas, old_enode);
1742 }
1743 
1744 /*
1745  * mas_find_child() - Find a child who has the parent @mas->node.
1746  * @mas: the maple state with the parent.
1747  * @child: the maple state to store the child.
1748  */
1749 static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
1750 	__must_hold(mas->tree->ma_lock)
1751 {
1752 	enum maple_type mt;
1753 	unsigned char offset;
1754 	unsigned char end;
1755 	unsigned long *pivots;
1756 	struct maple_enode *entry;
1757 	struct maple_node *node;
1758 	void __rcu **slots;
1759 
1760 	mt = mte_node_type(mas->node);
1761 	node = mas_mn(mas);
1762 	slots = ma_slots(node, mt);
1763 	pivots = ma_pivots(node, mt);
1764 	end = ma_data_end(node, mt, pivots, mas->max);
1765 	for (offset = mas->offset; offset <= end; offset++) {
1766 		entry = mas_slot_locked(mas, slots, offset);
1767 		if (mte_parent(entry) == node) {
1768 			*child = *mas;
1769 			mas->offset = offset + 1;
1770 			child->offset = offset;
1771 			mas_descend(child);
1772 			child->offset = 0;
1773 			return true;
1774 		}
1775 	}
1776 	return false;
1777 }
1778 
1779 /*
1780  * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1781  * old data or set b_node->b_end.
1782  * @b_node: the maple_big_node
1783  * @shift: the shift count
1784  */
1785 static inline void mab_shift_right(struct maple_big_node *b_node,
1786 				 unsigned char shift)
1787 {
1788 	unsigned long size = b_node->b_end * sizeof(unsigned long);
1789 
1790 	memmove(b_node->pivot + shift, b_node->pivot, size);
1791 	memmove(b_node->slot + shift, b_node->slot, size);
1792 	if (b_node->type == maple_arange_64)
1793 		memmove(b_node->gap + shift, b_node->gap, size);
1794 }
1795 
1796 /*
1797  * mab_middle_node() - Check if a middle node is needed (unlikely)
1798  * @b_node: the maple_big_node that contains the data.
1799  * @size: the amount of data in the b_node
1800  * @split: the potential split location
1801  * @slot_count: the size that can be stored in a single node being considered.
1802  *
1803  * Return: true if a middle node is required.
1804  */
1805 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1806 				   unsigned char slot_count)
1807 {
1808 	unsigned char size = b_node->b_end;
1809 
1810 	if (size >= 2 * slot_count)
1811 		return true;
1812 
1813 	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1814 		return true;
1815 
1816 	return false;
1817 }
1818 
1819 /*
1820  * mab_no_null_split() - ensure the split doesn't fall on a NULL
1821  * @b_node: the maple_big_node with the data
1822  * @split: the suggested split location
1823  * @slot_count: the number of slots in the node being considered.
1824  *
1825  * Return: the split location.
1826  */
1827 static inline int mab_no_null_split(struct maple_big_node *b_node,
1828 				    unsigned char split, unsigned char slot_count)
1829 {
1830 	if (!b_node->slot[split]) {
1831 		/*
1832 		 * If the split is less than the max slot && the right side will
1833 		 * still be sufficient, then increment the split on NULL.
1834 		 */
1835 		if ((split < slot_count - 1) &&
1836 		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1837 			split++;
1838 		else
1839 			split--;
1840 	}
1841 	return split;
1842 }
1843 
1844 /*
1845  * mab_calc_split() - Calculate the split location and if there needs to be two
1846  * splits.
1847  * @bn: The maple_big_node with the data
1848  * @mid_split: The second split, if required.  0 otherwise.
1849  *
1850  * Return: The first split location.  The middle split is set in @mid_split.
1851  */
1852 static inline int mab_calc_split(struct ma_state *mas,
1853 	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1854 {
1855 	unsigned char b_end = bn->b_end;
1856 	int split = b_end / 2; /* Assume equal split. */
1857 	unsigned char slot_min, slot_count = mt_slots[bn->type];
1858 
1859 	/*
1860 	 * To support gap tracking, all NULL entries are kept together and a node cannot
1861 	 * end on a NULL entry, with the exception of the left-most leaf.  The
1862 	 * limitation means that the split of a node must be checked for this condition
1863 	 * and be able to put more data in one direction or the other.
1864 	 */
1865 	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1866 		*mid_split = 0;
1867 		split = b_end - mt_min_slots[bn->type];
1868 
1869 		if (!ma_is_leaf(bn->type))
1870 			return split;
1871 
1872 		mas->mas_flags |= MA_STATE_REBALANCE;
1873 		if (!bn->slot[split])
1874 			split--;
1875 		return split;
1876 	}
1877 
1878 	/*
1879 	 * Although extremely rare, it is possible to enter what is known as the 3-way
1880 	 * split scenario.  The 3-way split comes about by means of a store of a range
1881 	 * that overwrites the end and beginning of two full nodes.  The result is a set
1882 	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
1883 	 * also be located in different parent nodes which are also full.  This can
1884 	 * carry upwards all the way to the root in the worst case.
1885 	 */
1886 	if (unlikely(mab_middle_node(bn, split, slot_count))) {
1887 		split = b_end / 3;
1888 		*mid_split = split * 2;
1889 	} else {
1890 		slot_min = mt_min_slots[bn->type];
1891 
1892 		*mid_split = 0;
1893 		/*
1894 		 * Avoid having a range less than the slot count unless it
1895 		 * causes one node to be deficient.
1896 		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
1897 		 */
1898 		while ((split < slot_count - 1) &&
1899 		       ((bn->pivot[split] - min) < slot_count - 1) &&
1900 		       (b_end - split > slot_min))
1901 			split++;
1902 	}
1903 
1904 	/* Avoid ending a node on a NULL entry */
1905 	split = mab_no_null_split(bn, split, slot_count);
1906 
1907 	if (unlikely(*mid_split))
1908 		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1909 
1910 	return split;
1911 }
1912 
1913 /*
1914  * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1915  * and set @b_node->b_end to the next free slot.
1916  * @mas: The maple state
1917  * @mas_start: The starting slot to copy
1918  * @mas_end: The end slot to copy (inclusively)
1919  * @b_node: The maple_big_node to place the data
1920  * @mab_start: The starting location in maple_big_node to store the data.
1921  */
1922 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1923 			unsigned char mas_end, struct maple_big_node *b_node,
1924 			unsigned char mab_start)
1925 {
1926 	enum maple_type mt;
1927 	struct maple_node *node;
1928 	void __rcu **slots;
1929 	unsigned long *pivots, *gaps;
1930 	int i = mas_start, j = mab_start;
1931 	unsigned char piv_end;
1932 
1933 	node = mas_mn(mas);
1934 	mt = mte_node_type(mas->node);
1935 	pivots = ma_pivots(node, mt);
1936 	if (!i) {
1937 		b_node->pivot[j] = pivots[i++];
1938 		if (unlikely(i > mas_end))
1939 			goto complete;
1940 		j++;
1941 	}
1942 
1943 	piv_end = min(mas_end, mt_pivots[mt]);
1944 	for (; i < piv_end; i++, j++) {
1945 		b_node->pivot[j] = pivots[i];
1946 		if (unlikely(!b_node->pivot[j]))
1947 			break;
1948 
1949 		if (unlikely(mas->max == b_node->pivot[j]))
1950 			goto complete;
1951 	}
1952 
1953 	if (likely(i <= mas_end))
1954 		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1955 
1956 complete:
1957 	b_node->b_end = ++j;
1958 	j -= mab_start;
1959 	slots = ma_slots(node, mt);
1960 	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1961 	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1962 		gaps = ma_gaps(node, mt);
1963 		memcpy(b_node->gap + mab_start, gaps + mas_start,
1964 		       sizeof(unsigned long) * j);
1965 	}
1966 }
1967 
1968 /*
1969  * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1970  * @node: The maple node
1971  * @mt: The maple type
1972  * @end: The node end
1973  */
1974 static inline void mas_leaf_set_meta(struct maple_node *node,
1975 		enum maple_type mt, unsigned char end)
1976 {
1977 	if (end < mt_slots[mt] - 1)
1978 		ma_set_meta(node, mt, 0, end);
1979 }
1980 
1981 /*
1982  * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1983  * @b_node: the maple_big_node that has the data
1984  * @mab_start: the start location in @b_node.
1985  * @mab_end: The end location in @b_node (inclusively)
1986  * @mas: The maple state with the maple encoded node.
1987  */
1988 static inline void mab_mas_cp(struct maple_big_node *b_node,
1989 			      unsigned char mab_start, unsigned char mab_end,
1990 			      struct ma_state *mas, bool new_max)
1991 {
1992 	int i, j = 0;
1993 	enum maple_type mt = mte_node_type(mas->node);
1994 	struct maple_node *node = mte_to_node(mas->node);
1995 	void __rcu **slots = ma_slots(node, mt);
1996 	unsigned long *pivots = ma_pivots(node, mt);
1997 	unsigned long *gaps = NULL;
1998 	unsigned char end;
1999 
2000 	if (mab_end - mab_start > mt_pivots[mt])
2001 		mab_end--;
2002 
2003 	if (!pivots[mt_pivots[mt] - 1])
2004 		slots[mt_pivots[mt]] = NULL;
2005 
2006 	i = mab_start;
2007 	do {
2008 		pivots[j++] = b_node->pivot[i++];
2009 	} while (i <= mab_end && likely(b_node->pivot[i]));
2010 
2011 	memcpy(slots, b_node->slot + mab_start,
2012 	       sizeof(void *) * (i - mab_start));
2013 
2014 	if (new_max)
2015 		mas->max = b_node->pivot[i - 1];
2016 
2017 	end = j - 1;
2018 	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2019 		unsigned long max_gap = 0;
2020 		unsigned char offset = 0;
2021 
2022 		gaps = ma_gaps(node, mt);
2023 		do {
2024 			gaps[--j] = b_node->gap[--i];
2025 			if (gaps[j] > max_gap) {
2026 				offset = j;
2027 				max_gap = gaps[j];
2028 			}
2029 		} while (j);
2030 
2031 		ma_set_meta(node, mt, offset, end);
2032 	} else {
2033 		mas_leaf_set_meta(node, mt, end);
2034 	}
2035 }
2036 
2037 /*
2038  * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2039  * @mas: The maple state
2040  * @end: The maple node end
2041  * @mt: The maple node type
2042  */
2043 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2044 				      enum maple_type mt)
2045 {
2046 	if (!(mas->mas_flags & MA_STATE_BULK))
2047 		return;
2048 
2049 	if (mte_is_root(mas->node))
2050 		return;
2051 
2052 	if (end > mt_min_slots[mt]) {
2053 		mas->mas_flags &= ~MA_STATE_REBALANCE;
2054 		return;
2055 	}
2056 }
2057 
2058 /*
2059  * mas_store_b_node() - Store an @entry into the b_node while also copying the
2060  * data from a maple encoded node.
2061  * @wr_mas: the maple write state
2062  * @b_node: the maple_big_node to fill with data
2063  * @offset_end: the offset to end copying
2064  *
2065  * Return: The actual end of the data stored in @b_node
2066  */
2067 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2068 		struct maple_big_node *b_node, unsigned char offset_end)
2069 {
2070 	unsigned char slot;
2071 	unsigned char b_end;
2072 	/* Possible underflow of piv will wrap back to 0 before use. */
2073 	unsigned long piv;
2074 	struct ma_state *mas = wr_mas->mas;
2075 
2076 	b_node->type = wr_mas->type;
2077 	b_end = 0;
2078 	slot = mas->offset;
2079 	if (slot) {
2080 		/* Copy start data up to insert. */
2081 		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2082 		b_end = b_node->b_end;
2083 		piv = b_node->pivot[b_end - 1];
2084 	} else
2085 		piv = mas->min - 1;
2086 
2087 	if (piv + 1 < mas->index) {
2088 		/* Handle range starting after old range */
2089 		b_node->slot[b_end] = wr_mas->content;
2090 		if (!wr_mas->content)
2091 			b_node->gap[b_end] = mas->index - 1 - piv;
2092 		b_node->pivot[b_end++] = mas->index - 1;
2093 	}
2094 
2095 	/* Store the new entry. */
2096 	mas->offset = b_end;
2097 	b_node->slot[b_end] = wr_mas->entry;
2098 	b_node->pivot[b_end] = mas->last;
2099 
2100 	/* Appended. */
2101 	if (mas->last >= mas->max)
2102 		goto b_end;
2103 
2104 	/* Handle new range ending before old range ends */
2105 	piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2106 	if (piv > mas->last) {
2107 		if (piv == ULONG_MAX)
2108 			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2109 
2110 		if (offset_end != slot)
2111 			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2112 							  offset_end);
2113 
2114 		b_node->slot[++b_end] = wr_mas->content;
2115 		if (!wr_mas->content)
2116 			b_node->gap[b_end] = piv - mas->last + 1;
2117 		b_node->pivot[b_end] = piv;
2118 	}
2119 
2120 	slot = offset_end + 1;
2121 	if (slot > mas->end)
2122 		goto b_end;
2123 
2124 	/* Copy end data to the end of the node. */
2125 	mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
2126 	b_node->b_end--;
2127 	return;
2128 
2129 b_end:
2130 	b_node->b_end = b_end;
2131 }
2132 
2133 /*
2134  * mas_prev_sibling() - Find the previous node with the same parent.
2135  * @mas: the maple state
2136  *
2137  * Return: True if there is a previous sibling, false otherwise.
2138  */
2139 static inline bool mas_prev_sibling(struct ma_state *mas)
2140 {
2141 	unsigned int p_slot = mte_parent_slot(mas->node);
2142 
2143 	if (mte_is_root(mas->node))
2144 		return false;
2145 
2146 	if (!p_slot)
2147 		return false;
2148 
2149 	mas_ascend(mas);
2150 	mas->offset = p_slot - 1;
2151 	mas_descend(mas);
2152 	return true;
2153 }
2154 
2155 /*
2156  * mas_next_sibling() - Find the next node with the same parent.
2157  * @mas: the maple state
2158  *
2159  * Return: true if there is a next sibling, false otherwise.
2160  */
2161 static inline bool mas_next_sibling(struct ma_state *mas)
2162 {
2163 	MA_STATE(parent, mas->tree, mas->index, mas->last);
2164 
2165 	if (mte_is_root(mas->node))
2166 		return false;
2167 
2168 	parent = *mas;
2169 	mas_ascend(&parent);
2170 	parent.offset = mte_parent_slot(mas->node) + 1;
2171 	if (parent.offset > mas_data_end(&parent))
2172 		return false;
2173 
2174 	*mas = parent;
2175 	mas_descend(mas);
2176 	return true;
2177 }
2178 
2179 /*
2180  * mte_node_or_none() - Set the enode and state.
2181  * @enode: The encoded maple node.
2182  *
2183  * Set the node to the enode and the status.
2184  */
2185 static inline void mas_node_or_none(struct ma_state *mas,
2186 		struct maple_enode *enode)
2187 {
2188 	if (enode) {
2189 		mas->node = enode;
2190 		mas->status = ma_active;
2191 	} else {
2192 		mas->node = NULL;
2193 		mas->status = ma_none;
2194 	}
2195 }
2196 
2197 /*
2198  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2199  * @wr_mas: The maple write state
2200  *
2201  * Uses mas_slot_locked() and does not need to worry about dead nodes.
2202  */
2203 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2204 {
2205 	struct ma_state *mas = wr_mas->mas;
2206 	unsigned char count, offset;
2207 
2208 	if (unlikely(ma_is_dense(wr_mas->type))) {
2209 		wr_mas->r_max = wr_mas->r_min = mas->index;
2210 		mas->offset = mas->index = mas->min;
2211 		return;
2212 	}
2213 
2214 	wr_mas->node = mas_mn(wr_mas->mas);
2215 	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2216 	count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
2217 				       wr_mas->pivots, mas->max);
2218 	offset = mas->offset;
2219 
2220 	while (offset < count && mas->index > wr_mas->pivots[offset])
2221 		offset++;
2222 
2223 	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2224 	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2225 	wr_mas->offset_end = mas->offset = offset;
2226 }
2227 
2228 /*
2229  * mast_rebalance_next() - Rebalance against the next node
2230  * @mast: The maple subtree state
2231  * @old_r: The encoded maple node to the right (next node).
2232  */
2233 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2234 {
2235 	unsigned char b_end = mast->bn->b_end;
2236 
2237 	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2238 		   mast->bn, b_end);
2239 	mast->orig_r->last = mast->orig_r->max;
2240 }
2241 
2242 /*
2243  * mast_rebalance_prev() - Rebalance against the previous node
2244  * @mast: The maple subtree state
2245  * @old_l: The encoded maple node to the left (previous node)
2246  */
2247 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2248 {
2249 	unsigned char end = mas_data_end(mast->orig_l) + 1;
2250 	unsigned char b_end = mast->bn->b_end;
2251 
2252 	mab_shift_right(mast->bn, end);
2253 	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2254 	mast->l->min = mast->orig_l->min;
2255 	mast->orig_l->index = mast->orig_l->min;
2256 	mast->bn->b_end = end + b_end;
2257 	mast->l->offset += end;
2258 }
2259 
2260 /*
2261  * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
2262  * the node to the right.  Checking the nodes to the right then the left at each
2263  * level upwards until root is reached.
2264  * Data is copied into the @mast->bn.
2265  * @mast: The maple_subtree_state.
2266  */
2267 static inline
2268 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2269 {
2270 	struct ma_state r_tmp = *mast->orig_r;
2271 	struct ma_state l_tmp = *mast->orig_l;
2272 	unsigned char depth = 0;
2273 
2274 	r_tmp = *mast->orig_r;
2275 	l_tmp = *mast->orig_l;
2276 	do {
2277 		mas_ascend(mast->orig_r);
2278 		mas_ascend(mast->orig_l);
2279 		depth++;
2280 		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2281 			mast->orig_r->offset++;
2282 			do {
2283 				mas_descend(mast->orig_r);
2284 				mast->orig_r->offset = 0;
2285 			} while (--depth);
2286 
2287 			mast_rebalance_next(mast);
2288 			*mast->orig_l = l_tmp;
2289 			return true;
2290 		} else if (mast->orig_l->offset != 0) {
2291 			mast->orig_l->offset--;
2292 			do {
2293 				mas_descend(mast->orig_l);
2294 				mast->orig_l->offset =
2295 					mas_data_end(mast->orig_l);
2296 			} while (--depth);
2297 
2298 			mast_rebalance_prev(mast);
2299 			*mast->orig_r = r_tmp;
2300 			return true;
2301 		}
2302 	} while (!mte_is_root(mast->orig_r->node));
2303 
2304 	*mast->orig_r = r_tmp;
2305 	*mast->orig_l = l_tmp;
2306 	return false;
2307 }
2308 
2309 /*
2310  * mast_ascend() - Ascend the original left and right maple states.
2311  * @mast: the maple subtree state.
2312  *
2313  * Ascend the original left and right sides.  Set the offsets to point to the
2314  * data already in the new tree (@mast->l and @mast->r).
2315  */
2316 static inline void mast_ascend(struct maple_subtree_state *mast)
2317 {
2318 	MA_WR_STATE(wr_mas, mast->orig_r,  NULL);
2319 	mas_ascend(mast->orig_l);
2320 	mas_ascend(mast->orig_r);
2321 
2322 	mast->orig_r->offset = 0;
2323 	mast->orig_r->index = mast->r->max;
2324 	/* last should be larger than or equal to index */
2325 	if (mast->orig_r->last < mast->orig_r->index)
2326 		mast->orig_r->last = mast->orig_r->index;
2327 
2328 	wr_mas.type = mte_node_type(mast->orig_r->node);
2329 	mas_wr_node_walk(&wr_mas);
2330 	/* Set up the left side of things */
2331 	mast->orig_l->offset = 0;
2332 	mast->orig_l->index = mast->l->min;
2333 	wr_mas.mas = mast->orig_l;
2334 	wr_mas.type = mte_node_type(mast->orig_l->node);
2335 	mas_wr_node_walk(&wr_mas);
2336 
2337 	mast->bn->type = wr_mas.type;
2338 }
2339 
2340 /*
2341  * mas_new_ma_node() - Create and return a new maple node.  Helper function.
2342  * @mas: the maple state with the allocations.
2343  * @b_node: the maple_big_node with the type encoding.
2344  *
2345  * Use the node type from the maple_big_node to allocate a new node from the
2346  * ma_state.  This function exists mainly for code readability.
2347  *
2348  * Return: A new maple encoded node
2349  */
2350 static inline struct maple_enode
2351 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2352 {
2353 	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2354 }
2355 
2356 /*
2357  * mas_mab_to_node() - Set up right and middle nodes
2358  *
2359  * @mas: the maple state that contains the allocations.
2360  * @b_node: the node which contains the data.
2361  * @left: The pointer which will have the left node
2362  * @right: The pointer which may have the right node
2363  * @middle: the pointer which may have the middle node (rare)
2364  * @mid_split: the split location for the middle node
2365  *
2366  * Return: the split of left.
2367  */
2368 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2369 	struct maple_big_node *b_node, struct maple_enode **left,
2370 	struct maple_enode **right, struct maple_enode **middle,
2371 	unsigned char *mid_split, unsigned long min)
2372 {
2373 	unsigned char split = 0;
2374 	unsigned char slot_count = mt_slots[b_node->type];
2375 
2376 	*left = mas_new_ma_node(mas, b_node);
2377 	*right = NULL;
2378 	*middle = NULL;
2379 	*mid_split = 0;
2380 
2381 	if (b_node->b_end < slot_count) {
2382 		split = b_node->b_end;
2383 	} else {
2384 		split = mab_calc_split(mas, b_node, mid_split, min);
2385 		*right = mas_new_ma_node(mas, b_node);
2386 	}
2387 
2388 	if (*mid_split)
2389 		*middle = mas_new_ma_node(mas, b_node);
2390 
2391 	return split;
2392 
2393 }
2394 
2395 /*
2396  * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2397  * pointer.
2398  * @b_node - the big node to add the entry
2399  * @mas - the maple state to get the pivot (mas->max)
2400  * @entry - the entry to add, if NULL nothing happens.
2401  */
2402 static inline void mab_set_b_end(struct maple_big_node *b_node,
2403 				 struct ma_state *mas,
2404 				 void *entry)
2405 {
2406 	if (!entry)
2407 		return;
2408 
2409 	b_node->slot[b_node->b_end] = entry;
2410 	if (mt_is_alloc(mas->tree))
2411 		b_node->gap[b_node->b_end] = mas_max_gap(mas);
2412 	b_node->pivot[b_node->b_end++] = mas->max;
2413 }
2414 
2415 /*
2416  * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
2417  * of @mas->node to either @left or @right, depending on @slot and @split
2418  *
2419  * @mas - the maple state with the node that needs a parent
2420  * @left - possible parent 1
2421  * @right - possible parent 2
2422  * @slot - the slot the mas->node was placed
2423  * @split - the split location between @left and @right
2424  */
2425 static inline void mas_set_split_parent(struct ma_state *mas,
2426 					struct maple_enode *left,
2427 					struct maple_enode *right,
2428 					unsigned char *slot, unsigned char split)
2429 {
2430 	if (mas_is_none(mas))
2431 		return;
2432 
2433 	if ((*slot) <= split)
2434 		mas_set_parent(mas, mas->node, left, *slot);
2435 	else if (right)
2436 		mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2437 
2438 	(*slot)++;
2439 }
2440 
2441 /*
2442  * mte_mid_split_check() - Check if the next node passes the mid-split
2443  * @**l: Pointer to left encoded maple node.
2444  * @**m: Pointer to middle encoded maple node.
2445  * @**r: Pointer to right encoded maple node.
2446  * @slot: The offset
2447  * @*split: The split location.
2448  * @mid_split: The middle split.
2449  */
2450 static inline void mte_mid_split_check(struct maple_enode **l,
2451 				       struct maple_enode **r,
2452 				       struct maple_enode *right,
2453 				       unsigned char slot,
2454 				       unsigned char *split,
2455 				       unsigned char mid_split)
2456 {
2457 	if (*r == right)
2458 		return;
2459 
2460 	if (slot < mid_split)
2461 		return;
2462 
2463 	*l = *r;
2464 	*r = right;
2465 	*split = mid_split;
2466 }
2467 
2468 /*
2469  * mast_set_split_parents() - Helper function to set three nodes parents.  Slot
2470  * is taken from @mast->l.
2471  * @mast - the maple subtree state
2472  * @left - the left node
2473  * @right - the right node
2474  * @split - the split location.
2475  */
2476 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2477 					  struct maple_enode *left,
2478 					  struct maple_enode *middle,
2479 					  struct maple_enode *right,
2480 					  unsigned char split,
2481 					  unsigned char mid_split)
2482 {
2483 	unsigned char slot;
2484 	struct maple_enode *l = left;
2485 	struct maple_enode *r = right;
2486 
2487 	if (mas_is_none(mast->l))
2488 		return;
2489 
2490 	if (middle)
2491 		r = middle;
2492 
2493 	slot = mast->l->offset;
2494 
2495 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2496 	mas_set_split_parent(mast->l, l, r, &slot, split);
2497 
2498 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2499 	mas_set_split_parent(mast->m, l, r, &slot, split);
2500 
2501 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2502 	mas_set_split_parent(mast->r, l, r, &slot, split);
2503 }
2504 
2505 /*
2506  * mas_topiary_node() - Dispose of a single node
2507  * @mas: The maple state for pushing nodes
2508  * @enode: The encoded maple node
2509  * @in_rcu: If the tree is in rcu mode
2510  *
2511  * The node will either be RCU freed or pushed back on the maple state.
2512  */
2513 static inline void mas_topiary_node(struct ma_state *mas,
2514 		struct ma_state *tmp_mas, bool in_rcu)
2515 {
2516 	struct maple_node *tmp;
2517 	struct maple_enode *enode;
2518 
2519 	if (mas_is_none(tmp_mas))
2520 		return;
2521 
2522 	enode = tmp_mas->node;
2523 	tmp = mte_to_node(enode);
2524 	mte_set_node_dead(enode);
2525 	if (in_rcu)
2526 		ma_free_rcu(tmp);
2527 	else
2528 		mas_push_node(mas, tmp);
2529 }
2530 
2531 /*
2532  * mas_topiary_replace() - Replace the data with new data, then repair the
2533  * parent links within the new tree.  Iterate over the dead sub-tree and collect
2534  * the dead subtrees and topiary the nodes that are no longer of use.
2535  *
2536  * The new tree will have up to three children with the correct parent.  Keep
2537  * track of the new entries as they need to be followed to find the next level
2538  * of new entries.
2539  *
2540  * The old tree will have up to three children with the old parent.  Keep track
2541  * of the old entries as they may have more nodes below replaced.  Nodes within
2542  * [index, last] are dead subtrees, others need to be freed and followed.
2543  *
2544  * @mas: The maple state pointing at the new data
2545  * @old_enode: The maple encoded node being replaced
2546  *
2547  */
2548 static inline void mas_topiary_replace(struct ma_state *mas,
2549 		struct maple_enode *old_enode)
2550 {
2551 	struct ma_state tmp[3], tmp_next[3];
2552 	MA_TOPIARY(subtrees, mas->tree);
2553 	bool in_rcu;
2554 	int i, n;
2555 
2556 	/* Place data in tree & then mark node as old */
2557 	mas_put_in_tree(mas, old_enode);
2558 
2559 	/* Update the parent pointers in the tree */
2560 	tmp[0] = *mas;
2561 	tmp[0].offset = 0;
2562 	tmp[1].status = ma_none;
2563 	tmp[2].status = ma_none;
2564 	while (!mte_is_leaf(tmp[0].node)) {
2565 		n = 0;
2566 		for (i = 0; i < 3; i++) {
2567 			if (mas_is_none(&tmp[i]))
2568 				continue;
2569 
2570 			while (n < 3) {
2571 				if (!mas_find_child(&tmp[i], &tmp_next[n]))
2572 					break;
2573 				n++;
2574 			}
2575 
2576 			mas_adopt_children(&tmp[i], tmp[i].node);
2577 		}
2578 
2579 		if (MAS_WARN_ON(mas, n == 0))
2580 			break;
2581 
2582 		while (n < 3)
2583 			tmp_next[n++].status = ma_none;
2584 
2585 		for (i = 0; i < 3; i++)
2586 			tmp[i] = tmp_next[i];
2587 	}
2588 
2589 	/* Collect the old nodes that need to be discarded */
2590 	if (mte_is_leaf(old_enode))
2591 		return mas_free(mas, old_enode);
2592 
2593 	tmp[0] = *mas;
2594 	tmp[0].offset = 0;
2595 	tmp[0].node = old_enode;
2596 	tmp[1].status = ma_none;
2597 	tmp[2].status = ma_none;
2598 	in_rcu = mt_in_rcu(mas->tree);
2599 	do {
2600 		n = 0;
2601 		for (i = 0; i < 3; i++) {
2602 			if (mas_is_none(&tmp[i]))
2603 				continue;
2604 
2605 			while (n < 3) {
2606 				if (!mas_find_child(&tmp[i], &tmp_next[n]))
2607 					break;
2608 
2609 				if ((tmp_next[n].min >= tmp_next->index) &&
2610 				    (tmp_next[n].max <= tmp_next->last)) {
2611 					mat_add(&subtrees, tmp_next[n].node);
2612 					tmp_next[n].status = ma_none;
2613 				} else {
2614 					n++;
2615 				}
2616 			}
2617 		}
2618 
2619 		if (MAS_WARN_ON(mas, n == 0))
2620 			break;
2621 
2622 		while (n < 3)
2623 			tmp_next[n++].status = ma_none;
2624 
2625 		for (i = 0; i < 3; i++) {
2626 			mas_topiary_node(mas, &tmp[i], in_rcu);
2627 			tmp[i] = tmp_next[i];
2628 		}
2629 	} while (!mte_is_leaf(tmp[0].node));
2630 
2631 	for (i = 0; i < 3; i++)
2632 		mas_topiary_node(mas, &tmp[i], in_rcu);
2633 
2634 	mas_mat_destroy(mas, &subtrees);
2635 }
2636 
2637 /*
2638  * mas_wmb_replace() - Write memory barrier and replace
2639  * @mas: The maple state
2640  * @old: The old maple encoded node that is being replaced.
2641  *
2642  * Updates gap as necessary.
2643  */
2644 static inline void mas_wmb_replace(struct ma_state *mas,
2645 		struct maple_enode *old_enode)
2646 {
2647 	/* Insert the new data in the tree */
2648 	mas_topiary_replace(mas, old_enode);
2649 
2650 	if (mte_is_leaf(mas->node))
2651 		return;
2652 
2653 	mas_update_gap(mas);
2654 }
2655 
2656 /*
2657  * mast_cp_to_nodes() - Copy data out to nodes.
2658  * @mast: The maple subtree state
2659  * @left: The left encoded maple node
2660  * @middle: The middle encoded maple node
2661  * @right: The right encoded maple node
2662  * @split: The location to split between left and (middle ? middle : right)
2663  * @mid_split: The location to split between middle and right.
2664  */
2665 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2666 	struct maple_enode *left, struct maple_enode *middle,
2667 	struct maple_enode *right, unsigned char split, unsigned char mid_split)
2668 {
2669 	bool new_lmax = true;
2670 
2671 	mas_node_or_none(mast->l, left);
2672 	mas_node_or_none(mast->m, middle);
2673 	mas_node_or_none(mast->r, right);
2674 
2675 	mast->l->min = mast->orig_l->min;
2676 	if (split == mast->bn->b_end) {
2677 		mast->l->max = mast->orig_r->max;
2678 		new_lmax = false;
2679 	}
2680 
2681 	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2682 
2683 	if (middle) {
2684 		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2685 		mast->m->min = mast->bn->pivot[split] + 1;
2686 		split = mid_split;
2687 	}
2688 
2689 	mast->r->max = mast->orig_r->max;
2690 	if (right) {
2691 		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2692 		mast->r->min = mast->bn->pivot[split] + 1;
2693 	}
2694 }
2695 
2696 /*
2697  * mast_combine_cp_left - Copy in the original left side of the tree into the
2698  * combined data set in the maple subtree state big node.
2699  * @mast: The maple subtree state
2700  */
2701 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2702 {
2703 	unsigned char l_slot = mast->orig_l->offset;
2704 
2705 	if (!l_slot)
2706 		return;
2707 
2708 	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2709 }
2710 
2711 /*
2712  * mast_combine_cp_right: Copy in the original right side of the tree into the
2713  * combined data set in the maple subtree state big node.
2714  * @mast: The maple subtree state
2715  */
2716 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2717 {
2718 	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2719 		return;
2720 
2721 	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2722 		   mt_slot_count(mast->orig_r->node), mast->bn,
2723 		   mast->bn->b_end);
2724 	mast->orig_r->last = mast->orig_r->max;
2725 }
2726 
2727 /*
2728  * mast_sufficient: Check if the maple subtree state has enough data in the big
2729  * node to create at least one sufficient node
2730  * @mast: the maple subtree state
2731  */
2732 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2733 {
2734 	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2735 		return true;
2736 
2737 	return false;
2738 }
2739 
2740 /*
2741  * mast_overflow: Check if there is too much data in the subtree state for a
2742  * single node.
2743  * @mast: The maple subtree state
2744  */
2745 static inline bool mast_overflow(struct maple_subtree_state *mast)
2746 {
2747 	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2748 		return true;
2749 
2750 	return false;
2751 }
2752 
2753 static inline void *mtree_range_walk(struct ma_state *mas)
2754 {
2755 	unsigned long *pivots;
2756 	unsigned char offset;
2757 	struct maple_node *node;
2758 	struct maple_enode *next, *last;
2759 	enum maple_type type;
2760 	void __rcu **slots;
2761 	unsigned char end;
2762 	unsigned long max, min;
2763 	unsigned long prev_max, prev_min;
2764 
2765 	next = mas->node;
2766 	min = mas->min;
2767 	max = mas->max;
2768 	do {
2769 		last = next;
2770 		node = mte_to_node(next);
2771 		type = mte_node_type(next);
2772 		pivots = ma_pivots(node, type);
2773 		end = ma_data_end(node, type, pivots, max);
2774 		prev_min = min;
2775 		prev_max = max;
2776 		if (pivots[0] >= mas->index) {
2777 			offset = 0;
2778 			max = pivots[0];
2779 			goto next;
2780 		}
2781 
2782 		offset = 1;
2783 		while (offset < end) {
2784 			if (pivots[offset] >= mas->index) {
2785 				max = pivots[offset];
2786 				break;
2787 			}
2788 			offset++;
2789 		}
2790 
2791 		min = pivots[offset - 1] + 1;
2792 next:
2793 		slots = ma_slots(node, type);
2794 		next = mt_slot(mas->tree, slots, offset);
2795 		if (unlikely(ma_dead_node(node)))
2796 			goto dead_node;
2797 	} while (!ma_is_leaf(type));
2798 
2799 	mas->end = end;
2800 	mas->offset = offset;
2801 	mas->index = min;
2802 	mas->last = max;
2803 	mas->min = prev_min;
2804 	mas->max = prev_max;
2805 	mas->node = last;
2806 	return (void *)next;
2807 
2808 dead_node:
2809 	mas_reset(mas);
2810 	return NULL;
2811 }
2812 
2813 /*
2814  * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2815  * @mas: The starting maple state
2816  * @mast: The maple_subtree_state, keeps track of 4 maple states.
2817  * @count: The estimated count of iterations needed.
2818  *
2819  * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2820  * is hit.  First @b_node is split into two entries which are inserted into the
2821  * next iteration of the loop.  @b_node is returned populated with the final
2822  * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
2823  * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2824  * to account of what has been copied into the new sub-tree.  The update of
2825  * orig_l_mas->last is used in mas_consume to find the slots that will need to
2826  * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
2827  * the new sub-tree in case the sub-tree becomes the full tree.
2828  *
2829  * Return: the number of elements in b_node during the last loop.
2830  */
2831 static int mas_spanning_rebalance(struct ma_state *mas,
2832 		struct maple_subtree_state *mast, unsigned char count)
2833 {
2834 	unsigned char split, mid_split;
2835 	unsigned char slot = 0;
2836 	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2837 	struct maple_enode *old_enode;
2838 
2839 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2840 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2841 	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2842 
2843 	/*
2844 	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2845 	 * Rebalancing is done by use of the ``struct maple_topiary``.
2846 	 */
2847 	mast->l = &l_mas;
2848 	mast->m = &m_mas;
2849 	mast->r = &r_mas;
2850 	l_mas.status = r_mas.status = m_mas.status = ma_none;
2851 
2852 	/* Check if this is not root and has sufficient data.  */
2853 	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
2854 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
2855 		mast_spanning_rebalance(mast);
2856 
2857 	l_mas.depth = 0;
2858 
2859 	/*
2860 	 * Each level of the tree is examined and balanced, pushing data to the left or
2861 	 * right, or rebalancing against left or right nodes is employed to avoid
2862 	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
2863 	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
2864 	 * will have the incorrect parent pointers and currently be in two trees: the
2865 	 * original tree and the partially new tree.  To remedy the parent pointers in
2866 	 * the old tree, the new data is swapped into the active tree and a walk down
2867 	 * the tree is performed and the parent pointers are updated.
2868 	 * See mas_topiary_replace() for more information.
2869 	 */
2870 	while (count--) {
2871 		mast->bn->b_end--;
2872 		mast->bn->type = mte_node_type(mast->orig_l->node);
2873 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
2874 					&mid_split, mast->orig_l->min);
2875 		mast_set_split_parents(mast, left, middle, right, split,
2876 				       mid_split);
2877 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
2878 
2879 		/*
2880 		 * Copy data from next level in the tree to mast->bn from next
2881 		 * iteration
2882 		 */
2883 		memset(mast->bn, 0, sizeof(struct maple_big_node));
2884 		mast->bn->type = mte_node_type(left);
2885 		l_mas.depth++;
2886 
2887 		/* Root already stored in l->node. */
2888 		if (mas_is_root_limits(mast->l))
2889 			goto new_root;
2890 
2891 		mast_ascend(mast);
2892 		mast_combine_cp_left(mast);
2893 		l_mas.offset = mast->bn->b_end;
2894 		mab_set_b_end(mast->bn, &l_mas, left);
2895 		mab_set_b_end(mast->bn, &m_mas, middle);
2896 		mab_set_b_end(mast->bn, &r_mas, right);
2897 
2898 		/* Copy anything necessary out of the right node. */
2899 		mast_combine_cp_right(mast);
2900 		mast->orig_l->last = mast->orig_l->max;
2901 
2902 		if (mast_sufficient(mast))
2903 			continue;
2904 
2905 		if (mast_overflow(mast))
2906 			continue;
2907 
2908 		/* May be a new root stored in mast->bn */
2909 		if (mas_is_root_limits(mast->orig_l))
2910 			break;
2911 
2912 		mast_spanning_rebalance(mast);
2913 
2914 		/* rebalancing from other nodes may require another loop. */
2915 		if (!count)
2916 			count++;
2917 	}
2918 
2919 	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
2920 				mte_node_type(mast->orig_l->node));
2921 	l_mas.depth++;
2922 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
2923 	mas_set_parent(mas, left, l_mas.node, slot);
2924 	if (middle)
2925 		mas_set_parent(mas, middle, l_mas.node, ++slot);
2926 
2927 	if (right)
2928 		mas_set_parent(mas, right, l_mas.node, ++slot);
2929 
2930 	if (mas_is_root_limits(mast->l)) {
2931 new_root:
2932 		mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
2933 		while (!mte_is_root(mast->orig_l->node))
2934 			mast_ascend(mast);
2935 	} else {
2936 		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
2937 	}
2938 
2939 	old_enode = mast->orig_l->node;
2940 	mas->depth = l_mas.depth;
2941 	mas->node = l_mas.node;
2942 	mas->min = l_mas.min;
2943 	mas->max = l_mas.max;
2944 	mas->offset = l_mas.offset;
2945 	mas_wmb_replace(mas, old_enode);
2946 	mtree_range_walk(mas);
2947 	return mast->bn->b_end;
2948 }
2949 
2950 /*
2951  * mas_rebalance() - Rebalance a given node.
2952  * @mas: The maple state
2953  * @b_node: The big maple node.
2954  *
2955  * Rebalance two nodes into a single node or two new nodes that are sufficient.
2956  * Continue upwards until tree is sufficient.
2957  *
2958  * Return: the number of elements in b_node during the last loop.
2959  */
2960 static inline int mas_rebalance(struct ma_state *mas,
2961 				struct maple_big_node *b_node)
2962 {
2963 	char empty_count = mas_mt_height(mas);
2964 	struct maple_subtree_state mast;
2965 	unsigned char shift, b_end = ++b_node->b_end;
2966 
2967 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
2968 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2969 
2970 	trace_ma_op(__func__, mas);
2971 
2972 	/*
2973 	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
2974 	 * against the node to the right if it exists, otherwise the node to the
2975 	 * left of this node is rebalanced against this node.  If rebalancing
2976 	 * causes just one node to be produced instead of two, then the parent
2977 	 * is also examined and rebalanced if it is insufficient.  Every level
2978 	 * tries to combine the data in the same way.  If one node contains the
2979 	 * entire range of the tree, then that node is used as a new root node.
2980 	 */
2981 	mas_node_count(mas, empty_count * 2 - 1);
2982 	if (mas_is_err(mas))
2983 		return 0;
2984 
2985 	mast.orig_l = &l_mas;
2986 	mast.orig_r = &r_mas;
2987 	mast.bn = b_node;
2988 	mast.bn->type = mte_node_type(mas->node);
2989 
2990 	l_mas = r_mas = *mas;
2991 
2992 	if (mas_next_sibling(&r_mas)) {
2993 		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
2994 		r_mas.last = r_mas.index = r_mas.max;
2995 	} else {
2996 		mas_prev_sibling(&l_mas);
2997 		shift = mas_data_end(&l_mas) + 1;
2998 		mab_shift_right(b_node, shift);
2999 		mas->offset += shift;
3000 		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3001 		b_node->b_end = shift + b_end;
3002 		l_mas.index = l_mas.last = l_mas.min;
3003 	}
3004 
3005 	return mas_spanning_rebalance(mas, &mast, empty_count);
3006 }
3007 
3008 /*
3009  * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3010  * state.
3011  * @mas: The maple state
3012  * @end: The end of the left-most node.
3013  *
3014  * During a mass-insert event (such as forking), it may be necessary to
3015  * rebalance the left-most node when it is not sufficient.
3016  */
3017 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3018 {
3019 	enum maple_type mt = mte_node_type(mas->node);
3020 	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3021 	struct maple_enode *eparent, *old_eparent;
3022 	unsigned char offset, tmp, split = mt_slots[mt] / 2;
3023 	void __rcu **l_slots, **slots;
3024 	unsigned long *l_pivs, *pivs, gap;
3025 	bool in_rcu = mt_in_rcu(mas->tree);
3026 
3027 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3028 
3029 	l_mas = *mas;
3030 	mas_prev_sibling(&l_mas);
3031 
3032 	/* set up node. */
3033 	if (in_rcu) {
3034 		/* Allocate for both left and right as well as parent. */
3035 		mas_node_count(mas, 3);
3036 		if (mas_is_err(mas))
3037 			return;
3038 
3039 		newnode = mas_pop_node(mas);
3040 	} else {
3041 		newnode = &reuse;
3042 	}
3043 
3044 	node = mas_mn(mas);
3045 	newnode->parent = node->parent;
3046 	slots = ma_slots(newnode, mt);
3047 	pivs = ma_pivots(newnode, mt);
3048 	left = mas_mn(&l_mas);
3049 	l_slots = ma_slots(left, mt);
3050 	l_pivs = ma_pivots(left, mt);
3051 	if (!l_slots[split])
3052 		split++;
3053 	tmp = mas_data_end(&l_mas) - split;
3054 
3055 	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3056 	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3057 	pivs[tmp] = l_mas.max;
3058 	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3059 	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3060 
3061 	l_mas.max = l_pivs[split];
3062 	mas->min = l_mas.max + 1;
3063 	old_eparent = mt_mk_node(mte_parent(l_mas.node),
3064 			     mas_parent_type(&l_mas, l_mas.node));
3065 	tmp += end;
3066 	if (!in_rcu) {
3067 		unsigned char max_p = mt_pivots[mt];
3068 		unsigned char max_s = mt_slots[mt];
3069 
3070 		if (tmp < max_p)
3071 			memset(pivs + tmp, 0,
3072 			       sizeof(unsigned long) * (max_p - tmp));
3073 
3074 		if (tmp < mt_slots[mt])
3075 			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3076 
3077 		memcpy(node, newnode, sizeof(struct maple_node));
3078 		ma_set_meta(node, mt, 0, tmp - 1);
3079 		mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
3080 			      l_pivs[split]);
3081 
3082 		/* Remove data from l_pivs. */
3083 		tmp = split + 1;
3084 		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3085 		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3086 		ma_set_meta(left, mt, 0, split);
3087 		eparent = old_eparent;
3088 
3089 		goto done;
3090 	}
3091 
3092 	/* RCU requires replacing both l_mas, mas, and parent. */
3093 	mas->node = mt_mk_node(newnode, mt);
3094 	ma_set_meta(newnode, mt, 0, tmp);
3095 
3096 	new_left = mas_pop_node(mas);
3097 	new_left->parent = left->parent;
3098 	mt = mte_node_type(l_mas.node);
3099 	slots = ma_slots(new_left, mt);
3100 	pivs = ma_pivots(new_left, mt);
3101 	memcpy(slots, l_slots, sizeof(void *) * split);
3102 	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3103 	ma_set_meta(new_left, mt, 0, split);
3104 	l_mas.node = mt_mk_node(new_left, mt);
3105 
3106 	/* replace parent. */
3107 	offset = mte_parent_slot(mas->node);
3108 	mt = mas_parent_type(&l_mas, l_mas.node);
3109 	parent = mas_pop_node(mas);
3110 	slots = ma_slots(parent, mt);
3111 	pivs = ma_pivots(parent, mt);
3112 	memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
3113 	rcu_assign_pointer(slots[offset], mas->node);
3114 	rcu_assign_pointer(slots[offset - 1], l_mas.node);
3115 	pivs[offset - 1] = l_mas.max;
3116 	eparent = mt_mk_node(parent, mt);
3117 done:
3118 	gap = mas_leaf_max_gap(mas);
3119 	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3120 	gap = mas_leaf_max_gap(&l_mas);
3121 	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3122 	mas_ascend(mas);
3123 
3124 	if (in_rcu) {
3125 		mas_replace_node(mas, old_eparent);
3126 		mas_adopt_children(mas, mas->node);
3127 	}
3128 
3129 	mas_update_gap(mas);
3130 }
3131 
3132 /*
3133  * mas_split_final_node() - Split the final node in a subtree operation.
3134  * @mast: the maple subtree state
3135  * @mas: The maple state
3136  * @height: The height of the tree in case it's a new root.
3137  */
3138 static inline void mas_split_final_node(struct maple_subtree_state *mast,
3139 					struct ma_state *mas, int height)
3140 {
3141 	struct maple_enode *ancestor;
3142 
3143 	if (mte_is_root(mas->node)) {
3144 		if (mt_is_alloc(mas->tree))
3145 			mast->bn->type = maple_arange_64;
3146 		else
3147 			mast->bn->type = maple_range_64;
3148 		mas->depth = height;
3149 	}
3150 	/*
3151 	 * Only a single node is used here, could be root.
3152 	 * The Big_node data should just fit in a single node.
3153 	 */
3154 	ancestor = mas_new_ma_node(mas, mast->bn);
3155 	mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3156 	mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3157 	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3158 
3159 	mast->l->node = ancestor;
3160 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3161 	mas->offset = mast->bn->b_end - 1;
3162 }
3163 
3164 /*
3165  * mast_fill_bnode() - Copy data into the big node in the subtree state
3166  * @mast: The maple subtree state
3167  * @mas: the maple state
3168  * @skip: The number of entries to skip for new nodes insertion.
3169  */
3170 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3171 					 struct ma_state *mas,
3172 					 unsigned char skip)
3173 {
3174 	bool cp = true;
3175 	unsigned char split;
3176 
3177 	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3178 	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3179 	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3180 	mast->bn->b_end = 0;
3181 
3182 	if (mte_is_root(mas->node)) {
3183 		cp = false;
3184 	} else {
3185 		mas_ascend(mas);
3186 		mas->offset = mte_parent_slot(mas->node);
3187 	}
3188 
3189 	if (cp && mast->l->offset)
3190 		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3191 
3192 	split = mast->bn->b_end;
3193 	mab_set_b_end(mast->bn, mast->l, mast->l->node);
3194 	mast->r->offset = mast->bn->b_end;
3195 	mab_set_b_end(mast->bn, mast->r, mast->r->node);
3196 	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3197 		cp = false;
3198 
3199 	if (cp)
3200 		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3201 			   mast->bn, mast->bn->b_end);
3202 
3203 	mast->bn->b_end--;
3204 	mast->bn->type = mte_node_type(mas->node);
3205 }
3206 
3207 /*
3208  * mast_split_data() - Split the data in the subtree state big node into regular
3209  * nodes.
3210  * @mast: The maple subtree state
3211  * @mas: The maple state
3212  * @split: The location to split the big node
3213  */
3214 static inline void mast_split_data(struct maple_subtree_state *mast,
3215 	   struct ma_state *mas, unsigned char split)
3216 {
3217 	unsigned char p_slot;
3218 
3219 	mab_mas_cp(mast->bn, 0, split, mast->l, true);
3220 	mte_set_pivot(mast->r->node, 0, mast->r->max);
3221 	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3222 	mast->l->offset = mte_parent_slot(mas->node);
3223 	mast->l->max = mast->bn->pivot[split];
3224 	mast->r->min = mast->l->max + 1;
3225 	if (mte_is_leaf(mas->node))
3226 		return;
3227 
3228 	p_slot = mast->orig_l->offset;
3229 	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3230 			     &p_slot, split);
3231 	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3232 			     &p_slot, split);
3233 }
3234 
3235 /*
3236  * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3237  * data to the right or left node if there is room.
3238  * @mas: The maple state
3239  * @height: The current height of the maple state
3240  * @mast: The maple subtree state
3241  * @left: Push left or not.
3242  *
3243  * Keeping the height of the tree low means faster lookups.
3244  *
3245  * Return: True if pushed, false otherwise.
3246  */
3247 static inline bool mas_push_data(struct ma_state *mas, int height,
3248 				 struct maple_subtree_state *mast, bool left)
3249 {
3250 	unsigned char slot_total = mast->bn->b_end;
3251 	unsigned char end, space, split;
3252 
3253 	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3254 	tmp_mas = *mas;
3255 	tmp_mas.depth = mast->l->depth;
3256 
3257 	if (left && !mas_prev_sibling(&tmp_mas))
3258 		return false;
3259 	else if (!left && !mas_next_sibling(&tmp_mas))
3260 		return false;
3261 
3262 	end = mas_data_end(&tmp_mas);
3263 	slot_total += end;
3264 	space = 2 * mt_slot_count(mas->node) - 2;
3265 	/* -2 instead of -1 to ensure there isn't a triple split */
3266 	if (ma_is_leaf(mast->bn->type))
3267 		space--;
3268 
3269 	if (mas->max == ULONG_MAX)
3270 		space--;
3271 
3272 	if (slot_total >= space)
3273 		return false;
3274 
3275 	/* Get the data; Fill mast->bn */
3276 	mast->bn->b_end++;
3277 	if (left) {
3278 		mab_shift_right(mast->bn, end + 1);
3279 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3280 		mast->bn->b_end = slot_total + 1;
3281 	} else {
3282 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3283 	}
3284 
3285 	/* Configure mast for splitting of mast->bn */
3286 	split = mt_slots[mast->bn->type] - 2;
3287 	if (left) {
3288 		/*  Switch mas to prev node  */
3289 		*mas = tmp_mas;
3290 		/* Start using mast->l for the left side. */
3291 		tmp_mas.node = mast->l->node;
3292 		*mast->l = tmp_mas;
3293 	} else {
3294 		tmp_mas.node = mast->r->node;
3295 		*mast->r = tmp_mas;
3296 		split = slot_total - split;
3297 	}
3298 	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3299 	/* Update parent slot for split calculation. */
3300 	if (left)
3301 		mast->orig_l->offset += end + 1;
3302 
3303 	mast_split_data(mast, mas, split);
3304 	mast_fill_bnode(mast, mas, 2);
3305 	mas_split_final_node(mast, mas, height + 1);
3306 	return true;
3307 }
3308 
3309 /*
3310  * mas_split() - Split data that is too big for one node into two.
3311  * @mas: The maple state
3312  * @b_node: The maple big node
3313  * Return: 1 on success, 0 on failure.
3314  */
3315 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3316 {
3317 	struct maple_subtree_state mast;
3318 	int height = 0;
3319 	unsigned char mid_split, split = 0;
3320 	struct maple_enode *old;
3321 
3322 	/*
3323 	 * Splitting is handled differently from any other B-tree; the Maple
3324 	 * Tree splits upwards.  Splitting up means that the split operation
3325 	 * occurs when the walk of the tree hits the leaves and not on the way
3326 	 * down.  The reason for splitting up is that it is impossible to know
3327 	 * how much space will be needed until the leaf is (or leaves are)
3328 	 * reached.  Since overwriting data is allowed and a range could
3329 	 * overwrite more than one range or result in changing one entry into 3
3330 	 * entries, it is impossible to know if a split is required until the
3331 	 * data is examined.
3332 	 *
3333 	 * Splitting is a balancing act between keeping allocations to a minimum
3334 	 * and avoiding a 'jitter' event where a tree is expanded to make room
3335 	 * for an entry followed by a contraction when the entry is removed.  To
3336 	 * accomplish the balance, there are empty slots remaining in both left
3337 	 * and right nodes after a split.
3338 	 */
3339 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3340 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3341 	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3342 	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3343 
3344 	trace_ma_op(__func__, mas);
3345 	mas->depth = mas_mt_height(mas);
3346 	/* Allocation failures will happen early. */
3347 	mas_node_count(mas, 1 + mas->depth * 2);
3348 	if (mas_is_err(mas))
3349 		return 0;
3350 
3351 	mast.l = &l_mas;
3352 	mast.r = &r_mas;
3353 	mast.orig_l = &prev_l_mas;
3354 	mast.orig_r = &prev_r_mas;
3355 	mast.bn = b_node;
3356 
3357 	while (height++ <= mas->depth) {
3358 		if (mt_slots[b_node->type] > b_node->b_end) {
3359 			mas_split_final_node(&mast, mas, height);
3360 			break;
3361 		}
3362 
3363 		l_mas = r_mas = *mas;
3364 		l_mas.node = mas_new_ma_node(mas, b_node);
3365 		r_mas.node = mas_new_ma_node(mas, b_node);
3366 		/*
3367 		 * Another way that 'jitter' is avoided is to terminate a split up early if the
3368 		 * left or right node has space to spare.  This is referred to as "pushing left"
3369 		 * or "pushing right" and is similar to the B* tree, except the nodes left or
3370 		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3371 		 * is a significant savings.
3372 		 */
3373 		/* Try to push left. */
3374 		if (mas_push_data(mas, height, &mast, true))
3375 			break;
3376 		/* Try to push right. */
3377 		if (mas_push_data(mas, height, &mast, false))
3378 			break;
3379 
3380 		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3381 		mast_split_data(&mast, mas, split);
3382 		/*
3383 		 * Usually correct, mab_mas_cp in the above call overwrites
3384 		 * r->max.
3385 		 */
3386 		mast.r->max = mas->max;
3387 		mast_fill_bnode(&mast, mas, 1);
3388 		prev_l_mas = *mast.l;
3389 		prev_r_mas = *mast.r;
3390 	}
3391 
3392 	/* Set the original node as dead */
3393 	old = mas->node;
3394 	mas->node = l_mas.node;
3395 	mas_wmb_replace(mas, old);
3396 	mtree_range_walk(mas);
3397 	return 1;
3398 }
3399 
3400 /*
3401  * mas_reuse_node() - Reuse the node to store the data.
3402  * @wr_mas: The maple write state
3403  * @bn: The maple big node
3404  * @end: The end of the data.
3405  *
3406  * Will always return false in RCU mode.
3407  *
3408  * Return: True if node was reused, false otherwise.
3409  */
3410 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3411 			  struct maple_big_node *bn, unsigned char end)
3412 {
3413 	/* Need to be rcu safe. */
3414 	if (mt_in_rcu(wr_mas->mas->tree))
3415 		return false;
3416 
3417 	if (end > bn->b_end) {
3418 		int clear = mt_slots[wr_mas->type] - bn->b_end;
3419 
3420 		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3421 		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3422 	}
3423 	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3424 	return true;
3425 }
3426 
3427 /*
3428  * mas_commit_b_node() - Commit the big node into the tree.
3429  * @wr_mas: The maple write state
3430  * @b_node: The maple big node
3431  * @end: The end of the data.
3432  */
3433 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3434 			    struct maple_big_node *b_node, unsigned char end)
3435 {
3436 	struct maple_node *node;
3437 	struct maple_enode *old_enode;
3438 	unsigned char b_end = b_node->b_end;
3439 	enum maple_type b_type = b_node->type;
3440 
3441 	old_enode = wr_mas->mas->node;
3442 	if ((b_end < mt_min_slots[b_type]) &&
3443 	    (!mte_is_root(old_enode)) &&
3444 	    (mas_mt_height(wr_mas->mas) > 1))
3445 		return mas_rebalance(wr_mas->mas, b_node);
3446 
3447 	if (b_end >= mt_slots[b_type])
3448 		return mas_split(wr_mas->mas, b_node);
3449 
3450 	if (mas_reuse_node(wr_mas, b_node, end))
3451 		goto reuse_node;
3452 
3453 	mas_node_count(wr_mas->mas, 1);
3454 	if (mas_is_err(wr_mas->mas))
3455 		return 0;
3456 
3457 	node = mas_pop_node(wr_mas->mas);
3458 	node->parent = mas_mn(wr_mas->mas)->parent;
3459 	wr_mas->mas->node = mt_mk_node(node, b_type);
3460 	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3461 	mas_replace_node(wr_mas->mas, old_enode);
3462 reuse_node:
3463 	mas_update_gap(wr_mas->mas);
3464 	wr_mas->mas->end = b_end;
3465 	return 1;
3466 }
3467 
3468 /*
3469  * mas_root_expand() - Expand a root to a node
3470  * @mas: The maple state
3471  * @entry: The entry to store into the tree
3472  */
3473 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3474 {
3475 	void *contents = mas_root_locked(mas);
3476 	enum maple_type type = maple_leaf_64;
3477 	struct maple_node *node;
3478 	void __rcu **slots;
3479 	unsigned long *pivots;
3480 	int slot = 0;
3481 
3482 	mas_node_count(mas, 1);
3483 	if (unlikely(mas_is_err(mas)))
3484 		return 0;
3485 
3486 	node = mas_pop_node(mas);
3487 	pivots = ma_pivots(node, type);
3488 	slots = ma_slots(node, type);
3489 	node->parent = ma_parent_ptr(mas_tree_parent(mas));
3490 	mas->node = mt_mk_node(node, type);
3491 	mas->status = ma_active;
3492 
3493 	if (mas->index) {
3494 		if (contents) {
3495 			rcu_assign_pointer(slots[slot], contents);
3496 			if (likely(mas->index > 1))
3497 				slot++;
3498 		}
3499 		pivots[slot++] = mas->index - 1;
3500 	}
3501 
3502 	rcu_assign_pointer(slots[slot], entry);
3503 	mas->offset = slot;
3504 	pivots[slot] = mas->last;
3505 	if (mas->last != ULONG_MAX)
3506 		pivots[++slot] = ULONG_MAX;
3507 
3508 	mas->depth = 1;
3509 	mas_set_height(mas);
3510 	ma_set_meta(node, maple_leaf_64, 0, slot);
3511 	/* swap the new root into the tree */
3512 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3513 	return slot;
3514 }
3515 
3516 static inline void mas_store_root(struct ma_state *mas, void *entry)
3517 {
3518 	if (likely((mas->last != 0) || (mas->index != 0)))
3519 		mas_root_expand(mas, entry);
3520 	else if (((unsigned long) (entry) & 3) == 2)
3521 		mas_root_expand(mas, entry);
3522 	else {
3523 		rcu_assign_pointer(mas->tree->ma_root, entry);
3524 		mas->status = ma_start;
3525 	}
3526 }
3527 
3528 /*
3529  * mas_is_span_wr() - Check if the write needs to be treated as a write that
3530  * spans the node.
3531  * @mas: The maple state
3532  * @piv: The pivot value being written
3533  * @type: The maple node type
3534  * @entry: The data to write
3535  *
3536  * Spanning writes are writes that start in one node and end in another OR if
3537  * the write of a %NULL will cause the node to end with a %NULL.
3538  *
3539  * Return: True if this is a spanning write, false otherwise.
3540  */
3541 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3542 {
3543 	unsigned long max = wr_mas->r_max;
3544 	unsigned long last = wr_mas->mas->last;
3545 	enum maple_type type = wr_mas->type;
3546 	void *entry = wr_mas->entry;
3547 
3548 	/* Contained in this pivot, fast path */
3549 	if (last < max)
3550 		return false;
3551 
3552 	if (ma_is_leaf(type)) {
3553 		max = wr_mas->mas->max;
3554 		if (last < max)
3555 			return false;
3556 	}
3557 
3558 	if (last == max) {
3559 		/*
3560 		 * The last entry of leaf node cannot be NULL unless it is the
3561 		 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3562 		 */
3563 		if (entry || last == ULONG_MAX)
3564 			return false;
3565 	}
3566 
3567 	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3568 	return true;
3569 }
3570 
3571 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3572 {
3573 	wr_mas->type = mte_node_type(wr_mas->mas->node);
3574 	mas_wr_node_walk(wr_mas);
3575 	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3576 }
3577 
3578 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3579 {
3580 	wr_mas->mas->max = wr_mas->r_max;
3581 	wr_mas->mas->min = wr_mas->r_min;
3582 	wr_mas->mas->node = wr_mas->content;
3583 	wr_mas->mas->offset = 0;
3584 	wr_mas->mas->depth++;
3585 }
3586 /*
3587  * mas_wr_walk() - Walk the tree for a write.
3588  * @wr_mas: The maple write state
3589  *
3590  * Uses mas_slot_locked() and does not need to worry about dead nodes.
3591  *
3592  * Return: True if it's contained in a node, false on spanning write.
3593  */
3594 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3595 {
3596 	struct ma_state *mas = wr_mas->mas;
3597 
3598 	while (true) {
3599 		mas_wr_walk_descend(wr_mas);
3600 		if (unlikely(mas_is_span_wr(wr_mas)))
3601 			return false;
3602 
3603 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3604 						  mas->offset);
3605 		if (ma_is_leaf(wr_mas->type))
3606 			return true;
3607 
3608 		mas_wr_walk_traverse(wr_mas);
3609 	}
3610 
3611 	return true;
3612 }
3613 
3614 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3615 {
3616 	struct ma_state *mas = wr_mas->mas;
3617 
3618 	while (true) {
3619 		mas_wr_walk_descend(wr_mas);
3620 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3621 						  mas->offset);
3622 		if (ma_is_leaf(wr_mas->type))
3623 			return true;
3624 		mas_wr_walk_traverse(wr_mas);
3625 
3626 	}
3627 	return true;
3628 }
3629 /*
3630  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3631  * @l_wr_mas: The left maple write state
3632  * @r_wr_mas: The right maple write state
3633  */
3634 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3635 					    struct ma_wr_state *r_wr_mas)
3636 {
3637 	struct ma_state *r_mas = r_wr_mas->mas;
3638 	struct ma_state *l_mas = l_wr_mas->mas;
3639 	unsigned char l_slot;
3640 
3641 	l_slot = l_mas->offset;
3642 	if (!l_wr_mas->content)
3643 		l_mas->index = l_wr_mas->r_min;
3644 
3645 	if ((l_mas->index == l_wr_mas->r_min) &&
3646 		 (l_slot &&
3647 		  !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3648 		if (l_slot > 1)
3649 			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3650 		else
3651 			l_mas->index = l_mas->min;
3652 
3653 		l_mas->offset = l_slot - 1;
3654 	}
3655 
3656 	if (!r_wr_mas->content) {
3657 		if (r_mas->last < r_wr_mas->r_max)
3658 			r_mas->last = r_wr_mas->r_max;
3659 		r_mas->offset++;
3660 	} else if ((r_mas->last == r_wr_mas->r_max) &&
3661 	    (r_mas->last < r_mas->max) &&
3662 	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3663 		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3664 					     r_wr_mas->type, r_mas->offset + 1);
3665 		r_mas->offset++;
3666 	}
3667 }
3668 
3669 static inline void *mas_state_walk(struct ma_state *mas)
3670 {
3671 	void *entry;
3672 
3673 	entry = mas_start(mas);
3674 	if (mas_is_none(mas))
3675 		return NULL;
3676 
3677 	if (mas_is_ptr(mas))
3678 		return entry;
3679 
3680 	return mtree_range_walk(mas);
3681 }
3682 
3683 /*
3684  * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3685  * to date.
3686  *
3687  * @mas: The maple state.
3688  *
3689  * Note: Leaves mas in undesirable state.
3690  * Return: The entry for @mas->index or %NULL on dead node.
3691  */
3692 static inline void *mtree_lookup_walk(struct ma_state *mas)
3693 {
3694 	unsigned long *pivots;
3695 	unsigned char offset;
3696 	struct maple_node *node;
3697 	struct maple_enode *next;
3698 	enum maple_type type;
3699 	void __rcu **slots;
3700 	unsigned char end;
3701 
3702 	next = mas->node;
3703 	do {
3704 		node = mte_to_node(next);
3705 		type = mte_node_type(next);
3706 		pivots = ma_pivots(node, type);
3707 		end = mt_pivots[type];
3708 		offset = 0;
3709 		do {
3710 			if (pivots[offset] >= mas->index)
3711 				break;
3712 		} while (++offset < end);
3713 
3714 		slots = ma_slots(node, type);
3715 		next = mt_slot(mas->tree, slots, offset);
3716 		if (unlikely(ma_dead_node(node)))
3717 			goto dead_node;
3718 	} while (!ma_is_leaf(type));
3719 
3720 	return (void *)next;
3721 
3722 dead_node:
3723 	mas_reset(mas);
3724 	return NULL;
3725 }
3726 
3727 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
3728 /*
3729  * mas_new_root() - Create a new root node that only contains the entry passed
3730  * in.
3731  * @mas: The maple state
3732  * @entry: The entry to store.
3733  *
3734  * Only valid when the index == 0 and the last == ULONG_MAX
3735  *
3736  * Return 0 on error, 1 on success.
3737  */
3738 static inline int mas_new_root(struct ma_state *mas, void *entry)
3739 {
3740 	struct maple_enode *root = mas_root_locked(mas);
3741 	enum maple_type type = maple_leaf_64;
3742 	struct maple_node *node;
3743 	void __rcu **slots;
3744 	unsigned long *pivots;
3745 
3746 	if (!entry && !mas->index && mas->last == ULONG_MAX) {
3747 		mas->depth = 0;
3748 		mas_set_height(mas);
3749 		rcu_assign_pointer(mas->tree->ma_root, entry);
3750 		mas->status = ma_start;
3751 		goto done;
3752 	}
3753 
3754 	mas_node_count(mas, 1);
3755 	if (mas_is_err(mas))
3756 		return 0;
3757 
3758 	node = mas_pop_node(mas);
3759 	pivots = ma_pivots(node, type);
3760 	slots = ma_slots(node, type);
3761 	node->parent = ma_parent_ptr(mas_tree_parent(mas));
3762 	mas->node = mt_mk_node(node, type);
3763 	mas->status = ma_active;
3764 	rcu_assign_pointer(slots[0], entry);
3765 	pivots[0] = mas->last;
3766 	mas->depth = 1;
3767 	mas_set_height(mas);
3768 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3769 
3770 done:
3771 	if (xa_is_node(root))
3772 		mte_destroy_walk(root, mas->tree);
3773 
3774 	return 1;
3775 }
3776 /*
3777  * mas_wr_spanning_store() - Create a subtree with the store operation completed
3778  * and new nodes where necessary, then place the sub-tree in the actual tree.
3779  * Note that mas is expected to point to the node which caused the store to
3780  * span.
3781  * @wr_mas: The maple write state
3782  *
3783  * Return: 0 on error, positive on success.
3784  */
3785 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3786 {
3787 	struct maple_subtree_state mast;
3788 	struct maple_big_node b_node;
3789 	struct ma_state *mas;
3790 	unsigned char height;
3791 
3792 	/* Left and Right side of spanning store */
3793 	MA_STATE(l_mas, NULL, 0, 0);
3794 	MA_STATE(r_mas, NULL, 0, 0);
3795 	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3796 	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3797 
3798 	/*
3799 	 * A store operation that spans multiple nodes is called a spanning
3800 	 * store and is handled early in the store call stack by the function
3801 	 * mas_is_span_wr().  When a spanning store is identified, the maple
3802 	 * state is duplicated.  The first maple state walks the left tree path
3803 	 * to ``index``, the duplicate walks the right tree path to ``last``.
3804 	 * The data in the two nodes are combined into a single node, two nodes,
3805 	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
3806 	 * written to the last entry of a node is considered a spanning store as
3807 	 * a rebalance is required for the operation to complete and an overflow
3808 	 * of data may happen.
3809 	 */
3810 	mas = wr_mas->mas;
3811 	trace_ma_op(__func__, mas);
3812 
3813 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
3814 		return mas_new_root(mas, wr_mas->entry);
3815 	/*
3816 	 * Node rebalancing may occur due to this store, so there may be three new
3817 	 * entries per level plus a new root.
3818 	 */
3819 	height = mas_mt_height(mas);
3820 	mas_node_count(mas, 1 + height * 3);
3821 	if (mas_is_err(mas))
3822 		return 0;
3823 
3824 	/*
3825 	 * Set up right side.  Need to get to the next offset after the spanning
3826 	 * store to ensure it's not NULL and to combine both the next node and
3827 	 * the node with the start together.
3828 	 */
3829 	r_mas = *mas;
3830 	/* Avoid overflow, walk to next slot in the tree. */
3831 	if (r_mas.last + 1)
3832 		r_mas.last++;
3833 
3834 	r_mas.index = r_mas.last;
3835 	mas_wr_walk_index(&r_wr_mas);
3836 	r_mas.last = r_mas.index = mas->last;
3837 
3838 	/* Set up left side. */
3839 	l_mas = *mas;
3840 	mas_wr_walk_index(&l_wr_mas);
3841 
3842 	if (!wr_mas->entry) {
3843 		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
3844 		mas->offset = l_mas.offset;
3845 		mas->index = l_mas.index;
3846 		mas->last = l_mas.last = r_mas.last;
3847 	}
3848 
3849 	/* expanding NULLs may make this cover the entire range */
3850 	if (!l_mas.index && r_mas.last == ULONG_MAX) {
3851 		mas_set_range(mas, 0, ULONG_MAX);
3852 		return mas_new_root(mas, wr_mas->entry);
3853 	}
3854 
3855 	memset(&b_node, 0, sizeof(struct maple_big_node));
3856 	/* Copy l_mas and store the value in b_node. */
3857 	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
3858 	/* Copy r_mas into b_node. */
3859 	if (r_mas.offset <= r_mas.end)
3860 		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
3861 			   &b_node, b_node.b_end + 1);
3862 	else
3863 		b_node.b_end++;
3864 
3865 	/* Stop spanning searches by searching for just index. */
3866 	l_mas.index = l_mas.last = mas->index;
3867 
3868 	mast.bn = &b_node;
3869 	mast.orig_l = &l_mas;
3870 	mast.orig_r = &r_mas;
3871 	/* Combine l_mas and r_mas and split them up evenly again. */
3872 	return mas_spanning_rebalance(mas, &mast, height + 1);
3873 }
3874 
3875 /*
3876  * mas_wr_node_store() - Attempt to store the value in a node
3877  * @wr_mas: The maple write state
3878  *
3879  * Attempts to reuse the node, but may allocate.
3880  *
3881  * Return: True if stored, false otherwise
3882  */
3883 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
3884 				     unsigned char new_end)
3885 {
3886 	struct ma_state *mas = wr_mas->mas;
3887 	void __rcu **dst_slots;
3888 	unsigned long *dst_pivots;
3889 	unsigned char dst_offset, offset_end = wr_mas->offset_end;
3890 	struct maple_node reuse, *newnode;
3891 	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
3892 	bool in_rcu = mt_in_rcu(mas->tree);
3893 
3894 	/* Check if there is enough data. The room is enough. */
3895 	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3896 	    !(mas->mas_flags & MA_STATE_BULK))
3897 		return false;
3898 
3899 	if (mas->last == wr_mas->end_piv)
3900 		offset_end++; /* don't copy this offset */
3901 	else if (unlikely(wr_mas->r_max == ULONG_MAX))
3902 		mas_bulk_rebalance(mas, mas->end, wr_mas->type);
3903 
3904 	/* set up node. */
3905 	if (in_rcu) {
3906 		mas_node_count(mas, 1);
3907 		if (mas_is_err(mas))
3908 			return false;
3909 
3910 		newnode = mas_pop_node(mas);
3911 	} else {
3912 		memset(&reuse, 0, sizeof(struct maple_node));
3913 		newnode = &reuse;
3914 	}
3915 
3916 	newnode->parent = mas_mn(mas)->parent;
3917 	dst_pivots = ma_pivots(newnode, wr_mas->type);
3918 	dst_slots = ma_slots(newnode, wr_mas->type);
3919 	/* Copy from start to insert point */
3920 	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
3921 	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
3922 
3923 	/* Handle insert of new range starting after old range */
3924 	if (wr_mas->r_min < mas->index) {
3925 		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
3926 		dst_pivots[mas->offset++] = mas->index - 1;
3927 	}
3928 
3929 	/* Store the new entry and range end. */
3930 	if (mas->offset < node_pivots)
3931 		dst_pivots[mas->offset] = mas->last;
3932 	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
3933 
3934 	/*
3935 	 * this range wrote to the end of the node or it overwrote the rest of
3936 	 * the data
3937 	 */
3938 	if (offset_end > mas->end)
3939 		goto done;
3940 
3941 	dst_offset = mas->offset + 1;
3942 	/* Copy to the end of node if necessary. */
3943 	copy_size = mas->end - offset_end + 1;
3944 	memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
3945 	       sizeof(void *) * copy_size);
3946 	memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
3947 	       sizeof(unsigned long) * (copy_size - 1));
3948 
3949 	if (new_end < node_pivots)
3950 		dst_pivots[new_end] = mas->max;
3951 
3952 done:
3953 	mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
3954 	if (in_rcu) {
3955 		struct maple_enode *old_enode = mas->node;
3956 
3957 		mas->node = mt_mk_node(newnode, wr_mas->type);
3958 		mas_replace_node(mas, old_enode);
3959 	} else {
3960 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
3961 	}
3962 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
3963 	mas_update_gap(mas);
3964 	mas->end = new_end;
3965 	return true;
3966 }
3967 
3968 /*
3969  * mas_wr_slot_store: Attempt to store a value in a slot.
3970  * @wr_mas: the maple write state
3971  *
3972  * Return: True if stored, false otherwise
3973  */
3974 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
3975 {
3976 	struct ma_state *mas = wr_mas->mas;
3977 	unsigned char offset = mas->offset;
3978 	void __rcu **slots = wr_mas->slots;
3979 	bool gap = false;
3980 
3981 	gap |= !mt_slot_locked(mas->tree, slots, offset);
3982 	gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
3983 
3984 	if (wr_mas->offset_end - offset == 1) {
3985 		if (mas->index == wr_mas->r_min) {
3986 			/* Overwriting the range and a part of the next one */
3987 			rcu_assign_pointer(slots[offset], wr_mas->entry);
3988 			wr_mas->pivots[offset] = mas->last;
3989 		} else {
3990 			/* Overwriting a part of the range and the next one */
3991 			rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
3992 			wr_mas->pivots[offset] = mas->index - 1;
3993 			mas->offset++; /* Keep mas accurate. */
3994 		}
3995 	} else if (!mt_in_rcu(mas->tree)) {
3996 		/*
3997 		 * Expand the range, only partially overwriting the previous and
3998 		 * next ranges
3999 		 */
4000 		gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
4001 		rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4002 		wr_mas->pivots[offset] = mas->index - 1;
4003 		wr_mas->pivots[offset + 1] = mas->last;
4004 		mas->offset++; /* Keep mas accurate. */
4005 	} else {
4006 		return false;
4007 	}
4008 
4009 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4010 	/*
4011 	 * Only update gap when the new entry is empty or there is an empty
4012 	 * entry in the original two ranges.
4013 	 */
4014 	if (!wr_mas->entry || gap)
4015 		mas_update_gap(mas);
4016 
4017 	return true;
4018 }
4019 
4020 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4021 {
4022 	struct ma_state *mas = wr_mas->mas;
4023 
4024 	if (!wr_mas->slots[wr_mas->offset_end]) {
4025 		/* If this one is null, the next and prev are not */
4026 		mas->last = wr_mas->end_piv;
4027 	} else {
4028 		/* Check next slot(s) if we are overwriting the end */
4029 		if ((mas->last == wr_mas->end_piv) &&
4030 		    (mas->end != wr_mas->offset_end) &&
4031 		    !wr_mas->slots[wr_mas->offset_end + 1]) {
4032 			wr_mas->offset_end++;
4033 			if (wr_mas->offset_end == mas->end)
4034 				mas->last = mas->max;
4035 			else
4036 				mas->last = wr_mas->pivots[wr_mas->offset_end];
4037 			wr_mas->end_piv = mas->last;
4038 		}
4039 	}
4040 
4041 	if (!wr_mas->content) {
4042 		/* If this one is null, the next and prev are not */
4043 		mas->index = wr_mas->r_min;
4044 	} else {
4045 		/* Check prev slot if we are overwriting the start */
4046 		if (mas->index == wr_mas->r_min && mas->offset &&
4047 		    !wr_mas->slots[mas->offset - 1]) {
4048 			mas->offset--;
4049 			wr_mas->r_min = mas->index =
4050 				mas_safe_min(mas, wr_mas->pivots, mas->offset);
4051 			wr_mas->r_max = wr_mas->pivots[mas->offset];
4052 		}
4053 	}
4054 }
4055 
4056 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4057 {
4058 	while ((wr_mas->offset_end < wr_mas->mas->end) &&
4059 	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4060 		wr_mas->offset_end++;
4061 
4062 	if (wr_mas->offset_end < wr_mas->mas->end)
4063 		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4064 	else
4065 		wr_mas->end_piv = wr_mas->mas->max;
4066 
4067 	if (!wr_mas->entry)
4068 		mas_wr_extend_null(wr_mas);
4069 }
4070 
4071 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4072 {
4073 	struct ma_state *mas = wr_mas->mas;
4074 	unsigned char new_end = mas->end + 2;
4075 
4076 	new_end -= wr_mas->offset_end - mas->offset;
4077 	if (wr_mas->r_min == mas->index)
4078 		new_end--;
4079 
4080 	if (wr_mas->end_piv == mas->last)
4081 		new_end--;
4082 
4083 	return new_end;
4084 }
4085 
4086 /*
4087  * mas_wr_append: Attempt to append
4088  * @wr_mas: the maple write state
4089  * @new_end: The end of the node after the modification
4090  *
4091  * This is currently unsafe in rcu mode since the end of the node may be cached
4092  * by readers while the node contents may be updated which could result in
4093  * inaccurate information.
4094  *
4095  * Return: True if appended, false otherwise
4096  */
4097 static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4098 		unsigned char new_end)
4099 {
4100 	struct ma_state *mas;
4101 	void __rcu **slots;
4102 	unsigned char end;
4103 
4104 	mas = wr_mas->mas;
4105 	if (mt_in_rcu(mas->tree))
4106 		return false;
4107 
4108 	end = mas->end;
4109 	if (mas->offset != end)
4110 		return false;
4111 
4112 	if (new_end < mt_pivots[wr_mas->type]) {
4113 		wr_mas->pivots[new_end] = wr_mas->pivots[end];
4114 		ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4115 	}
4116 
4117 	slots = wr_mas->slots;
4118 	if (new_end == end + 1) {
4119 		if (mas->last == wr_mas->r_max) {
4120 			/* Append to end of range */
4121 			rcu_assign_pointer(slots[new_end], wr_mas->entry);
4122 			wr_mas->pivots[end] = mas->index - 1;
4123 			mas->offset = new_end;
4124 		} else {
4125 			/* Append to start of range */
4126 			rcu_assign_pointer(slots[new_end], wr_mas->content);
4127 			wr_mas->pivots[end] = mas->last;
4128 			rcu_assign_pointer(slots[end], wr_mas->entry);
4129 		}
4130 	} else {
4131 		/* Append to the range without touching any boundaries. */
4132 		rcu_assign_pointer(slots[new_end], wr_mas->content);
4133 		wr_mas->pivots[end + 1] = mas->last;
4134 		rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4135 		wr_mas->pivots[end] = mas->index - 1;
4136 		mas->offset = end + 1;
4137 	}
4138 
4139 	if (!wr_mas->content || !wr_mas->entry)
4140 		mas_update_gap(mas);
4141 
4142 	mas->end = new_end;
4143 	trace_ma_write(__func__, mas, new_end, wr_mas->entry);
4144 	return  true;
4145 }
4146 
4147 /*
4148  * mas_wr_bnode() - Slow path for a modification.
4149  * @wr_mas: The write maple state
4150  *
4151  * This is where split, rebalance end up.
4152  */
4153 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4154 {
4155 	struct maple_big_node b_node;
4156 
4157 	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4158 	memset(&b_node, 0, sizeof(struct maple_big_node));
4159 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4160 	mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
4161 }
4162 
4163 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4164 {
4165 	struct ma_state *mas = wr_mas->mas;
4166 	unsigned char new_end;
4167 
4168 	/* Direct replacement */
4169 	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4170 		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4171 		if (!!wr_mas->entry ^ !!wr_mas->content)
4172 			mas_update_gap(mas);
4173 		return;
4174 	}
4175 
4176 	/*
4177 	 * new_end exceeds the size of the maple node and cannot enter the fast
4178 	 * path.
4179 	 */
4180 	new_end = mas_wr_new_end(wr_mas);
4181 	if (new_end >= mt_slots[wr_mas->type])
4182 		goto slow_path;
4183 
4184 	/* Attempt to append */
4185 	if (mas_wr_append(wr_mas, new_end))
4186 		return;
4187 
4188 	if (new_end == mas->end && mas_wr_slot_store(wr_mas))
4189 		return;
4190 
4191 	if (mas_wr_node_store(wr_mas, new_end))
4192 		return;
4193 
4194 	if (mas_is_err(mas))
4195 		return;
4196 
4197 slow_path:
4198 	mas_wr_bnode(wr_mas);
4199 }
4200 
4201 /*
4202  * mas_wr_store_entry() - Internal call to store a value
4203  * @mas: The maple state
4204  * @entry: The entry to store.
4205  *
4206  * Return: The contents that was stored at the index.
4207  */
4208 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4209 {
4210 	struct ma_state *mas = wr_mas->mas;
4211 
4212 	wr_mas->content = mas_start(mas);
4213 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4214 		mas_store_root(mas, wr_mas->entry);
4215 		return wr_mas->content;
4216 	}
4217 
4218 	if (unlikely(!mas_wr_walk(wr_mas))) {
4219 		mas_wr_spanning_store(wr_mas);
4220 		return wr_mas->content;
4221 	}
4222 
4223 	/* At this point, we are at the leaf node that needs to be altered. */
4224 	mas_wr_end_piv(wr_mas);
4225 	/* New root for a single pointer */
4226 	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4227 		mas_new_root(mas, wr_mas->entry);
4228 		return wr_mas->content;
4229 	}
4230 
4231 	mas_wr_modify(wr_mas);
4232 	return wr_mas->content;
4233 }
4234 
4235 /**
4236  * mas_insert() - Internal call to insert a value
4237  * @mas: The maple state
4238  * @entry: The entry to store
4239  *
4240  * Return: %NULL or the contents that already exists at the requested index
4241  * otherwise.  The maple state needs to be checked for error conditions.
4242  */
4243 static inline void *mas_insert(struct ma_state *mas, void *entry)
4244 {
4245 	MA_WR_STATE(wr_mas, mas, entry);
4246 
4247 	/*
4248 	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4249 	 * tree.  If the insert fits exactly into an existing gap with a value
4250 	 * of NULL, then the slot only needs to be written with the new value.
4251 	 * If the range being inserted is adjacent to another range, then only a
4252 	 * single pivot needs to be inserted (as well as writing the entry).  If
4253 	 * the new range is within a gap but does not touch any other ranges,
4254 	 * then two pivots need to be inserted: the start - 1, and the end.  As
4255 	 * usual, the entry must be written.  Most operations require a new node
4256 	 * to be allocated and replace an existing node to ensure RCU safety,
4257 	 * when in RCU mode.  The exception to requiring a newly allocated node
4258 	 * is when inserting at the end of a node (appending).  When done
4259 	 * carefully, appending can reuse the node in place.
4260 	 */
4261 	wr_mas.content = mas_start(mas);
4262 	if (wr_mas.content)
4263 		goto exists;
4264 
4265 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4266 		mas_store_root(mas, entry);
4267 		return NULL;
4268 	}
4269 
4270 	/* spanning writes always overwrite something */
4271 	if (!mas_wr_walk(&wr_mas))
4272 		goto exists;
4273 
4274 	/* At this point, we are at the leaf node that needs to be altered. */
4275 	wr_mas.offset_end = mas->offset;
4276 	wr_mas.end_piv = wr_mas.r_max;
4277 
4278 	if (wr_mas.content || (mas->last > wr_mas.r_max))
4279 		goto exists;
4280 
4281 	if (!entry)
4282 		return NULL;
4283 
4284 	mas_wr_modify(&wr_mas);
4285 	return wr_mas.content;
4286 
4287 exists:
4288 	mas_set_err(mas, -EEXIST);
4289 	return wr_mas.content;
4290 
4291 }
4292 
4293 /**
4294  * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
4295  * @mas: The maple state.
4296  * @startp: Pointer to ID.
4297  * @range_lo: Lower bound of range to search.
4298  * @range_hi: Upper bound of range to search.
4299  * @entry: The entry to store.
4300  * @next: Pointer to next ID to allocate.
4301  * @gfp: The GFP_FLAGS to use for allocations.
4302  *
4303  * Return: 0 if the allocation succeeded without wrapping, 1 if the
4304  * allocation succeeded after wrapping, or -EBUSY if there are no
4305  * free entries.
4306  */
4307 int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
4308 		void *entry, unsigned long range_lo, unsigned long range_hi,
4309 		unsigned long *next, gfp_t gfp)
4310 {
4311 	unsigned long min = range_lo;
4312 	int ret = 0;
4313 
4314 	range_lo = max(min, *next);
4315 	ret = mas_empty_area(mas, range_lo, range_hi, 1);
4316 	if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
4317 		mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
4318 		ret = 1;
4319 	}
4320 	if (ret < 0 && range_lo > min) {
4321 		ret = mas_empty_area(mas, min, range_hi, 1);
4322 		if (ret == 0)
4323 			ret = 1;
4324 	}
4325 	if (ret < 0)
4326 		return ret;
4327 
4328 	do {
4329 		mas_insert(mas, entry);
4330 	} while (mas_nomem(mas, gfp));
4331 	if (mas_is_err(mas))
4332 		return xa_err(mas->node);
4333 
4334 	*startp = mas->index;
4335 	*next = *startp + 1;
4336 	if (*next == 0)
4337 		mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
4338 
4339 	return ret;
4340 }
4341 EXPORT_SYMBOL(mas_alloc_cyclic);
4342 
4343 static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4344 {
4345 retry:
4346 	mas_set(mas, index);
4347 	mas_state_walk(mas);
4348 	if (mas_is_start(mas))
4349 		goto retry;
4350 }
4351 
4352 static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
4353 		struct maple_node *node, const unsigned long index)
4354 {
4355 	if (unlikely(ma_dead_node(node))) {
4356 		mas_rewalk(mas, index);
4357 		return true;
4358 	}
4359 	return false;
4360 }
4361 
4362 /*
4363  * mas_prev_node() - Find the prev non-null entry at the same level in the
4364  * tree.  The prev value will be mas->node[mas->offset] or the status will be
4365  * ma_none.
4366  * @mas: The maple state
4367  * @min: The lower limit to search
4368  *
4369  * The prev node value will be mas->node[mas->offset] or the status will be
4370  * ma_none.
4371  * Return: 1 if the node is dead, 0 otherwise.
4372  */
4373 static int mas_prev_node(struct ma_state *mas, unsigned long min)
4374 {
4375 	enum maple_type mt;
4376 	int offset, level;
4377 	void __rcu **slots;
4378 	struct maple_node *node;
4379 	unsigned long *pivots;
4380 	unsigned long max;
4381 
4382 	node = mas_mn(mas);
4383 	if (!mas->min)
4384 		goto no_entry;
4385 
4386 	max = mas->min - 1;
4387 	if (max < min)
4388 		goto no_entry;
4389 
4390 	level = 0;
4391 	do {
4392 		if (ma_is_root(node))
4393 			goto no_entry;
4394 
4395 		/* Walk up. */
4396 		if (unlikely(mas_ascend(mas)))
4397 			return 1;
4398 		offset = mas->offset;
4399 		level++;
4400 		node = mas_mn(mas);
4401 	} while (!offset);
4402 
4403 	offset--;
4404 	mt = mte_node_type(mas->node);
4405 	while (level > 1) {
4406 		level--;
4407 		slots = ma_slots(node, mt);
4408 		mas->node = mas_slot(mas, slots, offset);
4409 		if (unlikely(ma_dead_node(node)))
4410 			return 1;
4411 
4412 		mt = mte_node_type(mas->node);
4413 		node = mas_mn(mas);
4414 		pivots = ma_pivots(node, mt);
4415 		offset = ma_data_end(node, mt, pivots, max);
4416 		if (unlikely(ma_dead_node(node)))
4417 			return 1;
4418 	}
4419 
4420 	slots = ma_slots(node, mt);
4421 	mas->node = mas_slot(mas, slots, offset);
4422 	pivots = ma_pivots(node, mt);
4423 	if (unlikely(ma_dead_node(node)))
4424 		return 1;
4425 
4426 	if (likely(offset))
4427 		mas->min = pivots[offset - 1] + 1;
4428 	mas->max = max;
4429 	mas->offset = mas_data_end(mas);
4430 	if (unlikely(mte_dead_node(mas->node)))
4431 		return 1;
4432 
4433 	mas->end = mas->offset;
4434 	return 0;
4435 
4436 no_entry:
4437 	if (unlikely(ma_dead_node(node)))
4438 		return 1;
4439 
4440 	mas->status = ma_underflow;
4441 	return 0;
4442 }
4443 
4444 /*
4445  * mas_prev_slot() - Get the entry in the previous slot
4446  *
4447  * @mas: The maple state
4448  * @max: The minimum starting range
4449  * @empty: Can be empty
4450  * @set_underflow: Set the @mas->node to underflow state on limit.
4451  *
4452  * Return: The entry in the previous slot which is possibly NULL
4453  */
4454 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4455 {
4456 	void *entry;
4457 	void __rcu **slots;
4458 	unsigned long pivot;
4459 	enum maple_type type;
4460 	unsigned long *pivots;
4461 	struct maple_node *node;
4462 	unsigned long save_point = mas->index;
4463 
4464 retry:
4465 	node = mas_mn(mas);
4466 	type = mte_node_type(mas->node);
4467 	pivots = ma_pivots(node, type);
4468 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4469 		goto retry;
4470 
4471 	if (mas->min <= min) {
4472 		pivot = mas_safe_min(mas, pivots, mas->offset);
4473 
4474 		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4475 			goto retry;
4476 
4477 		if (pivot <= min)
4478 			goto underflow;
4479 	}
4480 
4481 again:
4482 	if (likely(mas->offset)) {
4483 		mas->offset--;
4484 		mas->last = mas->index - 1;
4485 		mas->index = mas_safe_min(mas, pivots, mas->offset);
4486 	} else  {
4487 		if (mas->index <= min)
4488 			goto underflow;
4489 
4490 		if (mas_prev_node(mas, min)) {
4491 			mas_rewalk(mas, save_point);
4492 			goto retry;
4493 		}
4494 
4495 		if (WARN_ON_ONCE(mas_is_underflow(mas)))
4496 			return NULL;
4497 
4498 		mas->last = mas->max;
4499 		node = mas_mn(mas);
4500 		type = mte_node_type(mas->node);
4501 		pivots = ma_pivots(node, type);
4502 		mas->index = pivots[mas->offset - 1] + 1;
4503 	}
4504 
4505 	slots = ma_slots(node, type);
4506 	entry = mas_slot(mas, slots, mas->offset);
4507 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4508 		goto retry;
4509 
4510 
4511 	if (likely(entry))
4512 		return entry;
4513 
4514 	if (!empty) {
4515 		if (mas->index <= min) {
4516 			mas->status = ma_underflow;
4517 			return NULL;
4518 		}
4519 
4520 		goto again;
4521 	}
4522 
4523 	return entry;
4524 
4525 underflow:
4526 	mas->status = ma_underflow;
4527 	return NULL;
4528 }
4529 
4530 /*
4531  * mas_next_node() - Get the next node at the same level in the tree.
4532  * @mas: The maple state
4533  * @max: The maximum pivot value to check.
4534  *
4535  * The next value will be mas->node[mas->offset] or the status will have
4536  * overflowed.
4537  * Return: 1 on dead node, 0 otherwise.
4538  */
4539 static int mas_next_node(struct ma_state *mas, struct maple_node *node,
4540 		unsigned long max)
4541 {
4542 	unsigned long min;
4543 	unsigned long *pivots;
4544 	struct maple_enode *enode;
4545 	struct maple_node *tmp;
4546 	int level = 0;
4547 	unsigned char node_end;
4548 	enum maple_type mt;
4549 	void __rcu **slots;
4550 
4551 	if (mas->max >= max)
4552 		goto overflow;
4553 
4554 	min = mas->max + 1;
4555 	level = 0;
4556 	do {
4557 		if (ma_is_root(node))
4558 			goto overflow;
4559 
4560 		/* Walk up. */
4561 		if (unlikely(mas_ascend(mas)))
4562 			return 1;
4563 
4564 		level++;
4565 		node = mas_mn(mas);
4566 		mt = mte_node_type(mas->node);
4567 		pivots = ma_pivots(node, mt);
4568 		node_end = ma_data_end(node, mt, pivots, mas->max);
4569 		if (unlikely(ma_dead_node(node)))
4570 			return 1;
4571 
4572 	} while (unlikely(mas->offset == node_end));
4573 
4574 	slots = ma_slots(node, mt);
4575 	mas->offset++;
4576 	enode = mas_slot(mas, slots, mas->offset);
4577 	if (unlikely(ma_dead_node(node)))
4578 		return 1;
4579 
4580 	if (level > 1)
4581 		mas->offset = 0;
4582 
4583 	while (unlikely(level > 1)) {
4584 		level--;
4585 		mas->node = enode;
4586 		node = mas_mn(mas);
4587 		mt = mte_node_type(mas->node);
4588 		slots = ma_slots(node, mt);
4589 		enode = mas_slot(mas, slots, 0);
4590 		if (unlikely(ma_dead_node(node)))
4591 			return 1;
4592 	}
4593 
4594 	if (!mas->offset)
4595 		pivots = ma_pivots(node, mt);
4596 
4597 	mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4598 	tmp = mte_to_node(enode);
4599 	mt = mte_node_type(enode);
4600 	pivots = ma_pivots(tmp, mt);
4601 	mas->end = ma_data_end(tmp, mt, pivots, mas->max);
4602 	if (unlikely(ma_dead_node(node)))
4603 		return 1;
4604 
4605 	mas->node = enode;
4606 	mas->min = min;
4607 	return 0;
4608 
4609 overflow:
4610 	if (unlikely(ma_dead_node(node)))
4611 		return 1;
4612 
4613 	mas->status = ma_overflow;
4614 	return 0;
4615 }
4616 
4617 /*
4618  * mas_next_slot() - Get the entry in the next slot
4619  *
4620  * @mas: The maple state
4621  * @max: The maximum starting range
4622  * @empty: Can be empty
4623  * @set_overflow: Should @mas->node be set to overflow when the limit is
4624  * reached.
4625  *
4626  * Return: The entry in the next slot which is possibly NULL
4627  */
4628 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4629 {
4630 	void __rcu **slots;
4631 	unsigned long *pivots;
4632 	unsigned long pivot;
4633 	enum maple_type type;
4634 	struct maple_node *node;
4635 	unsigned long save_point = mas->last;
4636 	void *entry;
4637 
4638 retry:
4639 	node = mas_mn(mas);
4640 	type = mte_node_type(mas->node);
4641 	pivots = ma_pivots(node, type);
4642 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4643 		goto retry;
4644 
4645 	if (mas->max >= max) {
4646 		if (likely(mas->offset < mas->end))
4647 			pivot = pivots[mas->offset];
4648 		else
4649 			pivot = mas->max;
4650 
4651 		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4652 			goto retry;
4653 
4654 		if (pivot >= max) { /* Was at the limit, next will extend beyond */
4655 			mas->status = ma_overflow;
4656 			return NULL;
4657 		}
4658 	}
4659 
4660 	if (likely(mas->offset < mas->end)) {
4661 		mas->index = pivots[mas->offset] + 1;
4662 again:
4663 		mas->offset++;
4664 		if (likely(mas->offset < mas->end))
4665 			mas->last = pivots[mas->offset];
4666 		else
4667 			mas->last = mas->max;
4668 	} else  {
4669 		if (mas->last >= max) {
4670 			mas->status = ma_overflow;
4671 			return NULL;
4672 		}
4673 
4674 		if (mas_next_node(mas, node, max)) {
4675 			mas_rewalk(mas, save_point);
4676 			goto retry;
4677 		}
4678 
4679 		if (WARN_ON_ONCE(mas_is_overflow(mas)))
4680 			return NULL;
4681 
4682 		mas->offset = 0;
4683 		mas->index = mas->min;
4684 		node = mas_mn(mas);
4685 		type = mte_node_type(mas->node);
4686 		pivots = ma_pivots(node, type);
4687 		mas->last = pivots[0];
4688 	}
4689 
4690 	slots = ma_slots(node, type);
4691 	entry = mt_slot(mas->tree, slots, mas->offset);
4692 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4693 		goto retry;
4694 
4695 	if (entry)
4696 		return entry;
4697 
4698 
4699 	if (!empty) {
4700 		if (mas->last >= max) {
4701 			mas->status = ma_overflow;
4702 			return NULL;
4703 		}
4704 
4705 		mas->index = mas->last + 1;
4706 		goto again;
4707 	}
4708 
4709 	return entry;
4710 }
4711 
4712 /*
4713  * mas_next_entry() - Internal function to get the next entry.
4714  * @mas: The maple state
4715  * @limit: The maximum range start.
4716  *
4717  * Set the @mas->node to the next entry and the range_start to
4718  * the beginning value for the entry.  Does not check beyond @limit.
4719  * Sets @mas->index and @mas->last to the range, Does not update @mas->index and
4720  * @mas->last on overflow.
4721  * Restarts on dead nodes.
4722  *
4723  * Return: the next entry or %NULL.
4724  */
4725 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4726 {
4727 	if (mas->last >= limit) {
4728 		mas->status = ma_overflow;
4729 		return NULL;
4730 	}
4731 
4732 	return mas_next_slot(mas, limit, false);
4733 }
4734 
4735 /*
4736  * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
4737  * highest gap address of a given size in a given node and descend.
4738  * @mas: The maple state
4739  * @size: The needed size.
4740  *
4741  * Return: True if found in a leaf, false otherwise.
4742  *
4743  */
4744 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4745 		unsigned long *gap_min, unsigned long *gap_max)
4746 {
4747 	enum maple_type type = mte_node_type(mas->node);
4748 	struct maple_node *node = mas_mn(mas);
4749 	unsigned long *pivots, *gaps;
4750 	void __rcu **slots;
4751 	unsigned long gap = 0;
4752 	unsigned long max, min;
4753 	unsigned char offset;
4754 
4755 	if (unlikely(mas_is_err(mas)))
4756 		return true;
4757 
4758 	if (ma_is_dense(type)) {
4759 		/* dense nodes. */
4760 		mas->offset = (unsigned char)(mas->index - mas->min);
4761 		return true;
4762 	}
4763 
4764 	pivots = ma_pivots(node, type);
4765 	slots = ma_slots(node, type);
4766 	gaps = ma_gaps(node, type);
4767 	offset = mas->offset;
4768 	min = mas_safe_min(mas, pivots, offset);
4769 	/* Skip out of bounds. */
4770 	while (mas->last < min)
4771 		min = mas_safe_min(mas, pivots, --offset);
4772 
4773 	max = mas_safe_pivot(mas, pivots, offset, type);
4774 	while (mas->index <= max) {
4775 		gap = 0;
4776 		if (gaps)
4777 			gap = gaps[offset];
4778 		else if (!mas_slot(mas, slots, offset))
4779 			gap = max - min + 1;
4780 
4781 		if (gap) {
4782 			if ((size <= gap) && (size <= mas->last - min + 1))
4783 				break;
4784 
4785 			if (!gaps) {
4786 				/* Skip the next slot, it cannot be a gap. */
4787 				if (offset < 2)
4788 					goto ascend;
4789 
4790 				offset -= 2;
4791 				max = pivots[offset];
4792 				min = mas_safe_min(mas, pivots, offset);
4793 				continue;
4794 			}
4795 		}
4796 
4797 		if (!offset)
4798 			goto ascend;
4799 
4800 		offset--;
4801 		max = min - 1;
4802 		min = mas_safe_min(mas, pivots, offset);
4803 	}
4804 
4805 	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4806 		goto no_space;
4807 
4808 	if (unlikely(ma_is_leaf(type))) {
4809 		mas->offset = offset;
4810 		*gap_min = min;
4811 		*gap_max = min + gap - 1;
4812 		return true;
4813 	}
4814 
4815 	/* descend, only happens under lock. */
4816 	mas->node = mas_slot(mas, slots, offset);
4817 	mas->min = min;
4818 	mas->max = max;
4819 	mas->offset = mas_data_end(mas);
4820 	return false;
4821 
4822 ascend:
4823 	if (!mte_is_root(mas->node))
4824 		return false;
4825 
4826 no_space:
4827 	mas_set_err(mas, -EBUSY);
4828 	return false;
4829 }
4830 
4831 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4832 {
4833 	enum maple_type type = mte_node_type(mas->node);
4834 	unsigned long pivot, min, gap = 0;
4835 	unsigned char offset, data_end;
4836 	unsigned long *gaps, *pivots;
4837 	void __rcu **slots;
4838 	struct maple_node *node;
4839 	bool found = false;
4840 
4841 	if (ma_is_dense(type)) {
4842 		mas->offset = (unsigned char)(mas->index - mas->min);
4843 		return true;
4844 	}
4845 
4846 	node = mas_mn(mas);
4847 	pivots = ma_pivots(node, type);
4848 	slots = ma_slots(node, type);
4849 	gaps = ma_gaps(node, type);
4850 	offset = mas->offset;
4851 	min = mas_safe_min(mas, pivots, offset);
4852 	data_end = ma_data_end(node, type, pivots, mas->max);
4853 	for (; offset <= data_end; offset++) {
4854 		pivot = mas_safe_pivot(mas, pivots, offset, type);
4855 
4856 		/* Not within lower bounds */
4857 		if (mas->index > pivot)
4858 			goto next_slot;
4859 
4860 		if (gaps)
4861 			gap = gaps[offset];
4862 		else if (!mas_slot(mas, slots, offset))
4863 			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4864 		else
4865 			goto next_slot;
4866 
4867 		if (gap >= size) {
4868 			if (ma_is_leaf(type)) {
4869 				found = true;
4870 				goto done;
4871 			}
4872 			if (mas->index <= pivot) {
4873 				mas->node = mas_slot(mas, slots, offset);
4874 				mas->min = min;
4875 				mas->max = pivot;
4876 				offset = 0;
4877 				break;
4878 			}
4879 		}
4880 next_slot:
4881 		min = pivot + 1;
4882 		if (mas->last <= pivot) {
4883 			mas_set_err(mas, -EBUSY);
4884 			return true;
4885 		}
4886 	}
4887 
4888 	if (mte_is_root(mas->node))
4889 		found = true;
4890 done:
4891 	mas->offset = offset;
4892 	return found;
4893 }
4894 
4895 /**
4896  * mas_walk() - Search for @mas->index in the tree.
4897  * @mas: The maple state.
4898  *
4899  * mas->index and mas->last will be set to the range if there is a value.  If
4900  * mas->status is ma_none, reset to ma_start
4901  *
4902  * Return: the entry at the location or %NULL.
4903  */
4904 void *mas_walk(struct ma_state *mas)
4905 {
4906 	void *entry;
4907 
4908 	if (!mas_is_active(mas) || !mas_is_start(mas))
4909 		mas->status = ma_start;
4910 retry:
4911 	entry = mas_state_walk(mas);
4912 	if (mas_is_start(mas)) {
4913 		goto retry;
4914 	} else if (mas_is_none(mas)) {
4915 		mas->index = 0;
4916 		mas->last = ULONG_MAX;
4917 	} else if (mas_is_ptr(mas)) {
4918 		if (!mas->index) {
4919 			mas->last = 0;
4920 			return entry;
4921 		}
4922 
4923 		mas->index = 1;
4924 		mas->last = ULONG_MAX;
4925 		mas->status = ma_none;
4926 		return NULL;
4927 	}
4928 
4929 	return entry;
4930 }
4931 EXPORT_SYMBOL_GPL(mas_walk);
4932 
4933 static inline bool mas_rewind_node(struct ma_state *mas)
4934 {
4935 	unsigned char slot;
4936 
4937 	do {
4938 		if (mte_is_root(mas->node)) {
4939 			slot = mas->offset;
4940 			if (!slot)
4941 				return false;
4942 		} else {
4943 			mas_ascend(mas);
4944 			slot = mas->offset;
4945 		}
4946 	} while (!slot);
4947 
4948 	mas->offset = --slot;
4949 	return true;
4950 }
4951 
4952 /*
4953  * mas_skip_node() - Internal function.  Skip over a node.
4954  * @mas: The maple state.
4955  *
4956  * Return: true if there is another node, false otherwise.
4957  */
4958 static inline bool mas_skip_node(struct ma_state *mas)
4959 {
4960 	if (mas_is_err(mas))
4961 		return false;
4962 
4963 	do {
4964 		if (mte_is_root(mas->node)) {
4965 			if (mas->offset >= mas_data_end(mas)) {
4966 				mas_set_err(mas, -EBUSY);
4967 				return false;
4968 			}
4969 		} else {
4970 			mas_ascend(mas);
4971 		}
4972 	} while (mas->offset >= mas_data_end(mas));
4973 
4974 	mas->offset++;
4975 	return true;
4976 }
4977 
4978 /*
4979  * mas_awalk() - Allocation walk.  Search from low address to high, for a gap of
4980  * @size
4981  * @mas: The maple state
4982  * @size: The size of the gap required
4983  *
4984  * Search between @mas->index and @mas->last for a gap of @size.
4985  */
4986 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
4987 {
4988 	struct maple_enode *last = NULL;
4989 
4990 	/*
4991 	 * There are 4 options:
4992 	 * go to child (descend)
4993 	 * go back to parent (ascend)
4994 	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
4995 	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
4996 	 */
4997 	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
4998 		if (last == mas->node)
4999 			mas_skip_node(mas);
5000 		else
5001 			last = mas->node;
5002 	}
5003 }
5004 
5005 /*
5006  * mas_sparse_area() - Internal function.  Return upper or lower limit when
5007  * searching for a gap in an empty tree.
5008  * @mas: The maple state
5009  * @min: the minimum range
5010  * @max: The maximum range
5011  * @size: The size of the gap
5012  * @fwd: Searching forward or back
5013  */
5014 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5015 				unsigned long max, unsigned long size, bool fwd)
5016 {
5017 	if (!unlikely(mas_is_none(mas)) && min == 0) {
5018 		min++;
5019 		/*
5020 		 * At this time, min is increased, we need to recheck whether
5021 		 * the size is satisfied.
5022 		 */
5023 		if (min > max || max - min + 1 < size)
5024 			return -EBUSY;
5025 	}
5026 	/* mas_is_ptr */
5027 
5028 	if (fwd) {
5029 		mas->index = min;
5030 		mas->last = min + size - 1;
5031 	} else {
5032 		mas->last = max;
5033 		mas->index = max - size + 1;
5034 	}
5035 	return 0;
5036 }
5037 
5038 /*
5039  * mas_empty_area() - Get the lowest address within the range that is
5040  * sufficient for the size requested.
5041  * @mas: The maple state
5042  * @min: The lowest value of the range
5043  * @max: The highest value of the range
5044  * @size: The size needed
5045  */
5046 int mas_empty_area(struct ma_state *mas, unsigned long min,
5047 		unsigned long max, unsigned long size)
5048 {
5049 	unsigned char offset;
5050 	unsigned long *pivots;
5051 	enum maple_type mt;
5052 	struct maple_node *node;
5053 
5054 	if (min > max)
5055 		return -EINVAL;
5056 
5057 	if (size == 0 || max - min < size - 1)
5058 		return -EINVAL;
5059 
5060 	if (mas_is_start(mas))
5061 		mas_start(mas);
5062 	else if (mas->offset >= 2)
5063 		mas->offset -= 2;
5064 	else if (!mas_skip_node(mas))
5065 		return -EBUSY;
5066 
5067 	/* Empty set */
5068 	if (mas_is_none(mas) || mas_is_ptr(mas))
5069 		return mas_sparse_area(mas, min, max, size, true);
5070 
5071 	/* The start of the window can only be within these values */
5072 	mas->index = min;
5073 	mas->last = max;
5074 	mas_awalk(mas, size);
5075 
5076 	if (unlikely(mas_is_err(mas)))
5077 		return xa_err(mas->node);
5078 
5079 	offset = mas->offset;
5080 	if (unlikely(offset == MAPLE_NODE_SLOTS))
5081 		return -EBUSY;
5082 
5083 	node = mas_mn(mas);
5084 	mt = mte_node_type(mas->node);
5085 	pivots = ma_pivots(node, mt);
5086 	min = mas_safe_min(mas, pivots, offset);
5087 	if (mas->index < min)
5088 		mas->index = min;
5089 	mas->last = mas->index + size - 1;
5090 	mas->end = ma_data_end(node, mt, pivots, mas->max);
5091 	return 0;
5092 }
5093 EXPORT_SYMBOL_GPL(mas_empty_area);
5094 
5095 /*
5096  * mas_empty_area_rev() - Get the highest address within the range that is
5097  * sufficient for the size requested.
5098  * @mas: The maple state
5099  * @min: The lowest value of the range
5100  * @max: The highest value of the range
5101  * @size: The size needed
5102  */
5103 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5104 		unsigned long max, unsigned long size)
5105 {
5106 	struct maple_enode *last = mas->node;
5107 
5108 	if (min > max)
5109 		return -EINVAL;
5110 
5111 	if (size == 0 || max - min < size - 1)
5112 		return -EINVAL;
5113 
5114 	if (mas_is_start(mas)) {
5115 		mas_start(mas);
5116 		mas->offset = mas_data_end(mas);
5117 	} else if (mas->offset >= 2) {
5118 		mas->offset -= 2;
5119 	} else if (!mas_rewind_node(mas)) {
5120 		return -EBUSY;
5121 	}
5122 
5123 	/* Empty set. */
5124 	if (mas_is_none(mas) || mas_is_ptr(mas))
5125 		return mas_sparse_area(mas, min, max, size, false);
5126 
5127 	/* The start of the window can only be within these values. */
5128 	mas->index = min;
5129 	mas->last = max;
5130 
5131 	while (!mas_rev_awalk(mas, size, &min, &max)) {
5132 		if (last == mas->node) {
5133 			if (!mas_rewind_node(mas))
5134 				return -EBUSY;
5135 		} else {
5136 			last = mas->node;
5137 		}
5138 	}
5139 
5140 	if (mas_is_err(mas))
5141 		return xa_err(mas->node);
5142 
5143 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5144 		return -EBUSY;
5145 
5146 	/* Trim the upper limit to the max. */
5147 	if (max < mas->last)
5148 		mas->last = max;
5149 
5150 	mas->index = mas->last - size + 1;
5151 	mas->end = mas_data_end(mas);
5152 	return 0;
5153 }
5154 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5155 
5156 /*
5157  * mte_dead_leaves() - Mark all leaves of a node as dead.
5158  * @mas: The maple state
5159  * @slots: Pointer to the slot array
5160  * @type: The maple node type
5161  *
5162  * Must hold the write lock.
5163  *
5164  * Return: The number of leaves marked as dead.
5165  */
5166 static inline
5167 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5168 			      void __rcu **slots)
5169 {
5170 	struct maple_node *node;
5171 	enum maple_type type;
5172 	void *entry;
5173 	int offset;
5174 
5175 	for (offset = 0; offset < mt_slot_count(enode); offset++) {
5176 		entry = mt_slot(mt, slots, offset);
5177 		type = mte_node_type(entry);
5178 		node = mte_to_node(entry);
5179 		/* Use both node and type to catch LE & BE metadata */
5180 		if (!node || !type)
5181 			break;
5182 
5183 		mte_set_node_dead(entry);
5184 		node->type = type;
5185 		rcu_assign_pointer(slots[offset], node);
5186 	}
5187 
5188 	return offset;
5189 }
5190 
5191 /**
5192  * mte_dead_walk() - Walk down a dead tree to just before the leaves
5193  * @enode: The maple encoded node
5194  * @offset: The starting offset
5195  *
5196  * Note: This can only be used from the RCU callback context.
5197  */
5198 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5199 {
5200 	struct maple_node *node, *next;
5201 	void __rcu **slots = NULL;
5202 
5203 	next = mte_to_node(*enode);
5204 	do {
5205 		*enode = ma_enode_ptr(next);
5206 		node = mte_to_node(*enode);
5207 		slots = ma_slots(node, node->type);
5208 		next = rcu_dereference_protected(slots[offset],
5209 					lock_is_held(&rcu_callback_map));
5210 		offset = 0;
5211 	} while (!ma_is_leaf(next->type));
5212 
5213 	return slots;
5214 }
5215 
5216 /**
5217  * mt_free_walk() - Walk & free a tree in the RCU callback context
5218  * @head: The RCU head that's within the node.
5219  *
5220  * Note: This can only be used from the RCU callback context.
5221  */
5222 static void mt_free_walk(struct rcu_head *head)
5223 {
5224 	void __rcu **slots;
5225 	struct maple_node *node, *start;
5226 	struct maple_enode *enode;
5227 	unsigned char offset;
5228 	enum maple_type type;
5229 
5230 	node = container_of(head, struct maple_node, rcu);
5231 
5232 	if (ma_is_leaf(node->type))
5233 		goto free_leaf;
5234 
5235 	start = node;
5236 	enode = mt_mk_node(node, node->type);
5237 	slots = mte_dead_walk(&enode, 0);
5238 	node = mte_to_node(enode);
5239 	do {
5240 		mt_free_bulk(node->slot_len, slots);
5241 		offset = node->parent_slot + 1;
5242 		enode = node->piv_parent;
5243 		if (mte_to_node(enode) == node)
5244 			goto free_leaf;
5245 
5246 		type = mte_node_type(enode);
5247 		slots = ma_slots(mte_to_node(enode), type);
5248 		if ((offset < mt_slots[type]) &&
5249 		    rcu_dereference_protected(slots[offset],
5250 					      lock_is_held(&rcu_callback_map)))
5251 			slots = mte_dead_walk(&enode, offset);
5252 		node = mte_to_node(enode);
5253 	} while ((node != start) || (node->slot_len < offset));
5254 
5255 	slots = ma_slots(node, node->type);
5256 	mt_free_bulk(node->slot_len, slots);
5257 
5258 free_leaf:
5259 	mt_free_rcu(&node->rcu);
5260 }
5261 
5262 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5263 	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5264 {
5265 	struct maple_node *node;
5266 	struct maple_enode *next = *enode;
5267 	void __rcu **slots = NULL;
5268 	enum maple_type type;
5269 	unsigned char next_offset = 0;
5270 
5271 	do {
5272 		*enode = next;
5273 		node = mte_to_node(*enode);
5274 		type = mte_node_type(*enode);
5275 		slots = ma_slots(node, type);
5276 		next = mt_slot_locked(mt, slots, next_offset);
5277 		if ((mte_dead_node(next)))
5278 			next = mt_slot_locked(mt, slots, ++next_offset);
5279 
5280 		mte_set_node_dead(*enode);
5281 		node->type = type;
5282 		node->piv_parent = prev;
5283 		node->parent_slot = offset;
5284 		offset = next_offset;
5285 		next_offset = 0;
5286 		prev = *enode;
5287 	} while (!mte_is_leaf(next));
5288 
5289 	return slots;
5290 }
5291 
5292 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5293 			    bool free)
5294 {
5295 	void __rcu **slots;
5296 	struct maple_node *node = mte_to_node(enode);
5297 	struct maple_enode *start;
5298 
5299 	if (mte_is_leaf(enode)) {
5300 		node->type = mte_node_type(enode);
5301 		goto free_leaf;
5302 	}
5303 
5304 	start = enode;
5305 	slots = mte_destroy_descend(&enode, mt, start, 0);
5306 	node = mte_to_node(enode); // Updated in the above call.
5307 	do {
5308 		enum maple_type type;
5309 		unsigned char offset;
5310 		struct maple_enode *parent, *tmp;
5311 
5312 		node->slot_len = mte_dead_leaves(enode, mt, slots);
5313 		if (free)
5314 			mt_free_bulk(node->slot_len, slots);
5315 		offset = node->parent_slot + 1;
5316 		enode = node->piv_parent;
5317 		if (mte_to_node(enode) == node)
5318 			goto free_leaf;
5319 
5320 		type = mte_node_type(enode);
5321 		slots = ma_slots(mte_to_node(enode), type);
5322 		if (offset >= mt_slots[type])
5323 			goto next;
5324 
5325 		tmp = mt_slot_locked(mt, slots, offset);
5326 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5327 			parent = enode;
5328 			enode = tmp;
5329 			slots = mte_destroy_descend(&enode, mt, parent, offset);
5330 		}
5331 next:
5332 		node = mte_to_node(enode);
5333 	} while (start != enode);
5334 
5335 	node = mte_to_node(enode);
5336 	node->slot_len = mte_dead_leaves(enode, mt, slots);
5337 	if (free)
5338 		mt_free_bulk(node->slot_len, slots);
5339 
5340 free_leaf:
5341 	if (free)
5342 		mt_free_rcu(&node->rcu);
5343 	else
5344 		mt_clear_meta(mt, node, node->type);
5345 }
5346 
5347 /*
5348  * mte_destroy_walk() - Free a tree or sub-tree.
5349  * @enode: the encoded maple node (maple_enode) to start
5350  * @mt: the tree to free - needed for node types.
5351  *
5352  * Must hold the write lock.
5353  */
5354 static inline void mte_destroy_walk(struct maple_enode *enode,
5355 				    struct maple_tree *mt)
5356 {
5357 	struct maple_node *node = mte_to_node(enode);
5358 
5359 	if (mt_in_rcu(mt)) {
5360 		mt_destroy_walk(enode, mt, false);
5361 		call_rcu(&node->rcu, mt_free_walk);
5362 	} else {
5363 		mt_destroy_walk(enode, mt, true);
5364 	}
5365 }
5366 
5367 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5368 {
5369 	if (!mas_is_active(wr_mas->mas)) {
5370 		if (mas_is_start(wr_mas->mas))
5371 			return;
5372 
5373 		if (unlikely(mas_is_paused(wr_mas->mas)))
5374 			goto reset;
5375 
5376 		if (unlikely(mas_is_none(wr_mas->mas)))
5377 			goto reset;
5378 
5379 		if (unlikely(mas_is_overflow(wr_mas->mas)))
5380 			goto reset;
5381 
5382 		if (unlikely(mas_is_underflow(wr_mas->mas)))
5383 			goto reset;
5384 	}
5385 
5386 	/*
5387 	 * A less strict version of mas_is_span_wr() where we allow spanning
5388 	 * writes within this node.  This is to stop partial walks in
5389 	 * mas_prealloc() from being reset.
5390 	 */
5391 	if (wr_mas->mas->last > wr_mas->mas->max)
5392 		goto reset;
5393 
5394 	if (wr_mas->entry)
5395 		return;
5396 
5397 	if (mte_is_leaf(wr_mas->mas->node) &&
5398 	    wr_mas->mas->last == wr_mas->mas->max)
5399 		goto reset;
5400 
5401 	return;
5402 
5403 reset:
5404 	mas_reset(wr_mas->mas);
5405 }
5406 
5407 /* Interface */
5408 
5409 /**
5410  * mas_store() - Store an @entry.
5411  * @mas: The maple state.
5412  * @entry: The entry to store.
5413  *
5414  * The @mas->index and @mas->last is used to set the range for the @entry.
5415  * Note: The @mas should have pre-allocated entries to ensure there is memory to
5416  * store the entry.  Please see mas_expected_entries()/mas_destroy() for more details.
5417  *
5418  * Return: the first entry between mas->index and mas->last or %NULL.
5419  */
5420 void *mas_store(struct ma_state *mas, void *entry)
5421 {
5422 	MA_WR_STATE(wr_mas, mas, entry);
5423 
5424 	trace_ma_write(__func__, mas, 0, entry);
5425 #ifdef CONFIG_DEBUG_MAPLE_TREE
5426 	if (MAS_WARN_ON(mas, mas->index > mas->last))
5427 		pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5428 
5429 	if (mas->index > mas->last) {
5430 		mas_set_err(mas, -EINVAL);
5431 		return NULL;
5432 	}
5433 
5434 #endif
5435 
5436 	/*
5437 	 * Storing is the same operation as insert with the added caveat that it
5438 	 * can overwrite entries.  Although this seems simple enough, one may
5439 	 * want to examine what happens if a single store operation was to
5440 	 * overwrite multiple entries within a self-balancing B-Tree.
5441 	 */
5442 	mas_wr_store_setup(&wr_mas);
5443 	mas_wr_store_entry(&wr_mas);
5444 	return wr_mas.content;
5445 }
5446 EXPORT_SYMBOL_GPL(mas_store);
5447 
5448 /**
5449  * mas_store_gfp() - Store a value into the tree.
5450  * @mas: The maple state
5451  * @entry: The entry to store
5452  * @gfp: The GFP_FLAGS to use for allocations if necessary.
5453  *
5454  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5455  * be allocated.
5456  */
5457 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5458 {
5459 	MA_WR_STATE(wr_mas, mas, entry);
5460 
5461 	mas_wr_store_setup(&wr_mas);
5462 	trace_ma_write(__func__, mas, 0, entry);
5463 retry:
5464 	mas_wr_store_entry(&wr_mas);
5465 	if (unlikely(mas_nomem(mas, gfp)))
5466 		goto retry;
5467 
5468 	if (unlikely(mas_is_err(mas)))
5469 		return xa_err(mas->node);
5470 
5471 	return 0;
5472 }
5473 EXPORT_SYMBOL_GPL(mas_store_gfp);
5474 
5475 /**
5476  * mas_store_prealloc() - Store a value into the tree using memory
5477  * preallocated in the maple state.
5478  * @mas: The maple state
5479  * @entry: The entry to store.
5480  */
5481 void mas_store_prealloc(struct ma_state *mas, void *entry)
5482 {
5483 	MA_WR_STATE(wr_mas, mas, entry);
5484 
5485 	mas_wr_store_setup(&wr_mas);
5486 	trace_ma_write(__func__, mas, 0, entry);
5487 	mas_wr_store_entry(&wr_mas);
5488 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5489 	mas_destroy(mas);
5490 }
5491 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5492 
5493 /**
5494  * mas_preallocate() - Preallocate enough nodes for a store operation
5495  * @mas: The maple state
5496  * @entry: The entry that will be stored
5497  * @gfp: The GFP_FLAGS to use for allocations.
5498  *
5499  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5500  */
5501 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5502 {
5503 	MA_WR_STATE(wr_mas, mas, entry);
5504 	unsigned char node_size;
5505 	int request = 1;
5506 	int ret;
5507 
5508 
5509 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
5510 		goto ask_now;
5511 
5512 	mas_wr_store_setup(&wr_mas);
5513 	wr_mas.content = mas_start(mas);
5514 	/* Root expand */
5515 	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5516 		goto ask_now;
5517 
5518 	if (unlikely(!mas_wr_walk(&wr_mas))) {
5519 		/* Spanning store, use worst case for now */
5520 		request = 1 + mas_mt_height(mas) * 3;
5521 		goto ask_now;
5522 	}
5523 
5524 	/* At this point, we are at the leaf node that needs to be altered. */
5525 	/* Exact fit, no nodes needed. */
5526 	if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5527 		return 0;
5528 
5529 	mas_wr_end_piv(&wr_mas);
5530 	node_size = mas_wr_new_end(&wr_mas);
5531 
5532 	/* Slot store, does not require additional nodes */
5533 	if (node_size == mas->end) {
5534 		/* reuse node */
5535 		if (!mt_in_rcu(mas->tree))
5536 			return 0;
5537 		/* shifting boundary */
5538 		if (wr_mas.offset_end - mas->offset == 1)
5539 			return 0;
5540 	}
5541 
5542 	if (node_size >= mt_slots[wr_mas.type]) {
5543 		/* Split, worst case for now. */
5544 		request = 1 + mas_mt_height(mas) * 2;
5545 		goto ask_now;
5546 	}
5547 
5548 	/* New root needs a single node */
5549 	if (unlikely(mte_is_root(mas->node)))
5550 		goto ask_now;
5551 
5552 	/* Potential spanning rebalance collapsing a node, use worst-case */
5553 	if (node_size  - 1 <= mt_min_slots[wr_mas.type])
5554 		request = mas_mt_height(mas) * 2 - 1;
5555 
5556 	/* node store, slot store needs one node */
5557 ask_now:
5558 	mas_node_count_gfp(mas, request, gfp);
5559 	mas->mas_flags |= MA_STATE_PREALLOC;
5560 	if (likely(!mas_is_err(mas)))
5561 		return 0;
5562 
5563 	mas_set_alloc_req(mas, 0);
5564 	ret = xa_err(mas->node);
5565 	mas_reset(mas);
5566 	mas_destroy(mas);
5567 	mas_reset(mas);
5568 	return ret;
5569 }
5570 EXPORT_SYMBOL_GPL(mas_preallocate);
5571 
5572 /*
5573  * mas_destroy() - destroy a maple state.
5574  * @mas: The maple state
5575  *
5576  * Upon completion, check the left-most node and rebalance against the node to
5577  * the right if necessary.  Frees any allocated nodes associated with this maple
5578  * state.
5579  */
5580 void mas_destroy(struct ma_state *mas)
5581 {
5582 	struct maple_alloc *node;
5583 	unsigned long total;
5584 
5585 	/*
5586 	 * When using mas_for_each() to insert an expected number of elements,
5587 	 * it is possible that the number inserted is less than the expected
5588 	 * number.  To fix an invalid final node, a check is performed here to
5589 	 * rebalance the previous node with the final node.
5590 	 */
5591 	if (mas->mas_flags & MA_STATE_REBALANCE) {
5592 		unsigned char end;
5593 
5594 		mas_start(mas);
5595 		mtree_range_walk(mas);
5596 		end = mas->end + 1;
5597 		if (end < mt_min_slot_count(mas->node) - 1)
5598 			mas_destroy_rebalance(mas, end);
5599 
5600 		mas->mas_flags &= ~MA_STATE_REBALANCE;
5601 	}
5602 	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5603 
5604 	total = mas_allocated(mas);
5605 	while (total) {
5606 		node = mas->alloc;
5607 		mas->alloc = node->slot[0];
5608 		if (node->node_count > 1) {
5609 			size_t count = node->node_count - 1;
5610 
5611 			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5612 			total -= count;
5613 		}
5614 		mt_free_one(ma_mnode_ptr(node));
5615 		total--;
5616 	}
5617 
5618 	mas->alloc = NULL;
5619 }
5620 EXPORT_SYMBOL_GPL(mas_destroy);
5621 
5622 /*
5623  * mas_expected_entries() - Set the expected number of entries that will be inserted.
5624  * @mas: The maple state
5625  * @nr_entries: The number of expected entries.
5626  *
5627  * This will attempt to pre-allocate enough nodes to store the expected number
5628  * of entries.  The allocations will occur using the bulk allocator interface
5629  * for speed.  Please call mas_destroy() on the @mas after inserting the entries
5630  * to ensure any unused nodes are freed.
5631  *
5632  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5633  */
5634 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5635 {
5636 	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5637 	struct maple_enode *enode = mas->node;
5638 	int nr_nodes;
5639 	int ret;
5640 
5641 	/*
5642 	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5643 	 * forking a process and duplicating the VMAs from one tree to a new
5644 	 * tree.  When such a situation arises, it is known that the new tree is
5645 	 * not going to be used until the entire tree is populated.  For
5646 	 * performance reasons, it is best to use a bulk load with RCU disabled.
5647 	 * This allows for optimistic splitting that favours the left and reuse
5648 	 * of nodes during the operation.
5649 	 */
5650 
5651 	/* Optimize splitting for bulk insert in-order */
5652 	mas->mas_flags |= MA_STATE_BULK;
5653 
5654 	/*
5655 	 * Avoid overflow, assume a gap between each entry and a trailing null.
5656 	 * If this is wrong, it just means allocation can happen during
5657 	 * insertion of entries.
5658 	 */
5659 	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5660 	if (!mt_is_alloc(mas->tree))
5661 		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5662 
5663 	/* Leaves; reduce slots to keep space for expansion */
5664 	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5665 	/* Internal nodes */
5666 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5667 	/* Add working room for split (2 nodes) + new parents */
5668 	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
5669 
5670 	/* Detect if allocations run out */
5671 	mas->mas_flags |= MA_STATE_PREALLOC;
5672 
5673 	if (!mas_is_err(mas))
5674 		return 0;
5675 
5676 	ret = xa_err(mas->node);
5677 	mas->node = enode;
5678 	mas_destroy(mas);
5679 	return ret;
5680 
5681 }
5682 EXPORT_SYMBOL_GPL(mas_expected_entries);
5683 
5684 static bool mas_next_setup(struct ma_state *mas, unsigned long max,
5685 		void **entry)
5686 {
5687 	bool was_none = mas_is_none(mas);
5688 
5689 	if (unlikely(mas->last >= max)) {
5690 		mas->status = ma_overflow;
5691 		return true;
5692 	}
5693 
5694 	switch (mas->status) {
5695 	case ma_active:
5696 		return false;
5697 	case ma_none:
5698 		fallthrough;
5699 	case ma_pause:
5700 		mas->status = ma_start;
5701 		fallthrough;
5702 	case ma_start:
5703 		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5704 		break;
5705 	case ma_overflow:
5706 		/* Overflowed before, but the max changed */
5707 		mas->status = ma_active;
5708 		break;
5709 	case ma_underflow:
5710 		/* The user expects the mas to be one before where it is */
5711 		mas->status = ma_active;
5712 		*entry = mas_walk(mas);
5713 		if (*entry)
5714 			return true;
5715 		break;
5716 	case ma_root:
5717 		break;
5718 	case ma_error:
5719 		return true;
5720 	}
5721 
5722 	if (likely(mas_is_active(mas))) /* Fast path */
5723 		return false;
5724 
5725 	if (mas_is_ptr(mas)) {
5726 		*entry = NULL;
5727 		if (was_none && mas->index == 0) {
5728 			mas->index = mas->last = 0;
5729 			return true;
5730 		}
5731 		mas->index = 1;
5732 		mas->last = ULONG_MAX;
5733 		mas->status = ma_none;
5734 		return true;
5735 	}
5736 
5737 	if (mas_is_none(mas))
5738 		return true;
5739 
5740 	return false;
5741 }
5742 
5743 /**
5744  * mas_next() - Get the next entry.
5745  * @mas: The maple state
5746  * @max: The maximum index to check.
5747  *
5748  * Returns the next entry after @mas->index.
5749  * Must hold rcu_read_lock or the write lock.
5750  * Can return the zero entry.
5751  *
5752  * Return: The next entry or %NULL
5753  */
5754 void *mas_next(struct ma_state *mas, unsigned long max)
5755 {
5756 	void *entry = NULL;
5757 
5758 	if (mas_next_setup(mas, max, &entry))
5759 		return entry;
5760 
5761 	/* Retries on dead nodes handled by mas_next_slot */
5762 	return mas_next_slot(mas, max, false);
5763 }
5764 EXPORT_SYMBOL_GPL(mas_next);
5765 
5766 /**
5767  * mas_next_range() - Advance the maple state to the next range
5768  * @mas: The maple state
5769  * @max: The maximum index to check.
5770  *
5771  * Sets @mas->index and @mas->last to the range.
5772  * Must hold rcu_read_lock or the write lock.
5773  * Can return the zero entry.
5774  *
5775  * Return: The next entry or %NULL
5776  */
5777 void *mas_next_range(struct ma_state *mas, unsigned long max)
5778 {
5779 	void *entry = NULL;
5780 
5781 	if (mas_next_setup(mas, max, &entry))
5782 		return entry;
5783 
5784 	/* Retries on dead nodes handled by mas_next_slot */
5785 	return mas_next_slot(mas, max, true);
5786 }
5787 EXPORT_SYMBOL_GPL(mas_next_range);
5788 
5789 /**
5790  * mt_next() - get the next value in the maple tree
5791  * @mt: The maple tree
5792  * @index: The start index
5793  * @max: The maximum index to check
5794  *
5795  * Takes RCU read lock internally to protect the search, which does not
5796  * protect the returned pointer after dropping RCU read lock.
5797  * See also: Documentation/core-api/maple_tree.rst
5798  *
5799  * Return: The entry higher than @index or %NULL if nothing is found.
5800  */
5801 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5802 {
5803 	void *entry = NULL;
5804 	MA_STATE(mas, mt, index, index);
5805 
5806 	rcu_read_lock();
5807 	entry = mas_next(&mas, max);
5808 	rcu_read_unlock();
5809 	return entry;
5810 }
5811 EXPORT_SYMBOL_GPL(mt_next);
5812 
5813 static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
5814 {
5815 	if (unlikely(mas->index <= min)) {
5816 		mas->status = ma_underflow;
5817 		return true;
5818 	}
5819 
5820 	switch (mas->status) {
5821 	case ma_active:
5822 		return false;
5823 	case ma_start:
5824 		break;
5825 	case ma_none:
5826 		fallthrough;
5827 	case ma_pause:
5828 		mas->status = ma_start;
5829 		break;
5830 	case ma_underflow:
5831 		/* underflowed before but the min changed */
5832 		mas->status = ma_active;
5833 		break;
5834 	case ma_overflow:
5835 		/* User expects mas to be one after where it is */
5836 		mas->status = ma_active;
5837 		*entry = mas_walk(mas);
5838 		if (*entry)
5839 			return true;
5840 		break;
5841 	case ma_root:
5842 		break;
5843 	case ma_error:
5844 		return true;
5845 	}
5846 
5847 	if (mas_is_start(mas))
5848 		mas_walk(mas);
5849 
5850 	if (unlikely(mas_is_ptr(mas))) {
5851 		if (!mas->index) {
5852 			mas->status = ma_none;
5853 			return true;
5854 		}
5855 		mas->index = mas->last = 0;
5856 		*entry = mas_root(mas);
5857 		return true;
5858 	}
5859 
5860 	if (mas_is_none(mas)) {
5861 		if (mas->index) {
5862 			/* Walked to out-of-range pointer? */
5863 			mas->index = mas->last = 0;
5864 			mas->status = ma_root;
5865 			*entry = mas_root(mas);
5866 			return true;
5867 		}
5868 		return true;
5869 	}
5870 
5871 	return false;
5872 }
5873 
5874 /**
5875  * mas_prev() - Get the previous entry
5876  * @mas: The maple state
5877  * @min: The minimum value to check.
5878  *
5879  * Must hold rcu_read_lock or the write lock.
5880  * Will reset mas to ma_start if the status is ma_none.  Will stop on not
5881  * searchable nodes.
5882  *
5883  * Return: the previous value or %NULL.
5884  */
5885 void *mas_prev(struct ma_state *mas, unsigned long min)
5886 {
5887 	void *entry = NULL;
5888 
5889 	if (mas_prev_setup(mas, min, &entry))
5890 		return entry;
5891 
5892 	return mas_prev_slot(mas, min, false);
5893 }
5894 EXPORT_SYMBOL_GPL(mas_prev);
5895 
5896 /**
5897  * mas_prev_range() - Advance to the previous range
5898  * @mas: The maple state
5899  * @min: The minimum value to check.
5900  *
5901  * Sets @mas->index and @mas->last to the range.
5902  * Must hold rcu_read_lock or the write lock.
5903  * Will reset mas to ma_start if the node is ma_none.  Will stop on not
5904  * searchable nodes.
5905  *
5906  * Return: the previous value or %NULL.
5907  */
5908 void *mas_prev_range(struct ma_state *mas, unsigned long min)
5909 {
5910 	void *entry = NULL;
5911 
5912 	if (mas_prev_setup(mas, min, &entry))
5913 		return entry;
5914 
5915 	return mas_prev_slot(mas, min, true);
5916 }
5917 EXPORT_SYMBOL_GPL(mas_prev_range);
5918 
5919 /**
5920  * mt_prev() - get the previous value in the maple tree
5921  * @mt: The maple tree
5922  * @index: The start index
5923  * @min: The minimum index to check
5924  *
5925  * Takes RCU read lock internally to protect the search, which does not
5926  * protect the returned pointer after dropping RCU read lock.
5927  * See also: Documentation/core-api/maple_tree.rst
5928  *
5929  * Return: The entry before @index or %NULL if nothing is found.
5930  */
5931 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5932 {
5933 	void *entry = NULL;
5934 	MA_STATE(mas, mt, index, index);
5935 
5936 	rcu_read_lock();
5937 	entry = mas_prev(&mas, min);
5938 	rcu_read_unlock();
5939 	return entry;
5940 }
5941 EXPORT_SYMBOL_GPL(mt_prev);
5942 
5943 /**
5944  * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5945  * @mas: The maple state to pause
5946  *
5947  * Some users need to pause a walk and drop the lock they're holding in
5948  * order to yield to a higher priority thread or carry out an operation
5949  * on an entry.  Those users should call this function before they drop
5950  * the lock.  It resets the @mas to be suitable for the next iteration
5951  * of the loop after the user has reacquired the lock.  If most entries
5952  * found during a walk require you to call mas_pause(), the mt_for_each()
5953  * iterator may be more appropriate.
5954  *
5955  */
5956 void mas_pause(struct ma_state *mas)
5957 {
5958 	mas->status = ma_pause;
5959 	mas->node = NULL;
5960 }
5961 EXPORT_SYMBOL_GPL(mas_pause);
5962 
5963 /**
5964  * mas_find_setup() - Internal function to set up mas_find*().
5965  * @mas: The maple state
5966  * @max: The maximum index
5967  * @entry: Pointer to the entry
5968  *
5969  * Returns: True if entry is the answer, false otherwise.
5970  */
5971 static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
5972 {
5973 	switch (mas->status) {
5974 	case ma_active:
5975 		if (mas->last < max)
5976 			return false;
5977 		return true;
5978 	case ma_start:
5979 		break;
5980 	case ma_pause:
5981 		if (unlikely(mas->last >= max))
5982 			return true;
5983 
5984 		mas->index = ++mas->last;
5985 		mas->status = ma_start;
5986 		break;
5987 	case ma_none:
5988 		if (unlikely(mas->last >= max))
5989 			return true;
5990 
5991 		mas->index = mas->last;
5992 		mas->status = ma_start;
5993 		break;
5994 	case ma_underflow:
5995 		/* mas is pointing at entry before unable to go lower */
5996 		if (unlikely(mas->index >= max)) {
5997 			mas->status = ma_overflow;
5998 			return true;
5999 		}
6000 
6001 		mas->status = ma_active;
6002 		*entry = mas_walk(mas);
6003 		if (*entry)
6004 			return true;
6005 		break;
6006 	case ma_overflow:
6007 		if (unlikely(mas->last >= max))
6008 			return true;
6009 
6010 		mas->status = ma_active;
6011 		*entry = mas_walk(mas);
6012 		if (*entry)
6013 			return true;
6014 		break;
6015 	case ma_root:
6016 		break;
6017 	case ma_error:
6018 		return true;
6019 	}
6020 
6021 	if (mas_is_start(mas)) {
6022 		/* First run or continue */
6023 		if (mas->index > max)
6024 			return true;
6025 
6026 		*entry = mas_walk(mas);
6027 		if (*entry)
6028 			return true;
6029 
6030 	}
6031 
6032 	if (unlikely(mas_is_ptr(mas)))
6033 		goto ptr_out_of_range;
6034 
6035 	if (unlikely(mas_is_none(mas)))
6036 		return true;
6037 
6038 	if (mas->index == max)
6039 		return true;
6040 
6041 	return false;
6042 
6043 ptr_out_of_range:
6044 	mas->status = ma_none;
6045 	mas->index = 1;
6046 	mas->last = ULONG_MAX;
6047 	return true;
6048 }
6049 
6050 /**
6051  * mas_find() - On the first call, find the entry at or after mas->index up to
6052  * %max.  Otherwise, find the entry after mas->index.
6053  * @mas: The maple state
6054  * @max: The maximum value to check.
6055  *
6056  * Must hold rcu_read_lock or the write lock.
6057  * If an entry exists, last and index are updated accordingly.
6058  * May set @mas->status to ma_overflow.
6059  *
6060  * Return: The entry or %NULL.
6061  */
6062 void *mas_find(struct ma_state *mas, unsigned long max)
6063 {
6064 	void *entry = NULL;
6065 
6066 	if (mas_find_setup(mas, max, &entry))
6067 		return entry;
6068 
6069 	/* Retries on dead nodes handled by mas_next_slot */
6070 	entry = mas_next_slot(mas, max, false);
6071 	/* Ignore overflow */
6072 	mas->status = ma_active;
6073 	return entry;
6074 }
6075 EXPORT_SYMBOL_GPL(mas_find);
6076 
6077 /**
6078  * mas_find_range() - On the first call, find the entry at or after
6079  * mas->index up to %max.  Otherwise, advance to the next slot mas->index.
6080  * @mas: The maple state
6081  * @max: The maximum value to check.
6082  *
6083  * Must hold rcu_read_lock or the write lock.
6084  * If an entry exists, last and index are updated accordingly.
6085  * May set @mas->status to ma_overflow.
6086  *
6087  * Return: The entry or %NULL.
6088  */
6089 void *mas_find_range(struct ma_state *mas, unsigned long max)
6090 {
6091 	void *entry = NULL;
6092 
6093 	if (mas_find_setup(mas, max, &entry))
6094 		return entry;
6095 
6096 	/* Retries on dead nodes handled by mas_next_slot */
6097 	return mas_next_slot(mas, max, true);
6098 }
6099 EXPORT_SYMBOL_GPL(mas_find_range);
6100 
6101 /**
6102  * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6103  * @mas: The maple state
6104  * @min: The minimum index
6105  * @entry: Pointer to the entry
6106  *
6107  * Returns: True if entry is the answer, false otherwise.
6108  */
6109 static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6110 		void **entry)
6111 {
6112 
6113 	switch (mas->status) {
6114 	case ma_active:
6115 		goto active;
6116 	case ma_start:
6117 		break;
6118 	case ma_pause:
6119 		if (unlikely(mas->index <= min)) {
6120 			mas->status = ma_underflow;
6121 			return true;
6122 		}
6123 		mas->last = --mas->index;
6124 		mas->status = ma_start;
6125 		break;
6126 	case ma_none:
6127 		if (mas->index <= min)
6128 			goto none;
6129 
6130 		mas->last = mas->index;
6131 		mas->status = ma_start;
6132 		break;
6133 	case ma_overflow: /* user expects the mas to be one after where it is */
6134 		if (unlikely(mas->index <= min)) {
6135 			mas->status = ma_underflow;
6136 			return true;
6137 		}
6138 
6139 		mas->status = ma_active;
6140 		break;
6141 	case ma_underflow: /* user expects the mas to be one before where it is */
6142 		if (unlikely(mas->index <= min))
6143 			return true;
6144 
6145 		mas->status = ma_active;
6146 		break;
6147 	case ma_root:
6148 		break;
6149 	case ma_error:
6150 		return true;
6151 	}
6152 
6153 	if (mas_is_start(mas)) {
6154 		/* First run or continue */
6155 		if (mas->index < min)
6156 			return true;
6157 
6158 		*entry = mas_walk(mas);
6159 		if (*entry)
6160 			return true;
6161 	}
6162 
6163 	if (unlikely(mas_is_ptr(mas)))
6164 		goto none;
6165 
6166 	if (unlikely(mas_is_none(mas))) {
6167 		/*
6168 		 * Walked to the location, and there was nothing so the previous
6169 		 * location is 0.
6170 		 */
6171 		mas->last = mas->index = 0;
6172 		mas->status = ma_root;
6173 		*entry = mas_root(mas);
6174 		return true;
6175 	}
6176 
6177 active:
6178 	if (mas->index < min)
6179 		return true;
6180 
6181 	return false;
6182 
6183 none:
6184 	mas->status = ma_none;
6185 	return true;
6186 }
6187 
6188 /**
6189  * mas_find_rev: On the first call, find the first non-null entry at or below
6190  * mas->index down to %min.  Otherwise find the first non-null entry below
6191  * mas->index down to %min.
6192  * @mas: The maple state
6193  * @min: The minimum value to check.
6194  *
6195  * Must hold rcu_read_lock or the write lock.
6196  * If an entry exists, last and index are updated accordingly.
6197  * May set @mas->status to ma_underflow.
6198  *
6199  * Return: The entry or %NULL.
6200  */
6201 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6202 {
6203 	void *entry = NULL;
6204 
6205 	if (mas_find_rev_setup(mas, min, &entry))
6206 		return entry;
6207 
6208 	/* Retries on dead nodes handled by mas_prev_slot */
6209 	return mas_prev_slot(mas, min, false);
6210 
6211 }
6212 EXPORT_SYMBOL_GPL(mas_find_rev);
6213 
6214 /**
6215  * mas_find_range_rev: On the first call, find the first non-null entry at or
6216  * below mas->index down to %min.  Otherwise advance to the previous slot after
6217  * mas->index down to %min.
6218  * @mas: The maple state
6219  * @min: The minimum value to check.
6220  *
6221  * Must hold rcu_read_lock or the write lock.
6222  * If an entry exists, last and index are updated accordingly.
6223  * May set @mas->status to ma_underflow.
6224  *
6225  * Return: The entry or %NULL.
6226  */
6227 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6228 {
6229 	void *entry = NULL;
6230 
6231 	if (mas_find_rev_setup(mas, min, &entry))
6232 		return entry;
6233 
6234 	/* Retries on dead nodes handled by mas_prev_slot */
6235 	return mas_prev_slot(mas, min, true);
6236 }
6237 EXPORT_SYMBOL_GPL(mas_find_range_rev);
6238 
6239 /**
6240  * mas_erase() - Find the range in which index resides and erase the entire
6241  * range.
6242  * @mas: The maple state
6243  *
6244  * Must hold the write lock.
6245  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6246  * erases that range.
6247  *
6248  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6249  */
6250 void *mas_erase(struct ma_state *mas)
6251 {
6252 	void *entry;
6253 	MA_WR_STATE(wr_mas, mas, NULL);
6254 
6255 	if (!mas_is_active(mas) || !mas_is_start(mas))
6256 		mas->status = ma_start;
6257 
6258 	/* Retry unnecessary when holding the write lock. */
6259 	entry = mas_state_walk(mas);
6260 	if (!entry)
6261 		return NULL;
6262 
6263 write_retry:
6264 	/* Must reset to ensure spanning writes of last slot are detected */
6265 	mas_reset(mas);
6266 	mas_wr_store_setup(&wr_mas);
6267 	mas_wr_store_entry(&wr_mas);
6268 	if (mas_nomem(mas, GFP_KERNEL))
6269 		goto write_retry;
6270 
6271 	return entry;
6272 }
6273 EXPORT_SYMBOL_GPL(mas_erase);
6274 
6275 /**
6276  * mas_nomem() - Check if there was an error allocating and do the allocation
6277  * if necessary If there are allocations, then free them.
6278  * @mas: The maple state
6279  * @gfp: The GFP_FLAGS to use for allocations
6280  * Return: true on allocation, false otherwise.
6281  */
6282 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6283 	__must_hold(mas->tree->ma_lock)
6284 {
6285 	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6286 		mas_destroy(mas);
6287 		return false;
6288 	}
6289 
6290 	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6291 		mtree_unlock(mas->tree);
6292 		mas_alloc_nodes(mas, gfp);
6293 		mtree_lock(mas->tree);
6294 	} else {
6295 		mas_alloc_nodes(mas, gfp);
6296 	}
6297 
6298 	if (!mas_allocated(mas))
6299 		return false;
6300 
6301 	mas->status = ma_start;
6302 	return true;
6303 }
6304 
6305 void __init maple_tree_init(void)
6306 {
6307 	maple_node_cache = kmem_cache_create("maple_node",
6308 			sizeof(struct maple_node), sizeof(struct maple_node),
6309 			SLAB_PANIC, NULL);
6310 }
6311 
6312 /**
6313  * mtree_load() - Load a value stored in a maple tree
6314  * @mt: The maple tree
6315  * @index: The index to load
6316  *
6317  * Return: the entry or %NULL
6318  */
6319 void *mtree_load(struct maple_tree *mt, unsigned long index)
6320 {
6321 	MA_STATE(mas, mt, index, index);
6322 	void *entry;
6323 
6324 	trace_ma_read(__func__, &mas);
6325 	rcu_read_lock();
6326 retry:
6327 	entry = mas_start(&mas);
6328 	if (unlikely(mas_is_none(&mas)))
6329 		goto unlock;
6330 
6331 	if (unlikely(mas_is_ptr(&mas))) {
6332 		if (index)
6333 			entry = NULL;
6334 
6335 		goto unlock;
6336 	}
6337 
6338 	entry = mtree_lookup_walk(&mas);
6339 	if (!entry && unlikely(mas_is_start(&mas)))
6340 		goto retry;
6341 unlock:
6342 	rcu_read_unlock();
6343 	if (xa_is_zero(entry))
6344 		return NULL;
6345 
6346 	return entry;
6347 }
6348 EXPORT_SYMBOL(mtree_load);
6349 
6350 /**
6351  * mtree_store_range() - Store an entry at a given range.
6352  * @mt: The maple tree
6353  * @index: The start of the range
6354  * @last: The end of the range
6355  * @entry: The entry to store
6356  * @gfp: The GFP_FLAGS to use for allocations
6357  *
6358  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6359  * be allocated.
6360  */
6361 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6362 		unsigned long last, void *entry, gfp_t gfp)
6363 {
6364 	MA_STATE(mas, mt, index, last);
6365 	MA_WR_STATE(wr_mas, &mas, entry);
6366 
6367 	trace_ma_write(__func__, &mas, 0, entry);
6368 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6369 		return -EINVAL;
6370 
6371 	if (index > last)
6372 		return -EINVAL;
6373 
6374 	mtree_lock(mt);
6375 retry:
6376 	mas_wr_store_entry(&wr_mas);
6377 	if (mas_nomem(&mas, gfp))
6378 		goto retry;
6379 
6380 	mtree_unlock(mt);
6381 	if (mas_is_err(&mas))
6382 		return xa_err(mas.node);
6383 
6384 	return 0;
6385 }
6386 EXPORT_SYMBOL(mtree_store_range);
6387 
6388 /**
6389  * mtree_store() - Store an entry at a given index.
6390  * @mt: The maple tree
6391  * @index: The index to store the value
6392  * @entry: The entry to store
6393  * @gfp: The GFP_FLAGS to use for allocations
6394  *
6395  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6396  * be allocated.
6397  */
6398 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6399 		 gfp_t gfp)
6400 {
6401 	return mtree_store_range(mt, index, index, entry, gfp);
6402 }
6403 EXPORT_SYMBOL(mtree_store);
6404 
6405 /**
6406  * mtree_insert_range() - Insert an entry at a given range if there is no value.
6407  * @mt: The maple tree
6408  * @first: The start of the range
6409  * @last: The end of the range
6410  * @entry: The entry to store
6411  * @gfp: The GFP_FLAGS to use for allocations.
6412  *
6413  * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid
6414  * request, -ENOMEM if memory could not be allocated.
6415  */
6416 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6417 		unsigned long last, void *entry, gfp_t gfp)
6418 {
6419 	MA_STATE(ms, mt, first, last);
6420 
6421 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6422 		return -EINVAL;
6423 
6424 	if (first > last)
6425 		return -EINVAL;
6426 
6427 	mtree_lock(mt);
6428 retry:
6429 	mas_insert(&ms, entry);
6430 	if (mas_nomem(&ms, gfp))
6431 		goto retry;
6432 
6433 	mtree_unlock(mt);
6434 	if (mas_is_err(&ms))
6435 		return xa_err(ms.node);
6436 
6437 	return 0;
6438 }
6439 EXPORT_SYMBOL(mtree_insert_range);
6440 
6441 /**
6442  * mtree_insert() - Insert an entry at a given index if there is no value.
6443  * @mt: The maple tree
6444  * @index : The index to store the value
6445  * @entry: The entry to store
6446  * @gfp: The GFP_FLAGS to use for allocations.
6447  *
6448  * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid
6449  * request, -ENOMEM if memory could not be allocated.
6450  */
6451 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6452 		 gfp_t gfp)
6453 {
6454 	return mtree_insert_range(mt, index, index, entry, gfp);
6455 }
6456 EXPORT_SYMBOL(mtree_insert);
6457 
6458 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6459 		void *entry, unsigned long size, unsigned long min,
6460 		unsigned long max, gfp_t gfp)
6461 {
6462 	int ret = 0;
6463 
6464 	MA_STATE(mas, mt, 0, 0);
6465 	if (!mt_is_alloc(mt))
6466 		return -EINVAL;
6467 
6468 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6469 		return -EINVAL;
6470 
6471 	mtree_lock(mt);
6472 retry:
6473 	ret = mas_empty_area(&mas, min, max, size);
6474 	if (ret)
6475 		goto unlock;
6476 
6477 	mas_insert(&mas, entry);
6478 	/*
6479 	 * mas_nomem() may release the lock, causing the allocated area
6480 	 * to be unavailable, so try to allocate a free area again.
6481 	 */
6482 	if (mas_nomem(&mas, gfp))
6483 		goto retry;
6484 
6485 	if (mas_is_err(&mas))
6486 		ret = xa_err(mas.node);
6487 	else
6488 		*startp = mas.index;
6489 
6490 unlock:
6491 	mtree_unlock(mt);
6492 	return ret;
6493 }
6494 EXPORT_SYMBOL(mtree_alloc_range);
6495 
6496 /**
6497  * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
6498  * @mt: The maple tree.
6499  * @startp: Pointer to ID.
6500  * @range_lo: Lower bound of range to search.
6501  * @range_hi: Upper bound of range to search.
6502  * @entry: The entry to store.
6503  * @next: Pointer to next ID to allocate.
6504  * @gfp: The GFP_FLAGS to use for allocations.
6505  *
6506  * Finds an empty entry in @mt after @next, stores the new index into
6507  * the @id pointer, stores the entry at that index, then updates @next.
6508  *
6509  * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
6510  *
6511  * Context: Any context.  Takes and releases the mt.lock.  May sleep if
6512  * the @gfp flags permit.
6513  *
6514  * Return: 0 if the allocation succeeded without wrapping, 1 if the
6515  * allocation succeeded after wrapping, -ENOMEM if memory could not be
6516  * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
6517  * free entries.
6518  */
6519 int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
6520 		void *entry, unsigned long range_lo, unsigned long range_hi,
6521 		unsigned long *next, gfp_t gfp)
6522 {
6523 	int ret;
6524 
6525 	MA_STATE(mas, mt, 0, 0);
6526 
6527 	if (!mt_is_alloc(mt))
6528 		return -EINVAL;
6529 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6530 		return -EINVAL;
6531 	mtree_lock(mt);
6532 	ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
6533 			       next, gfp);
6534 	mtree_unlock(mt);
6535 	return ret;
6536 }
6537 EXPORT_SYMBOL(mtree_alloc_cyclic);
6538 
6539 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6540 		void *entry, unsigned long size, unsigned long min,
6541 		unsigned long max, gfp_t gfp)
6542 {
6543 	int ret = 0;
6544 
6545 	MA_STATE(mas, mt, 0, 0);
6546 	if (!mt_is_alloc(mt))
6547 		return -EINVAL;
6548 
6549 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6550 		return -EINVAL;
6551 
6552 	mtree_lock(mt);
6553 retry:
6554 	ret = mas_empty_area_rev(&mas, min, max, size);
6555 	if (ret)
6556 		goto unlock;
6557 
6558 	mas_insert(&mas, entry);
6559 	/*
6560 	 * mas_nomem() may release the lock, causing the allocated area
6561 	 * to be unavailable, so try to allocate a free area again.
6562 	 */
6563 	if (mas_nomem(&mas, gfp))
6564 		goto retry;
6565 
6566 	if (mas_is_err(&mas))
6567 		ret = xa_err(mas.node);
6568 	else
6569 		*startp = mas.index;
6570 
6571 unlock:
6572 	mtree_unlock(mt);
6573 	return ret;
6574 }
6575 EXPORT_SYMBOL(mtree_alloc_rrange);
6576 
6577 /**
6578  * mtree_erase() - Find an index and erase the entire range.
6579  * @mt: The maple tree
6580  * @index: The index to erase
6581  *
6582  * Erasing is the same as a walk to an entry then a store of a NULL to that
6583  * ENTIRE range.  In fact, it is implemented as such using the advanced API.
6584  *
6585  * Return: The entry stored at the @index or %NULL
6586  */
6587 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6588 {
6589 	void *entry = NULL;
6590 
6591 	MA_STATE(mas, mt, index, index);
6592 	trace_ma_op(__func__, &mas);
6593 
6594 	mtree_lock(mt);
6595 	entry = mas_erase(&mas);
6596 	mtree_unlock(mt);
6597 
6598 	return entry;
6599 }
6600 EXPORT_SYMBOL(mtree_erase);
6601 
6602 /*
6603  * mas_dup_free() - Free an incomplete duplication of a tree.
6604  * @mas: The maple state of a incomplete tree.
6605  *
6606  * The parameter @mas->node passed in indicates that the allocation failed on
6607  * this node. This function frees all nodes starting from @mas->node in the
6608  * reverse order of mas_dup_build(). There is no need to hold the source tree
6609  * lock at this time.
6610  */
6611 static void mas_dup_free(struct ma_state *mas)
6612 {
6613 	struct maple_node *node;
6614 	enum maple_type type;
6615 	void __rcu **slots;
6616 	unsigned char count, i;
6617 
6618 	/* Maybe the first node allocation failed. */
6619 	if (mas_is_none(mas))
6620 		return;
6621 
6622 	while (!mte_is_root(mas->node)) {
6623 		mas_ascend(mas);
6624 		if (mas->offset) {
6625 			mas->offset--;
6626 			do {
6627 				mas_descend(mas);
6628 				mas->offset = mas_data_end(mas);
6629 			} while (!mte_is_leaf(mas->node));
6630 
6631 			mas_ascend(mas);
6632 		}
6633 
6634 		node = mte_to_node(mas->node);
6635 		type = mte_node_type(mas->node);
6636 		slots = ma_slots(node, type);
6637 		count = mas_data_end(mas) + 1;
6638 		for (i = 0; i < count; i++)
6639 			((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
6640 		mt_free_bulk(count, slots);
6641 	}
6642 
6643 	node = mte_to_node(mas->node);
6644 	mt_free_one(node);
6645 }
6646 
6647 /*
6648  * mas_copy_node() - Copy a maple node and replace the parent.
6649  * @mas: The maple state of source tree.
6650  * @new_mas: The maple state of new tree.
6651  * @parent: The parent of the new node.
6652  *
6653  * Copy @mas->node to @new_mas->node, set @parent to be the parent of
6654  * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
6655  */
6656 static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
6657 		struct maple_pnode *parent)
6658 {
6659 	struct maple_node *node = mte_to_node(mas->node);
6660 	struct maple_node *new_node = mte_to_node(new_mas->node);
6661 	unsigned long val;
6662 
6663 	/* Copy the node completely. */
6664 	memcpy(new_node, node, sizeof(struct maple_node));
6665 	/* Update the parent node pointer. */
6666 	val = (unsigned long)node->parent & MAPLE_NODE_MASK;
6667 	new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
6668 }
6669 
6670 /*
6671  * mas_dup_alloc() - Allocate child nodes for a maple node.
6672  * @mas: The maple state of source tree.
6673  * @new_mas: The maple state of new tree.
6674  * @gfp: The GFP_FLAGS to use for allocations.
6675  *
6676  * This function allocates child nodes for @new_mas->node during the duplication
6677  * process. If memory allocation fails, @mas is set to -ENOMEM.
6678  */
6679 static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
6680 		gfp_t gfp)
6681 {
6682 	struct maple_node *node = mte_to_node(mas->node);
6683 	struct maple_node *new_node = mte_to_node(new_mas->node);
6684 	enum maple_type type;
6685 	unsigned char request, count, i;
6686 	void __rcu **slots;
6687 	void __rcu **new_slots;
6688 	unsigned long val;
6689 
6690 	/* Allocate memory for child nodes. */
6691 	type = mte_node_type(mas->node);
6692 	new_slots = ma_slots(new_node, type);
6693 	request = mas_data_end(mas) + 1;
6694 	count = mt_alloc_bulk(gfp, request, (void **)new_slots);
6695 	if (unlikely(count < request)) {
6696 		memset(new_slots, 0, request * sizeof(void *));
6697 		mas_set_err(mas, -ENOMEM);
6698 		return;
6699 	}
6700 
6701 	/* Restore node type information in slots. */
6702 	slots = ma_slots(node, type);
6703 	for (i = 0; i < count; i++) {
6704 		val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
6705 		val &= MAPLE_NODE_MASK;
6706 		((unsigned long *)new_slots)[i] |= val;
6707 	}
6708 }
6709 
6710 /*
6711  * mas_dup_build() - Build a new maple tree from a source tree
6712  * @mas: The maple state of source tree, need to be in MAS_START state.
6713  * @new_mas: The maple state of new tree, need to be in MAS_START state.
6714  * @gfp: The GFP_FLAGS to use for allocations.
6715  *
6716  * This function builds a new tree in DFS preorder. If the memory allocation
6717  * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
6718  * last node. mas_dup_free() will free the incomplete duplication of a tree.
6719  *
6720  * Note that the attributes of the two trees need to be exactly the same, and the
6721  * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
6722  */
6723 static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
6724 		gfp_t gfp)
6725 {
6726 	struct maple_node *node;
6727 	struct maple_pnode *parent = NULL;
6728 	struct maple_enode *root;
6729 	enum maple_type type;
6730 
6731 	if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
6732 	    unlikely(!mtree_empty(new_mas->tree))) {
6733 		mas_set_err(mas, -EINVAL);
6734 		return;
6735 	}
6736 
6737 	root = mas_start(mas);
6738 	if (mas_is_ptr(mas) || mas_is_none(mas))
6739 		goto set_new_tree;
6740 
6741 	node = mt_alloc_one(gfp);
6742 	if (!node) {
6743 		new_mas->status = ma_none;
6744 		mas_set_err(mas, -ENOMEM);
6745 		return;
6746 	}
6747 
6748 	type = mte_node_type(mas->node);
6749 	root = mt_mk_node(node, type);
6750 	new_mas->node = root;
6751 	new_mas->min = 0;
6752 	new_mas->max = ULONG_MAX;
6753 	root = mte_mk_root(root);
6754 	while (1) {
6755 		mas_copy_node(mas, new_mas, parent);
6756 		if (!mte_is_leaf(mas->node)) {
6757 			/* Only allocate child nodes for non-leaf nodes. */
6758 			mas_dup_alloc(mas, new_mas, gfp);
6759 			if (unlikely(mas_is_err(mas)))
6760 				return;
6761 		} else {
6762 			/*
6763 			 * This is the last leaf node and duplication is
6764 			 * completed.
6765 			 */
6766 			if (mas->max == ULONG_MAX)
6767 				goto done;
6768 
6769 			/* This is not the last leaf node and needs to go up. */
6770 			do {
6771 				mas_ascend(mas);
6772 				mas_ascend(new_mas);
6773 			} while (mas->offset == mas_data_end(mas));
6774 
6775 			/* Move to the next subtree. */
6776 			mas->offset++;
6777 			new_mas->offset++;
6778 		}
6779 
6780 		mas_descend(mas);
6781 		parent = ma_parent_ptr(mte_to_node(new_mas->node));
6782 		mas_descend(new_mas);
6783 		mas->offset = 0;
6784 		new_mas->offset = 0;
6785 	}
6786 done:
6787 	/* Specially handle the parent of the root node. */
6788 	mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
6789 set_new_tree:
6790 	/* Make them the same height */
6791 	new_mas->tree->ma_flags = mas->tree->ma_flags;
6792 	rcu_assign_pointer(new_mas->tree->ma_root, root);
6793 }
6794 
6795 /**
6796  * __mt_dup(): Duplicate an entire maple tree
6797  * @mt: The source maple tree
6798  * @new: The new maple tree
6799  * @gfp: The GFP_FLAGS to use for allocations
6800  *
6801  * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6802  * traversal. It uses memcpy() to copy nodes in the source tree and allocate
6803  * new child nodes in non-leaf nodes. The new node is exactly the same as the
6804  * source node except for all the addresses stored in it. It will be faster than
6805  * traversing all elements in the source tree and inserting them one by one into
6806  * the new tree.
6807  * The user needs to ensure that the attributes of the source tree and the new
6808  * tree are the same, and the new tree needs to be an empty tree, otherwise
6809  * -EINVAL will be returned.
6810  * Note that the user needs to manually lock the source tree and the new tree.
6811  *
6812  * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If
6813  * the attributes of the two trees are different or the new tree is not an empty
6814  * tree.
6815  */
6816 int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6817 {
6818 	int ret = 0;
6819 	MA_STATE(mas, mt, 0, 0);
6820 	MA_STATE(new_mas, new, 0, 0);
6821 
6822 	mas_dup_build(&mas, &new_mas, gfp);
6823 	if (unlikely(mas_is_err(&mas))) {
6824 		ret = xa_err(mas.node);
6825 		if (ret == -ENOMEM)
6826 			mas_dup_free(&new_mas);
6827 	}
6828 
6829 	return ret;
6830 }
6831 EXPORT_SYMBOL(__mt_dup);
6832 
6833 /**
6834  * mtree_dup(): Duplicate an entire maple tree
6835  * @mt: The source maple tree
6836  * @new: The new maple tree
6837  * @gfp: The GFP_FLAGS to use for allocations
6838  *
6839  * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6840  * traversal. It uses memcpy() to copy nodes in the source tree and allocate
6841  * new child nodes in non-leaf nodes. The new node is exactly the same as the
6842  * source node except for all the addresses stored in it. It will be faster than
6843  * traversing all elements in the source tree and inserting them one by one into
6844  * the new tree.
6845  * The user needs to ensure that the attributes of the source tree and the new
6846  * tree are the same, and the new tree needs to be an empty tree, otherwise
6847  * -EINVAL will be returned.
6848  *
6849  * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If
6850  * the attributes of the two trees are different or the new tree is not an empty
6851  * tree.
6852  */
6853 int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6854 {
6855 	int ret = 0;
6856 	MA_STATE(mas, mt, 0, 0);
6857 	MA_STATE(new_mas, new, 0, 0);
6858 
6859 	mas_lock(&new_mas);
6860 	mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
6861 	mas_dup_build(&mas, &new_mas, gfp);
6862 	mas_unlock(&mas);
6863 	if (unlikely(mas_is_err(&mas))) {
6864 		ret = xa_err(mas.node);
6865 		if (ret == -ENOMEM)
6866 			mas_dup_free(&new_mas);
6867 	}
6868 
6869 	mas_unlock(&new_mas);
6870 	return ret;
6871 }
6872 EXPORT_SYMBOL(mtree_dup);
6873 
6874 /**
6875  * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6876  * @mt: The maple tree
6877  *
6878  * Note: Does not handle locking.
6879  */
6880 void __mt_destroy(struct maple_tree *mt)
6881 {
6882 	void *root = mt_root_locked(mt);
6883 
6884 	rcu_assign_pointer(mt->ma_root, NULL);
6885 	if (xa_is_node(root))
6886 		mte_destroy_walk(root, mt);
6887 
6888 	mt->ma_flags = mt_attr(mt);
6889 }
6890 EXPORT_SYMBOL_GPL(__mt_destroy);
6891 
6892 /**
6893  * mtree_destroy() - Destroy a maple tree
6894  * @mt: The maple tree
6895  *
6896  * Frees all resources used by the tree.  Handles locking.
6897  */
6898 void mtree_destroy(struct maple_tree *mt)
6899 {
6900 	mtree_lock(mt);
6901 	__mt_destroy(mt);
6902 	mtree_unlock(mt);
6903 }
6904 EXPORT_SYMBOL(mtree_destroy);
6905 
6906 /**
6907  * mt_find() - Search from the start up until an entry is found.
6908  * @mt: The maple tree
6909  * @index: Pointer which contains the start location of the search
6910  * @max: The maximum value of the search range
6911  *
6912  * Takes RCU read lock internally to protect the search, which does not
6913  * protect the returned pointer after dropping RCU read lock.
6914  * See also: Documentation/core-api/maple_tree.rst
6915  *
6916  * In case that an entry is found @index is updated to point to the next
6917  * possible entry independent whether the found entry is occupying a
6918  * single index or a range if indices.
6919  *
6920  * Return: The entry at or after the @index or %NULL
6921  */
6922 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6923 {
6924 	MA_STATE(mas, mt, *index, *index);
6925 	void *entry;
6926 #ifdef CONFIG_DEBUG_MAPLE_TREE
6927 	unsigned long copy = *index;
6928 #endif
6929 
6930 	trace_ma_read(__func__, &mas);
6931 
6932 	if ((*index) > max)
6933 		return NULL;
6934 
6935 	rcu_read_lock();
6936 retry:
6937 	entry = mas_state_walk(&mas);
6938 	if (mas_is_start(&mas))
6939 		goto retry;
6940 
6941 	if (unlikely(xa_is_zero(entry)))
6942 		entry = NULL;
6943 
6944 	if (entry)
6945 		goto unlock;
6946 
6947 	while (mas_is_active(&mas) && (mas.last < max)) {
6948 		entry = mas_next_entry(&mas, max);
6949 		if (likely(entry && !xa_is_zero(entry)))
6950 			break;
6951 	}
6952 
6953 	if (unlikely(xa_is_zero(entry)))
6954 		entry = NULL;
6955 unlock:
6956 	rcu_read_unlock();
6957 	if (likely(entry)) {
6958 		*index = mas.last + 1;
6959 #ifdef CONFIG_DEBUG_MAPLE_TREE
6960 		if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6961 			pr_err("index not increased! %lx <= %lx\n",
6962 			       *index, copy);
6963 #endif
6964 	}
6965 
6966 	return entry;
6967 }
6968 EXPORT_SYMBOL(mt_find);
6969 
6970 /**
6971  * mt_find_after() - Search from the start up until an entry is found.
6972  * @mt: The maple tree
6973  * @index: Pointer which contains the start location of the search
6974  * @max: The maximum value to check
6975  *
6976  * Same as mt_find() except that it checks @index for 0 before
6977  * searching. If @index == 0, the search is aborted. This covers a wrap
6978  * around of @index to 0 in an iterator loop.
6979  *
6980  * Return: The entry at or after the @index or %NULL
6981  */
6982 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6983 		    unsigned long max)
6984 {
6985 	if (!(*index))
6986 		return NULL;
6987 
6988 	return mt_find(mt, index, max);
6989 }
6990 EXPORT_SYMBOL(mt_find_after);
6991 
6992 #ifdef CONFIG_DEBUG_MAPLE_TREE
6993 atomic_t maple_tree_tests_run;
6994 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6995 atomic_t maple_tree_tests_passed;
6996 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6997 
6998 #ifndef __KERNEL__
6999 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
7000 void mt_set_non_kernel(unsigned int val)
7001 {
7002 	kmem_cache_set_non_kernel(maple_node_cache, val);
7003 }
7004 
7005 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
7006 unsigned long mt_get_alloc_size(void)
7007 {
7008 	return kmem_cache_get_alloc(maple_node_cache);
7009 }
7010 
7011 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
7012 void mt_zero_nr_tallocated(void)
7013 {
7014 	kmem_cache_zero_nr_tallocated(maple_node_cache);
7015 }
7016 
7017 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
7018 unsigned int mt_nr_tallocated(void)
7019 {
7020 	return kmem_cache_nr_tallocated(maple_node_cache);
7021 }
7022 
7023 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
7024 unsigned int mt_nr_allocated(void)
7025 {
7026 	return kmem_cache_nr_allocated(maple_node_cache);
7027 }
7028 
7029 void mt_cache_shrink(void)
7030 {
7031 }
7032 #else
7033 /*
7034  * mt_cache_shrink() - For testing, don't use this.
7035  *
7036  * Certain testcases can trigger an OOM when combined with other memory
7037  * debugging configuration options.  This function is used to reduce the
7038  * possibility of an out of memory even due to kmem_cache objects remaining
7039  * around for longer than usual.
7040  */
7041 void mt_cache_shrink(void)
7042 {
7043 	kmem_cache_shrink(maple_node_cache);
7044 
7045 }
7046 EXPORT_SYMBOL_GPL(mt_cache_shrink);
7047 
7048 #endif /* not defined __KERNEL__ */
7049 /*
7050  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
7051  * @mas: The maple state
7052  * @offset: The offset into the slot array to fetch.
7053  *
7054  * Return: The entry stored at @offset.
7055  */
7056 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
7057 		unsigned char offset)
7058 {
7059 	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
7060 			offset);
7061 }
7062 
7063 /* Depth first search, post-order */
7064 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
7065 {
7066 
7067 	struct maple_enode *p, *mn = mas->node;
7068 	unsigned long p_min, p_max;
7069 
7070 	mas_next_node(mas, mas_mn(mas), max);
7071 	if (!mas_is_overflow(mas))
7072 		return;
7073 
7074 	if (mte_is_root(mn))
7075 		return;
7076 
7077 	mas->node = mn;
7078 	mas_ascend(mas);
7079 	do {
7080 		p = mas->node;
7081 		p_min = mas->min;
7082 		p_max = mas->max;
7083 		mas_prev_node(mas, 0);
7084 	} while (!mas_is_underflow(mas));
7085 
7086 	mas->node = p;
7087 	mas->max = p_max;
7088 	mas->min = p_min;
7089 }
7090 
7091 /* Tree validations */
7092 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7093 		unsigned long min, unsigned long max, unsigned int depth,
7094 		enum mt_dump_format format);
7095 static void mt_dump_range(unsigned long min, unsigned long max,
7096 			  unsigned int depth, enum mt_dump_format format)
7097 {
7098 	static const char spaces[] = "                                ";
7099 
7100 	switch(format) {
7101 	case mt_dump_hex:
7102 		if (min == max)
7103 			pr_info("%.*s%lx: ", depth * 2, spaces, min);
7104 		else
7105 			pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
7106 		break;
7107 	case mt_dump_dec:
7108 		if (min == max)
7109 			pr_info("%.*s%lu: ", depth * 2, spaces, min);
7110 		else
7111 			pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
7112 	}
7113 }
7114 
7115 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
7116 			  unsigned int depth, enum mt_dump_format format)
7117 {
7118 	mt_dump_range(min, max, depth, format);
7119 
7120 	if (xa_is_value(entry))
7121 		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
7122 				xa_to_value(entry), entry);
7123 	else if (xa_is_zero(entry))
7124 		pr_cont("zero (%ld)\n", xa_to_internal(entry));
7125 	else if (mt_is_reserved(entry))
7126 		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
7127 	else
7128 		pr_cont("%p\n", entry);
7129 }
7130 
7131 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
7132 		unsigned long min, unsigned long max, unsigned int depth,
7133 		enum mt_dump_format format)
7134 {
7135 	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
7136 	bool leaf = mte_is_leaf(entry);
7137 	unsigned long first = min;
7138 	int i;
7139 
7140 	pr_cont(" contents: ");
7141 	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
7142 		switch(format) {
7143 		case mt_dump_hex:
7144 			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7145 			break;
7146 		case mt_dump_dec:
7147 			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7148 		}
7149 	}
7150 	pr_cont("%p\n", node->slot[i]);
7151 	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
7152 		unsigned long last = max;
7153 
7154 		if (i < (MAPLE_RANGE64_SLOTS - 1))
7155 			last = node->pivot[i];
7156 		else if (!node->slot[i] && max != mt_node_max(entry))
7157 			break;
7158 		if (last == 0 && i > 0)
7159 			break;
7160 		if (leaf)
7161 			mt_dump_entry(mt_slot(mt, node->slot, i),
7162 					first, last, depth + 1, format);
7163 		else if (node->slot[i])
7164 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
7165 					first, last, depth + 1, format);
7166 
7167 		if (last == max)
7168 			break;
7169 		if (last > max) {
7170 			switch(format) {
7171 			case mt_dump_hex:
7172 				pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
7173 					node, last, max, i);
7174 				break;
7175 			case mt_dump_dec:
7176 				pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7177 					node, last, max, i);
7178 			}
7179 		}
7180 		first = last + 1;
7181 	}
7182 }
7183 
7184 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
7185 	unsigned long min, unsigned long max, unsigned int depth,
7186 	enum mt_dump_format format)
7187 {
7188 	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
7189 	bool leaf = mte_is_leaf(entry);
7190 	unsigned long first = min;
7191 	int i;
7192 
7193 	pr_cont(" contents: ");
7194 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7195 		switch (format) {
7196 		case mt_dump_hex:
7197 			pr_cont("%lx ", node->gap[i]);
7198 			break;
7199 		case mt_dump_dec:
7200 			pr_cont("%lu ", node->gap[i]);
7201 		}
7202 	}
7203 	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
7204 	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
7205 		switch (format) {
7206 		case mt_dump_hex:
7207 			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7208 			break;
7209 		case mt_dump_dec:
7210 			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7211 		}
7212 	}
7213 	pr_cont("%p\n", node->slot[i]);
7214 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7215 		unsigned long last = max;
7216 
7217 		if (i < (MAPLE_ARANGE64_SLOTS - 1))
7218 			last = node->pivot[i];
7219 		else if (!node->slot[i])
7220 			break;
7221 		if (last == 0 && i > 0)
7222 			break;
7223 		if (leaf)
7224 			mt_dump_entry(mt_slot(mt, node->slot, i),
7225 					first, last, depth + 1, format);
7226 		else if (node->slot[i])
7227 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
7228 					first, last, depth + 1, format);
7229 
7230 		if (last == max)
7231 			break;
7232 		if (last > max) {
7233 			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7234 					node, last, max, i);
7235 			break;
7236 		}
7237 		first = last + 1;
7238 	}
7239 }
7240 
7241 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7242 		unsigned long min, unsigned long max, unsigned int depth,
7243 		enum mt_dump_format format)
7244 {
7245 	struct maple_node *node = mte_to_node(entry);
7246 	unsigned int type = mte_node_type(entry);
7247 	unsigned int i;
7248 
7249 	mt_dump_range(min, max, depth, format);
7250 
7251 	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
7252 			node ? node->parent : NULL);
7253 	switch (type) {
7254 	case maple_dense:
7255 		pr_cont("\n");
7256 		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
7257 			if (min + i > max)
7258 				pr_cont("OUT OF RANGE: ");
7259 			mt_dump_entry(mt_slot(mt, node->slot, i),
7260 					min + i, min + i, depth, format);
7261 		}
7262 		break;
7263 	case maple_leaf_64:
7264 	case maple_range_64:
7265 		mt_dump_range64(mt, entry, min, max, depth, format);
7266 		break;
7267 	case maple_arange_64:
7268 		mt_dump_arange64(mt, entry, min, max, depth, format);
7269 		break;
7270 
7271 	default:
7272 		pr_cont(" UNKNOWN TYPE\n");
7273 	}
7274 }
7275 
7276 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
7277 {
7278 	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
7279 
7280 	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
7281 		 mt, mt->ma_flags, mt_height(mt), entry);
7282 	if (!xa_is_node(entry))
7283 		mt_dump_entry(entry, 0, 0, 0, format);
7284 	else if (entry)
7285 		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
7286 }
7287 EXPORT_SYMBOL_GPL(mt_dump);
7288 
7289 /*
7290  * Calculate the maximum gap in a node and check if that's what is reported in
7291  * the parent (unless root).
7292  */
7293 static void mas_validate_gaps(struct ma_state *mas)
7294 {
7295 	struct maple_enode *mte = mas->node;
7296 	struct maple_node *p_mn, *node = mte_to_node(mte);
7297 	enum maple_type mt = mte_node_type(mas->node);
7298 	unsigned long gap = 0, max_gap = 0;
7299 	unsigned long p_end, p_start = mas->min;
7300 	unsigned char p_slot, offset;
7301 	unsigned long *gaps = NULL;
7302 	unsigned long *pivots = ma_pivots(node, mt);
7303 	unsigned int i;
7304 
7305 	if (ma_is_dense(mt)) {
7306 		for (i = 0; i < mt_slot_count(mte); i++) {
7307 			if (mas_get_slot(mas, i)) {
7308 				if (gap > max_gap)
7309 					max_gap = gap;
7310 				gap = 0;
7311 				continue;
7312 			}
7313 			gap++;
7314 		}
7315 		goto counted;
7316 	}
7317 
7318 	gaps = ma_gaps(node, mt);
7319 	for (i = 0; i < mt_slot_count(mte); i++) {
7320 		p_end = mas_safe_pivot(mas, pivots, i, mt);
7321 
7322 		if (!gaps) {
7323 			if (!mas_get_slot(mas, i))
7324 				gap = p_end - p_start + 1;
7325 		} else {
7326 			void *entry = mas_get_slot(mas, i);
7327 
7328 			gap = gaps[i];
7329 			MT_BUG_ON(mas->tree, !entry);
7330 
7331 			if (gap > p_end - p_start + 1) {
7332 				pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7333 				       mas_mn(mas), i, gap, p_end, p_start,
7334 				       p_end - p_start + 1);
7335 				MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
7336 			}
7337 		}
7338 
7339 		if (gap > max_gap)
7340 			max_gap = gap;
7341 
7342 		p_start = p_end + 1;
7343 		if (p_end >= mas->max)
7344 			break;
7345 	}
7346 
7347 counted:
7348 	if (mt == maple_arange_64) {
7349 		MT_BUG_ON(mas->tree, !gaps);
7350 		offset = ma_meta_gap(node);
7351 		if (offset > i) {
7352 			pr_err("gap offset %p[%u] is invalid\n", node, offset);
7353 			MT_BUG_ON(mas->tree, 1);
7354 		}
7355 
7356 		if (gaps[offset] != max_gap) {
7357 			pr_err("gap %p[%u] is not the largest gap %lu\n",
7358 			       node, offset, max_gap);
7359 			MT_BUG_ON(mas->tree, 1);
7360 		}
7361 
7362 		for (i++ ; i < mt_slot_count(mte); i++) {
7363 			if (gaps[i] != 0) {
7364 				pr_err("gap %p[%u] beyond node limit != 0\n",
7365 				       node, i);
7366 				MT_BUG_ON(mas->tree, 1);
7367 			}
7368 		}
7369 	}
7370 
7371 	if (mte_is_root(mte))
7372 		return;
7373 
7374 	p_slot = mte_parent_slot(mas->node);
7375 	p_mn = mte_parent(mte);
7376 	MT_BUG_ON(mas->tree, max_gap > mas->max);
7377 	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7378 		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7379 		mt_dump(mas->tree, mt_dump_hex);
7380 		MT_BUG_ON(mas->tree, 1);
7381 	}
7382 }
7383 
7384 static void mas_validate_parent_slot(struct ma_state *mas)
7385 {
7386 	struct maple_node *parent;
7387 	struct maple_enode *node;
7388 	enum maple_type p_type;
7389 	unsigned char p_slot;
7390 	void __rcu **slots;
7391 	int i;
7392 
7393 	if (mte_is_root(mas->node))
7394 		return;
7395 
7396 	p_slot = mte_parent_slot(mas->node);
7397 	p_type = mas_parent_type(mas, mas->node);
7398 	parent = mte_parent(mas->node);
7399 	slots = ma_slots(parent, p_type);
7400 	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7401 
7402 	/* Check prev/next parent slot for duplicate node entry */
7403 
7404 	for (i = 0; i < mt_slots[p_type]; i++) {
7405 		node = mas_slot(mas, slots, i);
7406 		if (i == p_slot) {
7407 			if (node != mas->node)
7408 				pr_err("parent %p[%u] does not have %p\n",
7409 					parent, i, mas_mn(mas));
7410 			MT_BUG_ON(mas->tree, node != mas->node);
7411 		} else if (node == mas->node) {
7412 			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7413 			       mas_mn(mas), parent, i, p_slot);
7414 			MT_BUG_ON(mas->tree, node == mas->node);
7415 		}
7416 	}
7417 }
7418 
7419 static void mas_validate_child_slot(struct ma_state *mas)
7420 {
7421 	enum maple_type type = mte_node_type(mas->node);
7422 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7423 	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7424 	struct maple_enode *child;
7425 	unsigned char i;
7426 
7427 	if (mte_is_leaf(mas->node))
7428 		return;
7429 
7430 	for (i = 0; i < mt_slots[type]; i++) {
7431 		child = mas_slot(mas, slots, i);
7432 
7433 		if (!child) {
7434 			pr_err("Non-leaf node lacks child at %p[%u]\n",
7435 			       mas_mn(mas), i);
7436 			MT_BUG_ON(mas->tree, 1);
7437 		}
7438 
7439 		if (mte_parent_slot(child) != i) {
7440 			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7441 			       mas_mn(mas), i, mte_to_node(child),
7442 			       mte_parent_slot(child));
7443 			MT_BUG_ON(mas->tree, 1);
7444 		}
7445 
7446 		if (mte_parent(child) != mte_to_node(mas->node)) {
7447 			pr_err("child %p has parent %p not %p\n",
7448 			       mte_to_node(child), mte_parent(child),
7449 			       mte_to_node(mas->node));
7450 			MT_BUG_ON(mas->tree, 1);
7451 		}
7452 
7453 		if (i < mt_pivots[type] && pivots[i] == mas->max)
7454 			break;
7455 	}
7456 }
7457 
7458 /*
7459  * Validate all pivots are within mas->min and mas->max, check metadata ends
7460  * where the maximum ends and ensure there is no slots or pivots set outside of
7461  * the end of the data.
7462  */
7463 static void mas_validate_limits(struct ma_state *mas)
7464 {
7465 	int i;
7466 	unsigned long prev_piv = 0;
7467 	enum maple_type type = mte_node_type(mas->node);
7468 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7469 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7470 
7471 	for (i = 0; i < mt_slots[type]; i++) {
7472 		unsigned long piv;
7473 
7474 		piv = mas_safe_pivot(mas, pivots, i, type);
7475 
7476 		if (!piv && (i != 0)) {
7477 			pr_err("Missing node limit pivot at %p[%u]",
7478 			       mas_mn(mas), i);
7479 			MAS_WARN_ON(mas, 1);
7480 		}
7481 
7482 		if (prev_piv > piv) {
7483 			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7484 				mas_mn(mas), i, piv, prev_piv);
7485 			MAS_WARN_ON(mas, piv < prev_piv);
7486 		}
7487 
7488 		if (piv < mas->min) {
7489 			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7490 				piv, mas->min);
7491 			MAS_WARN_ON(mas, piv < mas->min);
7492 		}
7493 		if (piv > mas->max) {
7494 			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7495 				piv, mas->max);
7496 			MAS_WARN_ON(mas, piv > mas->max);
7497 		}
7498 		prev_piv = piv;
7499 		if (piv == mas->max)
7500 			break;
7501 	}
7502 
7503 	if (mas_data_end(mas) != i) {
7504 		pr_err("node%p: data_end %u != the last slot offset %u\n",
7505 		       mas_mn(mas), mas_data_end(mas), i);
7506 		MT_BUG_ON(mas->tree, 1);
7507 	}
7508 
7509 	for (i += 1; i < mt_slots[type]; i++) {
7510 		void *entry = mas_slot(mas, slots, i);
7511 
7512 		if (entry && (i != mt_slots[type] - 1)) {
7513 			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7514 			       i, entry);
7515 			MT_BUG_ON(mas->tree, entry != NULL);
7516 		}
7517 
7518 		if (i < mt_pivots[type]) {
7519 			unsigned long piv = pivots[i];
7520 
7521 			if (!piv)
7522 				continue;
7523 
7524 			pr_err("%p[%u] should not have piv %lu\n",
7525 			       mas_mn(mas), i, piv);
7526 			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
7527 		}
7528 	}
7529 }
7530 
7531 static void mt_validate_nulls(struct maple_tree *mt)
7532 {
7533 	void *entry, *last = (void *)1;
7534 	unsigned char offset = 0;
7535 	void __rcu **slots;
7536 	MA_STATE(mas, mt, 0, 0);
7537 
7538 	mas_start(&mas);
7539 	if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
7540 		return;
7541 
7542 	while (!mte_is_leaf(mas.node))
7543 		mas_descend(&mas);
7544 
7545 	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7546 	do {
7547 		entry = mas_slot(&mas, slots, offset);
7548 		if (!last && !entry) {
7549 			pr_err("Sequential nulls end at %p[%u]\n",
7550 				mas_mn(&mas), offset);
7551 		}
7552 		MT_BUG_ON(mt, !last && !entry);
7553 		last = entry;
7554 		if (offset == mas_data_end(&mas)) {
7555 			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7556 			if (mas_is_overflow(&mas))
7557 				return;
7558 			offset = 0;
7559 			slots = ma_slots(mte_to_node(mas.node),
7560 					 mte_node_type(mas.node));
7561 		} else {
7562 			offset++;
7563 		}
7564 
7565 	} while (!mas_is_overflow(&mas));
7566 }
7567 
7568 /*
7569  * validate a maple tree by checking:
7570  * 1. The limits (pivots are within mas->min to mas->max)
7571  * 2. The gap is correctly set in the parents
7572  */
7573 void mt_validate(struct maple_tree *mt)
7574 {
7575 	unsigned char end;
7576 
7577 	MA_STATE(mas, mt, 0, 0);
7578 	rcu_read_lock();
7579 	mas_start(&mas);
7580 	if (!mas_is_active(&mas))
7581 		goto done;
7582 
7583 	while (!mte_is_leaf(mas.node))
7584 		mas_descend(&mas);
7585 
7586 	while (!mas_is_overflow(&mas)) {
7587 		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7588 		end = mas_data_end(&mas);
7589 		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
7590 				(mas.max != ULONG_MAX))) {
7591 			pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
7592 		}
7593 
7594 		mas_validate_parent_slot(&mas);
7595 		mas_validate_limits(&mas);
7596 		mas_validate_child_slot(&mas);
7597 		if (mt_is_alloc(mt))
7598 			mas_validate_gaps(&mas);
7599 		mas_dfs_postorder(&mas, ULONG_MAX);
7600 	}
7601 	mt_validate_nulls(mt);
7602 done:
7603 	rcu_read_unlock();
7604 
7605 }
7606 EXPORT_SYMBOL_GPL(mt_validate);
7607 
7608 void mas_dump(const struct ma_state *mas)
7609 {
7610 	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7611 	switch (mas->status) {
7612 	case ma_active:
7613 		pr_err("(ma_active)");
7614 		break;
7615 	case ma_none:
7616 		pr_err("(ma_none)");
7617 		break;
7618 	case ma_root:
7619 		pr_err("(ma_root)");
7620 		break;
7621 	case ma_start:
7622 		pr_err("(ma_start) ");
7623 		break;
7624 	case ma_pause:
7625 		pr_err("(ma_pause) ");
7626 		break;
7627 	case ma_overflow:
7628 		pr_err("(ma_overflow) ");
7629 		break;
7630 	case ma_underflow:
7631 		pr_err("(ma_underflow) ");
7632 		break;
7633 	case ma_error:
7634 		pr_err("(ma_error) ");
7635 		break;
7636 	}
7637 
7638 	pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
7639 	       mas->index, mas->last);
7640 	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
7641 	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
7642 	if (mas->index > mas->last)
7643 		pr_err("Check index & last\n");
7644 }
7645 EXPORT_SYMBOL_GPL(mas_dump);
7646 
7647 void mas_wr_dump(const struct ma_wr_state *wr_mas)
7648 {
7649 	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7650 	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7651 	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7652 	       wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
7653 	       wr_mas->end_piv);
7654 }
7655 EXPORT_SYMBOL_GPL(mas_wr_dump);
7656 
7657 #endif /* CONFIG_DEBUG_MAPLE_TREE */
7658