/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_MAPLE_TREE_H
#define _LINUX_MAPLE_TREE_H
/*
 * Maple Tree - An RCU-safe adaptive tree for storing ranges
 * Copyright (c) 2018-2022 Oracle
 * Authors:     Liam R. Howlett <Liam.Howlett@Oracle.com>
 *              Matthew Wilcox <willy@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */

/*
 * Allocated nodes are mutable until they have been inserted into the tree,
 * at which time they cannot change their type until they have been removed
 * from the tree and an RCU grace period has passed.
 *
 * Removed nodes have their ->parent set to point to themselves.  RCU readers
 * check ->parent before relying on the value that they loaded from the
 * slots array.  This lets us reuse the slots array for the RCU head.
 *
 * Nodes in the tree point to their parent unless bit 0 is set.
 */
#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
/* 64bit sizes */
#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS	63	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	32	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	21	/* 240 bytes */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */

#define MAPLE_NODE_MASK		255UL

/*
 * The node->parent of the root node has bit 0 set and the rest of the pointer
 * is a pointer to the tree itself.  No more bits are available in this pointer
 * (on m68k, the data structure may only be 2-byte aligned).
 *
 * Internal non-root nodes can only have maple_range_* nodes as parents.  The
 * parent pointer is 256B aligned like all other tree nodes.  When storing 32
 * or 64 bit values, the offset can fit into 4 bits.  16 bit values need an
 * extra bit to store the offset.  This extra bit comes from reusing the last
 * bit of the node type.  This is possible by using bit 1 to indicate whether
 * bit 2 is part of the type or the slot.
 *
 * Once the type is decided, an allocation range type is distinguished from a
 * plain range type by examining the immutable MT_FLAGS_ALLOC_RANGE tree flag.
 *
 *  Node types:
 *   0b??1 = Root
 *   0b?00 = 16 bit nodes
 *   0b010 = 32 bit nodes
 *   0b110 = 64 bit nodes
 *
 *  Slot size and location in the parent pointer:
 *   type  : slot location
 *   0b??1 : Root
 *   0b?00 : 16 bit values, type in 0-1, slot in 2-6
 *   0b010 : 32 bit values, type in 0-2, slot in 3-6
 *   0b110 : 64 bit values, type in 0-2, slot in 3-6
 */

/*
 * This metadata is used to optimize the gap updating code and in reverse
 * searching for gaps or any other code that needs to find the end of the data.
 */
struct maple_metadata {
	unsigned char end;	/* end of data */
	unsigned char gap;	/* offset of largest gap */
};

/*
 * Leaf nodes do not store pointers to nodes; they store user data.  Users may
 * store almost any bit pattern.  As noted below, the optimisation of storing an
 * entry at 0 in the root pointer cannot be done for data which have the bottom
 * two bits set to '10'.  We also reserve values with the bottom two bits set to
 * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use.  Some APIs
 * return errnos as a negative errno shifted left by two bits with the bottom
 * two bits set to '10', and while choosing to store these values in the array
 * is not an error, it may lead to confusion if you're testing for an error with
 * mas_is_err().
 *
 * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
 * 3-6); bit 2 is reserved.  That leaves bits 0-1 unused for now.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 */
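
/*
 * For illustration only (hypothetical values): a pointer from kmalloc() ends
 * in '00' and is always safe to store, as are values ending in '01' or '11';
 * a value such as (void *)0x6 ends in '10' and lies below 4096, so it falls
 * in the range reserved for internal use and must not be stored.
 */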

struct maple_range_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
	union {
		void __rcu *slot[MAPLE_RANGE64_SLOTS];
		struct {
			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
			struct maple_metadata meta;
		};
	};
};

/*
 * At tree creation time, the user can specify that they're willing to trade off
 * storing fewer entries in a tree in return for storing more information in
 * each node.
 *
 * The maple tree supports recording the largest range of NULL entries available
 * in this node, also called gaps.  This optimises the tree for allocating a
 * range.
 */
struct maple_arange_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
	unsigned long gap[MAPLE_ARANGE64_SLOTS];
	struct maple_metadata meta;
};

struct maple_topiary {
	struct maple_pnode *parent;
	struct maple_enode *next; /* Overlaps the pivot */
};

enum maple_type {
	maple_dense,
	maple_leaf_64,
	maple_range_64,
	maple_arange_64,
};

enum store_type {
	wr_invalid,
	wr_new_root,
	wr_store_root,
	wr_exact_fit,
	wr_spanning_store,
	wr_split_store,
	wr_rebalance,
	wr_append,
	wr_node_store,
	wr_slot_store,
};

/**
 * DOC: Maple tree flags
 *
 * * MT_FLAGS_ALLOC_RANGE	- Track gaps in this tree
 * * MT_FLAGS_USE_RCU		- Operate in RCU mode
 * * MT_FLAGS_HEIGHT_OFFSET	- The position of the tree height in the flags
 * * MT_FLAGS_HEIGHT_MASK	- The mask for the maple tree height value
 * * MT_FLAGS_LOCK_MASK		- How the mt_lock is used
 * * MT_FLAGS_LOCK_IRQ		- Acquired irq-safe
 * * MT_FLAGS_LOCK_BH		- Acquired bh-safe
 * * MT_FLAGS_LOCK_EXTERN	- mt_lock is not used
 *
 * MAPLE_HEIGHT_MAX	The largest height that can be stored
 */
#define MT_FLAGS_ALLOC_RANGE	0x01
#define MT_FLAGS_USE_RCU	0x02
#define MT_FLAGS_HEIGHT_OFFSET	0x02
#define MT_FLAGS_HEIGHT_MASK	0x7C
#define MT_FLAGS_LOCK_MASK	0x300
#define MT_FLAGS_LOCK_IRQ	0x100
#define MT_FLAGS_LOCK_BH	0x200
#define MT_FLAGS_LOCK_EXTERN	0x300
#define MT_FLAGS_ALLOC_WRAPPED	0x0800

#define MAPLE_HEIGHT_MAX	31
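
/*
 * A minimal sketch (illustrative only) of how the immutable flags are meant
 * to be combined at initialisation time; mt_init_flags() and mt_height() are
 * defined later in this header:
 *
 *	struct maple_tree tree;
 *	unsigned int height;
 *
 *	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU);
 *	height = mt_height(&tree);	reads the height bits from ma_flags
 */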

#define MAPLE_NODE_TYPE_MASK	0x0F
#define MAPLE_NODE_TYPE_SHIFT	0x03

#define MAPLE_RESERVED_RANGE	4096

#ifdef CONFIG_LOCKDEP
#define mt_lock_is_held(mt)                                             \
	(!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))

#define mt_write_lock_is_held(mt)					\
	(!(mt)->ma_external_lock ||					\
	 lock_is_held_type((mt)->ma_external_lock, 0))

#define mt_set_external_lock(mt, lock)					\
	(mt)->ma_external_lock = &(lock)->dep_map

#define mt_on_stack(mt)			(mt).ma_external_lock = NULL
#else
#define mt_lock_is_held(mt)		1
#define mt_write_lock_is_held(mt)	1
#define mt_set_external_lock(mt, lock)	do { } while (0)
#define mt_on_stack(mt)			do { } while (0)
#endif

/*
 * If the tree contains a single entry at index 0, it is usually stored in
 * tree->ma_root.  To optimise for the page cache, an entry which ends in '00',
 * '01' or '11' is stored in the root, but an entry which ends in '10' will be
 * stored in a node.  Bits 3-6 are used to store enum maple_type.
 *
 * The flags are used both to store some immutable information about this tree
 * (set at tree creation time) and dynamic information set under the spinlock.
 *
 * Another use of flags is to indicate global states of the tree.  This is the
 * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
 * RCU mode.  This mode was added to allow the tree to reuse nodes instead of
 * re-allocating and RCU freeing nodes when there is a single user.
 */
struct maple_tree {
	union {
		spinlock_t		ma_lock;
#ifdef CONFIG_LOCKDEP
		struct lockdep_map	*ma_external_lock;
#endif
	};
	unsigned int	ma_flags;
	void __rcu      *ma_root;
};

/**
 * MTREE_INIT() - Initialize a maple tree
 * @name: The maple tree name
 * @__flags: The maple tree flags
 */
#define MTREE_INIT(name, __flags) {					\
	.ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock),		\
	.ma_flags = __flags,						\
	.ma_root = NULL,						\
}

/**
 * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
 * @name: The tree name
 * @__flags: The maple tree flags
 * @__lock: The external lock
 */
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) {				\
	.ma_external_lock = &(__lock).dep_map,				\
	.ma_flags = (__flags),						\
	.ma_root = NULL,						\
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock)	MTREE_INIT(name, __flags)
#endif

#define DEFINE_MTREE(name)						\
	struct maple_tree name = MTREE_INIT(name, 0)
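
/*
 * Usage sketch (illustrative only): a statically initialised tree, and a tree
 * whose users supply their own lock.  "my_lock" is a hypothetical spinlock
 * belonging to the caller.
 *
 *	static DEFINE_MTREE(simple_tree);
 *
 *	static struct maple_tree ext_tree =
 *		MTREE_INIT_EXT(ext_tree, MT_FLAGS_LOCK_EXTERN, my_lock);
 */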

#define mtree_lock(mt)		spin_lock((&(mt)->ma_lock))
#define mtree_lock_nested(mt, subclass) \
		spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt)	spin_unlock((&(mt)->ma_lock))

/*
 * The Maple Tree squeezes various bits in at various points which aren't
 * necessarily obvious.  Usually, this is done by observing that pointers are
 * N-byte aligned and thus the bottom log_2(N) bits are available for use.  We
 * don't use the high bits of pointers to store additional information because
 * we don't know what bits are unused on any given architecture.
 *
 * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
 * low bits for our own purposes.  Nodes are currently of 4 types:
 * 1. Single pointer (Range is 0-0)
 * 2. Non-leaf Allocation Range nodes
 * 3. Non-leaf Range nodes
 * 4. Leaf Range nodes
 *
 * All nodes consist of a number of node slots, pivots, and a parent pointer.
 */

struct maple_node {
	union {
		struct {
			struct maple_pnode *parent;
			void __rcu *slot[MAPLE_NODE_SLOTS];
		};
		struct {
			void *pad;
			struct rcu_head rcu;
			struct maple_enode *piv_parent;
			unsigned char parent_slot;
			enum maple_type type;
			unsigned char slot_len;
			unsigned int ma_flags;
		};
		struct maple_range_64 mr64;
		struct maple_arange_64 ma64;
	};
};
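
/*
 * A sanity sketch (illustrative only, not part of the API): the union above
 * is what keeps a node within one 256-byte, 256-byte-aligned allocation, e.g.
 *
 *	BUILD_BUG_ON(sizeof(struct maple_node) != MAPLE_NODE_MASK + 1);
 */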

/*
 * More complicated stores can cause two nodes to become one or three and
 * potentially alter the height of the tree.  Either half of the tree may need
 * to be rebalanced against the other.  The ma_topiary struct is used to track
 * which nodes have been 'cut' from the tree so that the change can be done
 * safely at a later date.  This is done to support RCU.
 */
struct ma_topiary {
	struct maple_enode *head;
	struct maple_enode *tail;
	struct maple_tree *mtree;
};

void *mtree_load(struct maple_tree *mt, unsigned long index);

int mtree_insert(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);
int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long range_lo, unsigned long range_hi,
		unsigned long *next, gfp_t gfp);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);

int mtree_store_range(struct maple_tree *mt, unsigned long first,
		      unsigned long last, void *entry, gfp_t gfp);
int mtree_store(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);

int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);

void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);
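
/*
 * Normal API usage sketch (illustrative only; error handling trimmed).
 * "ptr" stands in for any pointer the caller wants to associate with the
 * range 5-15.
 *
 *	DEFINE_MTREE(tree);
 *	void *entry;
 *
 *	mtree_store_range(&tree, 5, 15, ptr, GFP_KERNEL);
 *	entry = mtree_load(&tree, 10);		returns ptr
 *	mtree_erase(&tree, 10);			erases the whole 5-15 range
 *	mtree_destroy(&tree);
 */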

/**
 * mtree_empty() - Determine if a tree has any present entries.
 * @mt: Maple Tree.
 *
 * Context: Any context.
 * Return: %true if the tree contains only NULL pointers.
 */
static inline bool mtree_empty(const struct maple_tree *mt)
{
	return mt->ma_root == NULL;
}

/* Advanced API */

/*
 * Maple State Status
 * ma_active means the maple state is pointing to a node and offset and can
 * continue operating on the tree.
 * ma_start means we have not searched the tree.
 * ma_root means we have searched the tree and the entry we found lives in
 * the root of the tree (ie it has index 0, length 1 and is the only entry in
 * the tree).
 * ma_none means we have searched the tree and there is no node in the
 * tree for this entry.  For example, we searched for index 1 in an empty
 * tree.  Or we have a tree which points to a full leaf node and we
 * searched for an entry which is larger than can be contained in that
 * leaf node.
 * ma_pause means the data within the maple state may be stale; restart the
 * operation.
 * ma_overflow means the search has reached the upper limit of the search.
 * ma_underflow means the search has reached the lower limit of the search.
 * ma_error means there was an error; check the node for the error number.
 */
enum maple_status {
	ma_active,
	ma_start,
	ma_root,
	ma_none,
	ma_pause,
	ma_overflow,
	ma_underflow,
	ma_error,
};

/*
 * The maple state is defined in the struct ma_state and is used to keep track
 * of information during operations, and even between operations when using the
 * advanced API.
 *
 * If state->node has bit 0 set then it references a tree location which is not
 * a node (eg the root).  If bit 1 is set, the rest of the bits are a negative
 * errno.  Bit 2 (the 'unallocated slots' bit) is clear.  Bits 3-6 indicate the
 * node type.
 *
 * state->node_request is the number of nodes the current operation needs to
 * allocate.  Nodes allocated for the operation are held in state->sheaf, while
 * state->alloc holds a single allocated node used by fast-path writes.  Nodes
 * are consumed from these fields during a write; anything left over is
 * released by mas_destroy().
 *
 * The state also contains the implied min/max of the state->node, the depth of
 * this search, and the offset.  The implied min/max are either from the parent
 * node or are 0-oo for the root node.  The depth is incremented or decremented
 * every time a node is walked down or up.  The offset is the slot/pivot of
 * interest in the node - either for reading or writing.
 *
 * When returning a value, the maple state index and last respectively contain
 * the start and end of the range for the entry.  Ranges are inclusive in the
 * Maple Tree.
 *
 * The status of the state is used to determine how the next action should treat
 * the state.  For instance, if the status is ma_start then the next action
 * should start at the root of the tree and walk down.  If the status is
 * ma_pause then the node may be stale data and should be discarded.  If the
 * status is ma_overflow, then the last action hit the upper limit.
 */
struct ma_state {
	struct maple_tree *tree;	/* The tree we're operating in */
	unsigned long index;		/* The index we're operating on - range start */
	unsigned long last;		/* The last index we're operating on - range end */
	struct maple_enode *node;	/* The node containing this entry */
	unsigned long min;		/* The minimum index of this node - implied pivot min */
	unsigned long max;		/* The maximum index of this node - implied pivot max */
	struct slab_sheaf *sheaf;	/* Allocated nodes for this operation */
	struct maple_node *alloc;	/* A single allocated node for fast path writes */
	unsigned long node_request;	/* The number of nodes to allocate for this operation */
	enum maple_status status;	/* The status of the state (active, start, none, etc) */
	unsigned char depth;		/* depth of tree descent during write */
	unsigned char offset;
	unsigned char mas_flags;
	unsigned char end;		/* The end of the node */
	enum store_type store_type;	/* The type of store needed for this operation */
};

struct ma_wr_state {
	struct ma_state *mas;
	struct maple_node *node;	/* Decoded mas->node */
	unsigned long r_min;		/* range min */
	unsigned long r_max;		/* range max */
	enum maple_type type;		/* mas->node type */
	unsigned char offset_end;	/* The offset where the write ends */
	unsigned long *pivots;		/* mas->node->pivots pointer */
	unsigned long end_piv;		/* The pivot at the offset end */
	void __rcu **slots;		/* mas->node->slots pointer */
	void *entry;			/* The entry to write */
	void *content;			/* The existing entry that is being overwritten */
	unsigned char vacant_height;	/* Height of lowest node with free space */
	unsigned char sufficient_height;/* Height of lowest node with min sufficiency + 1 nodes */
};

#define mas_lock(mas)           spin_lock(&((mas)->tree->ma_lock))
#define mas_lock_nested(mas, subclass) \
		spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas)         spin_unlock(&((mas)->tree->ma_lock))

/*
 * Special values for ma_state.node.
 * MA_ERROR represents an errno.  After dropping the lock and attempting
 * to resolve the error, the walk would have to be restarted from the
 * top of the tree as the tree may have been modified.
 */
#define MA_ERROR(err) \
		((struct maple_enode *)(((unsigned long)err << 2) | 2UL))

/*
 * When changing MA_STATE, remember to also change rust/kernel/maple_tree.rs
 */
#define MA_STATE(name, mt, first, end)					\
	struct ma_state name = {					\
		.tree = mt,						\
		.index = first,						\
		.last = end,						\
		.node = NULL,						\
		.status = ma_start,					\
		.min = 0,						\
		.max = ULONG_MAX,					\
		.sheaf = NULL,						\
		.alloc = NULL,						\
		.node_request = 0,					\
		.mas_flags = 0,						\
		.store_type = wr_invalid,				\
	}
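
/*
 * Advanced-API read sketch (illustrative only): walk to a single index under
 * the RCU read lock.  "tree" and "dst" are hypothetical caller variables.
 *
 *	MA_STATE(mas, &tree, 12, 12);
 *
 *	rcu_read_lock();
 *	dst = mas_walk(&mas);
 *	rcu_read_unlock();
 */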

#define MA_WR_STATE(name, ma_state, wr_entry)				\
	struct ma_wr_state name = {					\
		.mas = ma_state,					\
		.content = NULL,					\
		.entry = wr_entry,					\
		.vacant_height = 0,					\
		.sufficient_height = 0					\
	}

#define MA_TOPIARY(name, tree)						\
	struct ma_topiary name = {					\
		.head = NULL,						\
		.tail = NULL,						\
		.mtree = tree,						\
	}

void *mas_walk(struct ma_state *mas);
void *mas_store(struct ma_state *mas, void *entry);
void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long min);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
		void *entry, unsigned long range_lo, unsigned long range_hi,
		unsigned long *next, gfp_t gfp);

bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
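
/*
 * One possible write-path sketch (illustrative only): store an entry with the
 * internal lock held, retrying after mas_nomem() has allocated the nodes that
 * a failed attempt asked for.
 *
 *	MA_STATE(mas, &tree, first, last);
 *
 *	mas_lock(&mas);
 *	retry:
 *	mas_store_gfp(&mas, entry, GFP_KERNEL);
 *	if (mas_nomem(&mas, GFP_KERNEL))
 *		goto retry;
 *	mas_unlock(&mas);
 */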

void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long min);
void *mas_next(struct ma_state *mas, unsigned long max);
void *mas_next_range(struct ma_state *mas, unsigned long max);

int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
		   unsigned long size);
/*
 * This finds an empty area from the highest address to the lowest.
 * AKA "Topdown" version.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		       unsigned long max, unsigned long size);
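
/*
 * Gap-search sketch (illustrative only): find room for 16 consecutive indices
 * between 0 and 1000 in a tree initialised with MT_FLAGS_ALLOC_RANGE.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area(&mas, 0, 1000, 16))
 *		start = mas.index;	start of the gap that was found
 *	mas_unlock(&mas);
 */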

static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
			    unsigned long addr)
{
	memset(mas, 0, sizeof(struct ma_state));
	mas->tree = tree;
	mas->index = mas->last = addr;
	mas->max = ULONG_MAX;
	mas->status = ma_start;
	mas->node = NULL;
}

static inline bool mas_is_active(struct ma_state *mas)
{
	return mas->status == ma_active;
}

static inline bool mas_is_err(struct ma_state *mas)
{
	return mas->status == ma_error;
}

/**
 * mas_reset() - Reset a Maple Tree operation state.
 * @mas: Maple Tree operation state.
 *
 * Resets the error or walk state of the @mas so future walks of the
 * tree will start from the root.  Use this if you have dropped the
 * lock and want to reuse the ma_state.
 *
 * Context: Any context.
 */
static __always_inline void mas_reset(struct ma_state *mas)
{
	mas->status = ma_start;
	mas->node = NULL;
}

/**
 * mas_for_each() - Iterate over a range of the maple tree.
 * @__mas: Maple Tree operation state (maple_state)
 * @__entry: Entry retrieved from the tree
 * @__max: maximum index to retrieve from the tree
 *
 * When returned, mas->index and mas->last will hold the entire range for the
 * entry.
 *
 * Note: may return the zero entry.
 */
#define mas_for_each(__mas, __entry, __max) \
	while (((__entry) = mas_find((__mas), (__max))) != NULL)

/**
 * mas_for_each_rev() - Iterate over a range of the maple tree in reverse order.
 * @__mas: Maple Tree operation state (maple_state)
 * @__entry: Entry retrieved from the tree
 * @__min: minimum index to retrieve from the tree
 *
 * When returned, mas->index and mas->last will hold the entire range for the
 * entry.
 *
 * Note: may return the zero entry.
 */
#define mas_for_each_rev(__mas, __entry, __min) \
	while (((__entry) = mas_find_rev((__mas), (__min))) != NULL)
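
/*
 * Iteration sketch (illustrative only): visit every entry up to index 99,
 * periodically dropping the RCU read lock.  process() is a hypothetical
 * caller-supplied helper.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, 99) {
 *		process(entry);
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */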

#ifdef CONFIG_DEBUG_MAPLE_TREE
enum mt_dump_format {
	mt_dump_dec,
	mt_dump_hex,
};

extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
void mas_dump(const struct ma_state *mas);
void mas_wr_dump(const struct ma_wr_state *wr_mas);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
		__func__, __LINE__, __x);				\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_BUG_ON(__mas, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
		__func__, __LINE__, __x);				\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_WR_BUG_ON(__wrmas, __x) do {				\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
		__func__, __LINE__, __x);				\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MT_WARN_ON(__tree, __x)  ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
		__func__, __LINE__, __x);				\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WARN_ON(__mas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
		__func__, __LINE__, __x);				\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WR_WARN_ON(__wrmas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
		__func__, __LINE__, __x);				\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})
#else
#define MT_BUG_ON(__tree, __x)		BUG_ON(__x)
#define MAS_BUG_ON(__mas, __x)		BUG_ON(__x)
#define MAS_WR_BUG_ON(__mas, __x)	BUG_ON(__x)
#define MT_WARN_ON(__tree, __x)		WARN_ON(__x)
#define MAS_WARN_ON(__mas, __x)		WARN_ON(__x)
#define MAS_WR_WARN_ON(__mas, __x)	WARN_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */

/**
 * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
 * current location.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Set the internal maple state values to a sub-range.
 * Please use mas_set_range() if you do not know where you are in the tree.
 */
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
		unsigned long last)
{
	/* Ensure the range starts within the current slot */
	MAS_WARN_ON(mas, mas_is_active(mas) &&
		   (mas->index > start || mas->last < start));
	mas->index = start;
	mas->last = last;
}

/**
 * mas_set_range() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Move the operation state to refer to a different range.  This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
	mas_reset(mas);
	__mas_set_range(mas, start, last);
}

/**
 * mas_set() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @index: New index into the Maple Tree.
 *
 * Move the operation state to refer to a different index.  This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline void mas_set(struct ma_state *mas, unsigned long index)
{
	mas_set_range(mas, index, index);
}
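
/*
 * Preallocation sketch (illustrative only), for a tree protected by an
 * external sleeping lock (MT_FLAGS_LOCK_EXTERN) held for writing throughout:
 * reserve nodes up front so the store itself cannot fail.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_set_range(&mas, first, last);
 *	if (mas_preallocate(&mas, entry, GFP_KERNEL))
 *		return -ENOMEM;
 *	mas_store_prealloc(&mas, entry);
 */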

static inline bool mt_external_lock(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
}

/**
 * mt_init_flags() - Initialise an empty maple tree with flags.
 * @mt: Maple Tree
 * @flags: maple tree flags.
 *
 * If you need to initialise a Maple Tree with special flags (eg, an
 * allocation tree), use this function.
 *
 * Context: Any context.
 */
static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
{
	mt->ma_flags = flags;
	if (!mt_external_lock(mt))
		spin_lock_init(&mt->ma_lock);
	rcu_assign_pointer(mt->ma_root, NULL);
}

/**
 * mt_init() - Initialise an empty maple tree.
 * @mt: Maple Tree
 *
 * Initialise an empty Maple Tree with no special flags.
 *
 * Context: Any context.
 */
static inline void mt_init(struct maple_tree *mt)
{
	mt_init_flags(mt, 0);
}

static inline bool mt_in_rcu(struct maple_tree *mt)
{
#ifdef CONFIG_MAPLE_RCU_DISABLED
	return false;
#endif
	return mt->ma_flags & MT_FLAGS_USE_RCU;
}

/**
 * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
 * @mt: The Maple Tree
 */
static inline void mt_clear_in_rcu(struct maple_tree *mt)
{
	if (!mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

/**
 * mt_set_in_rcu() - Switch the tree to RCU safe mode.
 * @mt: The Maple Tree
 */
static inline void mt_set_in_rcu(struct maple_tree *mt)
{
	if (mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags |= MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags |= MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}
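
/*
 * Sketch (illustrative only): switch an internally locked tree into RCU mode
 * so readers can use rcu_read_lock() rather than taking mtree_lock():
 *
 *	mt_set_in_rcu(&tree);
 *
 *	rcu_read_lock();
 *	entry = mtree_load(&tree, index);
 *	rcu_read_unlock();
 */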

static inline unsigned int mt_height(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}

void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max);
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);

/**
 * mt_for_each - Iterate over each entry starting at index until max.
 * @__tree: The Maple Tree
 * @__entry: The current entry
 * @__index: The index to start the search from. Subsequently used as iterator.
 * @__max: The maximum limit for @index
 *
 * This iterator skips all entries which resolve to a NULL pointer,
 * e.g. entries which have been reserved with XA_ZERO_ENTRY.
 */
#define mt_for_each(__tree, __entry, __index, __max) \
	for (__entry = mt_find(__tree, &(__index), __max); \
		__entry; __entry = mt_find_after(__tree, &(__index), __max))
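
/*
 * Sketch (illustrative only): count every non-NULL entry in the whole tree.
 *
 *	unsigned long index = 0;
 *	unsigned long count = 0;
 *	void *entry;
 *
 *	mt_for_each(&tree, entry, index, ULONG_MAX)
 *		count++;
 */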

#endif /*_LINUX_MAPLE_TREE_H */