// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 * Copyright (c) 2023 ByteDance
 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots. In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
 * indicate that the tree is specifying ranges. Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree. Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset. All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities. When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted. When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
 *
 */
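
/*
 * Illustrative sketch (not part of the build): locating the offset for
 * an index in a range64 node is a linear scan of the pivots, since each
 * pivot is the inclusive upper bound of the slot with the same index:
 *
 *	unsigned char offset = 0;
 *
 *	while (offset < MAPLE_RANGE64_SLOTS - 1 && pivot[offset] < index)
 *		offset++;
 *
 * The entry covering the index is then in slot[offset]; its range began
 * at pivot[offset - 1] + 1, or at the node minimum when offset is 0.
 */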

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK - Bulk insert mode
 * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK 1
#define MA_STATE_REBALANCE 2
#define MA_STATE_PREALLOC 4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense] = MAPLE_NODE_SLOTS,
	[maple_leaf_64] = ULONG_MAX,
	[maple_range_64] = ULONG_MAX,
	[maple_arange_64] = ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense] = MAPLE_NODE_SLOTS,
	[maple_leaf_64] = MAPLE_RANGE64_SLOTS,
	[maple_range_64] = MAPLE_RANGE64_SLOTS,
	[maple_arange_64] = MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense] = 0,
	[maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense] = MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way. Any walkers of the older tree will hit
 * a dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_one(struct maple_node *node)
{
	kmem_cache_free(maple_node_cache, node);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline unsigned int mt_attr(struct maple_tree *mt)
{
	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
}

static __always_inline enum maple_type mte_node_type(
		const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static __always_inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static __always_inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values: entries with the bottom two bits set to '10'
 * (xarray internal entries) which are below 4096 are reserved.
 */
static __always_inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}
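
/*
 * Example (illustrative): xa_mk_internal(5) == (void *)0x16 has the low
 * bits '10' and is below MAPLE_RESERVED_RANGE, so it is reserved and
 * cannot be stored as an entry; a pointer returned by kmalloc() never
 * matches either condition.
 */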

static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
	mas->status = ma_error;
}

static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->status == ma_root;
}

static __always_inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->status == ma_start;
}

static __always_inline bool mas_is_none(const struct ma_state *mas)
{
	return mas->status == ma_none;
}

static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
	return mas->status == ma_pause;
}

static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
	return mas->status == ma_overflow;
}

static inline bool mas_is_underflow(struct ma_state *mas)
{
	return mas->status == ma_underflow;
}

static __always_inline struct maple_node *mte_to_node(
		const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE 0x02
/* maple_type is stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT 0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL 0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static __always_inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static __always_inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes. When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset. This extra bit
 * comes from a reuse of the last bit in the node type. This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Node types:
 *  0b??1 = Root
 *  0b?00 = 16 bit nodes
 *  0b010 = 32 bit nodes
 *  0b110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */
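
/*
 * Worked example (illustrative addresses): a child in slot 4 of a
 * maple_range_64 parent at 0x...89a00 stores
 *
 *	0x...89a00 | (4 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	== 0x...89a26
 *
 * as its parent pointer. Masking with ~MAPLE_NODE_MASK recovers the
 * parent node address, and shifting the slot bits back down recovers
 * slot 4.
 */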

#define MAPLE_PARENT_ROOT 0x01

#define MAPLE_PARENT_SLOT_SHIFT 0x03
#define MAPLE_PARENT_SLOT_MASK 0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
#define MAPLE_PARENT_16B_SLOT_MASK 0xFC

#define MAPLE_PARENT_RANGE64 0x06
#define MAPLE_PARENT_RANGE32 0x04
#define MAPLE_PARENT_NOT_RANGE16 0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * The slot number is encoded in the enode->parent bits 3-6 or 2-6, depending
 * on the parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static __always_inline
unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (unlikely(val & MA_ROOT_PARENT))
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by the shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT.
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static __always_inline
struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate. If bit 0 is
 * set, then the alloc contains the number of requested nodes. If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node. Set the request either in the node or do the
 * necessary encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated. Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
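
/*
 * Worked example (illustrative) of the encoding above: with no nodes
 * allocated, mas_set_alloc_req(&mas, 3) stores (3 << 1) | 1 == 0x7 in
 * mas->alloc. Bit 0 is set, so mas_allocated() reads this as zero
 * allocated nodes while mas_alloc_req() decodes 0x7 >> 1 == 3 requested
 * nodes. Once a real node is allocated, both counts move into that
 * node's total and request_count fields.
 */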

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}
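
/*
 * Example (illustrative): in a leaf with mas->min == 0 and pivots
 * {10, 20, 30, ...}, offset 0 covers [0, 10] and offset 1 covers
 * [11, 20]; for the last offset, where no pivot slot exists,
 * mas_safe_pivot() substitutes mas->max as the implied upper bound.
 */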

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}

	return NULL;
}

static inline bool mt_write_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static __always_inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static __always_inline void *mt_slot(const struct maple_tree *mt,
				     void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static __always_inline void *mt_slot_locked(struct maple_tree *mt,
					    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static __always_inline void *mas_slot_locked(struct ma_state *mas,
					     void __rcu **slots, unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
				      unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static __always_inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the
 * list.
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}
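
/*
 * Illustrative usage sketch (assumes the ma_topiary layout from
 * maple_tree.h; not a verbatim caller from this file): dead nodes are
 * collected while rewriting a subtree and destroyed in one pass after
 * the replacement nodes are published:
 *
 *	struct ma_topiary mat = { .mtree = mas->tree };
 *
 *	mat_add(&mat, old_enode);
 *	... publish the replacement nodes ...
 *	mas_mat_destroy(mas, &mat);
 */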

static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free);
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;
	struct maple_node *node;
	bool in_rcu = mt_in_rcu(mas->tree);

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		node = mte_to_node(mat->head);
		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
		if (in_rcu)
			call_rcu(&node->rcu, mt_free_walk);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max. May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	if (!mas->offset) {
		min = mas->min;
		set_min = true;
	}

	if (mas->max == ULONG_MAX)
		set_max = true;

	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple
 * state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse. Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
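
/*
 * Illustrative sketch (the node count and error handling here are
 * assumptions, not a verbatim caller from this file): a write path
 * requests its worst-case node count up front and checks the state for
 * -ENOMEM before touching the tree:
 *
 *	mas_node_count(&mas, 1 + mas_mt_height(&mas) * 3);
 *	if (mas_is_err(&mas))
 *		return xa_err(mas.node);
 */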

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->status == ma_start, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not ma_start, return NULL.
 * - If it's an empty tree: NULL & mas->status == ma_none
 * - If it's a single entry: The entry & mas->status == ma_root
 * - If it's a tree: NULL & mas->status == ma_active & mas->node == safe root
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->status = ma_active;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = NULL;
			mas->status = ma_none;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->status = ma_root;
		mas->offset = MAPLE_NODE_SLOTS;

		/* A single entry is only visible from index 0 */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static __always_inline unsigned char ma_data_end(struct maple_node *node,
		enum maple_type type, unsigned long *pivots, unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}
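
/*
 * Worked example (illustrative): in a maple_range_64 node with @max ==
 * 100, if the last pivot slot is 0 the node is not full and the end is
 * read from the metadata; if the last pivot equals 100 the data ends at
 * that offset; otherwise every slot is in use and the end is the slot
 * past the last pivot (mt_pivots[type]).
 */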

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;

		if (max_gap > pivots[max_piv] - mas->min)
			return max_gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node);
	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
		unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode);
	meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mte_to_node(parent);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
 * node as dead.
 * @mas: the maple state with the new node
 * @old_enode: The old maple encoded node to replace.
 */
static inline void mas_put_in_tree(struct ma_state *mas,
		struct maple_enode *old_enode)
	__must_hold(mas->tree->ma_lock)
{
	unsigned char offset;
	void __rcu **slots;

	if (mte_is_root(mas->node)) {
		mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		rcu_assign_pointer(slots[offset], mas->node);
	}

	mte_set_node_dead(old_enode);
}

/*
 * mas_replace_node() - Replace a node by putting it in the tree, marking it
 * dead, and freeing it.
 * @mas: the ma_state with @mas->node pointing to the new node.
 * @old_enode: The old maple encoded node.
 */
static inline void mas_replace_node(struct ma_state *mas,
		struct maple_enode *old_enode)
	__must_hold(mas->tree->ma_lock)
{
	mas_put_in_tree(mas, old_enode);
	mas_free(mas, old_enode);
}

/*
 * mas_find_child() - Find a child who has the parent @mas->node.
 * @mas: the maple state with the parent.
 * @child: the maple state to store the child.
 *
 * Return: true if a child was found, false otherwise.
 */
static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right. Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}
1818
1819 /*
1820 * mab_no_null_split() - ensure the split doesn't fall on a NULL
1821 * @b_node: the maple_big_node with the data
1822 * @split: the suggested split location
1823 * @slot_count: the number of slots in the node being considered.
1824 *
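* Example (illustrative values): if slot 7 is NULL, the split moves up to 8
* when the split is below the last slot and the right side would still hold
* more than mt_min_slots entries; otherwise it backs up to 6.
*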
1825 * Return: the split location.
1826 */
1827 static inline int mab_no_null_split(struct maple_big_node *b_node,
1828 unsigned char split, unsigned char slot_count)
1829 {
1830 if (!b_node->slot[split]) {
1831 /*
1832 * If the split is less than the max slot && the right side will
1833 * still be sufficient, then increment the split on NULL.
1834 */
1835 if ((split < slot_count - 1) &&
1836 (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1837 split++;
1838 else
1839 split--;
1840 }
1841 return split;
1842 }
1843
1844 /*
1845 * mab_calc_split() - Calculate the split location and if there needs to be two
1846 * splits.
* @mas: The maple state
1847 * @bn: The maple_big_node with the data
1848 * @mid_split: The second split, if required. 0 otherwise.
* @min: The minimum index of the data in @bn
1849 *
1850 * Return: The first split location. The middle split is set in @mid_split.
1851 */
1852 static inline int mab_calc_split(struct ma_state *mas,
1853 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1854 {
1855 unsigned char b_end = bn->b_end;
1856 int split = b_end / 2; /* Assume equal split. */
1857 unsigned char slot_min, slot_count = mt_slots[bn->type];
1858
1859 /*
1860 * To support gap tracking, all NULL entries are kept together and a node cannot
1861 * end on a NULL entry, with the exception of the left-most leaf. The
1862 * limitation means that the split of a node must be checked for this condition
1863 * and be able to put more data in one direction or the other.
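*
* Illustrative example (hypothetical values): in bulk mode with b_end = 10
* and mt_min_slots[type] = 5, split = 10 - 5 = 5; if slot 5 holds a NULL,
* the split backs up to 4 so that the left node does not end on a NULL.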
1864 */
1865 if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1866 *mid_split = 0;
1867 split = b_end - mt_min_slots[bn->type];
1868
1869 if (!ma_is_leaf(bn->type))
1870 return split;
1871
1872 mas->mas_flags |= MA_STATE_REBALANCE;
1873 if (!bn->slot[split])
1874 split--;
1875 return split;
1876 }
1877
1878 /*
1879 * Although extremely rare, it is possible to enter what is known as the 3-way
1880 * split scenario. The 3-way split comes about by means of a store of a range
1881 * that overwrites the end and beginning of two full nodes. The result is a set
1882 * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
1883 * also be located in different parent nodes which are also full. This can
1884 * carry upwards all the way to the root in the worst case.
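*
* Example (illustrative, 64-bit node sizes): with maple_range_64 nodes of
* 16 slots, a big node holding b_end = 33 entries satisfies 33 >= 2 * 16,
* so a middle node is used: split = 33 / 3 = 11 and *mid_split = 22.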
1885 */
1886 if (unlikely(mab_middle_node(bn, split, slot_count))) {
1887 split = b_end / 3;
1888 *mid_split = split * 2;
1889 } else {
1890 slot_min = mt_min_slots[bn->type];
1891
1892 *mid_split = 0;
1893 /*
1894 * Avoid having a range less than the slot count unless it
1895 * causes one node to be deficient.
1896 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1897 */
1898 while ((split < slot_count - 1) &&
1899 ((bn->pivot[split] - min) < slot_count - 1) &&
1900 (b_end - split > slot_min))
1901 split++;
1902 }
1903
1904 /* Avoid ending a node on a NULL entry */
1905 split = mab_no_null_split(bn, split, slot_count);
1906
1907 if (unlikely(*mid_split))
1908 *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1909
1910 return split;
1911 }
1912
1913 /*
1914 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1915 * and set @b_node->b_end to the next free slot.
1916 * @mas: The maple state
1917 * @mas_start: The starting slot to copy
1918 * @mas_end: The end slot to copy (inclusively)
1919 * @b_node: The maple_big_node to place the data
1920 * @mab_start: The starting location in maple_big_node to store the data.
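*
* Example: mas_mab_cp(mas, 0, 4, b_node, 0) copies pivots and slots 0-4
* into the big node and leaves b_node->b_end == 5, assuming the copy is
* not cut short by a NULL pivot or by reaching the node's max.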
1921 */
1922 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1923 unsigned char mas_end, struct maple_big_node *b_node,
1924 unsigned char mab_start)
1925 {
1926 enum maple_type mt;
1927 struct maple_node *node;
1928 void __rcu **slots;
1929 unsigned long *pivots, *gaps;
1930 int i = mas_start, j = mab_start;
1931 unsigned char piv_end;
1932
1933 node = mas_mn(mas);
1934 mt = mte_node_type(mas->node);
1935 pivots = ma_pivots(node, mt);
1936 if (!i) {
1937 b_node->pivot[j] = pivots[i++];
1938 if (unlikely(i > mas_end))
1939 goto complete;
1940 j++;
1941 }
1942
1943 piv_end = min(mas_end, mt_pivots[mt]);
1944 for (; i < piv_end; i++, j++) {
1945 b_node->pivot[j] = pivots[i];
1946 if (unlikely(!b_node->pivot[j]))
1947 break;
1948
1949 if (unlikely(mas->max == b_node->pivot[j]))
1950 goto complete;
1951 }
1952
1953 if (likely(i <= mas_end))
1954 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1955
1956 complete:
1957 b_node->b_end = ++j;
1958 j -= mab_start;
1959 slots = ma_slots(node, mt);
1960 memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1961 if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1962 gaps = ma_gaps(node, mt);
1963 memcpy(b_node->gap + mab_start, gaps + mas_start,
1964 sizeof(unsigned long) * j);
1965 }
1966 }
1967
1968 /*
1969 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1970 * @node: The maple node
1971 * @mt: The maple type
1972 * @end: The node end
1973 */
1974 static inline void mas_leaf_set_meta(struct maple_node *node,
1975 enum maple_type mt, unsigned char end)
1976 {
1977 if (end < mt_slots[mt] - 1)
1978 ma_set_meta(node, mt, 0, end);
1979 }
1980
1981 /*
1982 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1983 * @b_node: the maple_big_node that has the data
1984 * @mab_start: the start location in @b_node.
1985 * @mab_end: The end location in @b_node (inclusively)
1986 * @mas: The maple state with the maple encoded node.
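* @new_max: Update @mas->max from the last copied pivot when true.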
1987 */
1988 static inline void mab_mas_cp(struct maple_big_node *b_node,
1989 unsigned char mab_start, unsigned char mab_end,
1990 struct ma_state *mas, bool new_max)
1991 {
1992 int i, j = 0;
1993 enum maple_type mt = mte_node_type(mas->node);
1994 struct maple_node *node = mte_to_node(mas->node);
1995 void __rcu **slots = ma_slots(node, mt);
1996 unsigned long *pivots = ma_pivots(node, mt);
1997 unsigned long *gaps = NULL;
1998 unsigned char end;
1999
2000 if (mab_end - mab_start > mt_pivots[mt])
2001 mab_end--;
2002
2003 if (!pivots[mt_pivots[mt] - 1])
2004 slots[mt_pivots[mt]] = NULL;
2005
2006 i = mab_start;
2007 do {
2008 pivots[j++] = b_node->pivot[i++];
2009 } while (i <= mab_end && likely(b_node->pivot[i]));
2010
2011 memcpy(slots, b_node->slot + mab_start,
2012 sizeof(void *) * (i - mab_start));
2013
2014 if (new_max)
2015 mas->max = b_node->pivot[i - 1];
2016
2017 end = j - 1;
2018 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2019 unsigned long max_gap = 0;
2020 unsigned char offset = 0;
2021
2022 gaps = ma_gaps(node, mt);
2023 do {
2024 gaps[--j] = b_node->gap[--i];
2025 if (gaps[j] > max_gap) {
2026 offset = j;
2027 max_gap = gaps[j];
2028 }
2029 } while (j);
2030
2031 ma_set_meta(node, mt, offset, end);
2032 } else {
2033 mas_leaf_set_meta(node, mt, end);
2034 }
2035 }
2036
2037 /*
2038 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2039 * @mas: The maple state
2040 * @end: The maple node end
2041 * @mt: The maple node type
2042 */
2043 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2044 enum maple_type mt)
2045 {
2046 if (!(mas->mas_flags & MA_STATE_BULK))
2047 return;
2048
2049 if (mte_is_root(mas->node))
2050 return;
2051
2052 if (end > mt_min_slots[mt]) {
2053 mas->mas_flags &= ~MA_STATE_REBALANCE;
2054 return;
2055 }
2056 }
2057
2058 /*
2059 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2060 * data from a maple encoded node.
2061 * @wr_mas: the maple write state
2062 * @b_node: the maple_big_node to fill with data
2063 * @offset_end: the offset to end copying
2064 *
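* Illustrative example: a leaf holding [0, 5] -> v0, [6, 15] -> v1 and
* [16, 30] -> v2, written with entry E over [10, 20], fills the big node
* with [0, 5] -> v0, [6, 9] -> v1, [10, 20] -> E and [21, 30] -> v2.
*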
2065 * Return: The actual end of the data stored in @b_node
2066 */
2067 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2068 struct maple_big_node *b_node, unsigned char offset_end)
2069 {
2070 unsigned char slot;
2071 unsigned char b_end;
2072 /* Possible underflow of piv will wrap back to 0 before use. */
2073 unsigned long piv;
2074 struct ma_state *mas = wr_mas->mas;
2075
2076 b_node->type = wr_mas->type;
2077 b_end = 0;
2078 slot = mas->offset;
2079 if (slot) {
2080 /* Copy start data up to insert. */
2081 mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2082 b_end = b_node->b_end;
2083 piv = b_node->pivot[b_end - 1];
2084 } else
2085 piv = mas->min - 1;
2086
2087 if (piv + 1 < mas->index) {
2088 /* Handle range starting after old range */
2089 b_node->slot[b_end] = wr_mas->content;
2090 if (!wr_mas->content)
2091 b_node->gap[b_end] = mas->index - 1 - piv;
2092 b_node->pivot[b_end++] = mas->index - 1;
2093 }
2094
2095 /* Store the new entry. */
2096 mas->offset = b_end;
2097 b_node->slot[b_end] = wr_mas->entry;
2098 b_node->pivot[b_end] = mas->last;
2099
2100 /* Appended. */
2101 if (mas->last >= mas->max)
2102 goto b_end;
2103
2104 /* Handle new range ending before old range ends */
2105 piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2106 if (piv > mas->last) {
2107 if (piv == ULONG_MAX)
2108 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2109
2110 if (offset_end != slot)
2111 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2112 offset_end);
2113
2114 b_node->slot[++b_end] = wr_mas->content;
2115 if (!wr_mas->content)
2116 b_node->gap[b_end] = piv - mas->last + 1;
2117 b_node->pivot[b_end] = piv;
2118 }
2119
2120 slot = offset_end + 1;
2121 if (slot > mas->end)
2122 goto b_end;
2123
2124 /* Copy end data to the end of the node. */
2125 mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
2126 b_node->b_end--;
2127 return;
2128
2129 b_end:
2130 b_node->b_end = b_end;
2131 }
2132
2133 /*
2134 * mas_prev_sibling() - Find the previous node with the same parent.
2135 * @mas: the maple state
2136 *
2137 * Return: True if there is a previous sibling, false otherwise.
2138 */
2139 static inline bool mas_prev_sibling(struct ma_state *mas)
2140 {
2141 unsigned int p_slot = mte_parent_slot(mas->node);
2142
2143 if (mte_is_root(mas->node))
2144 return false;
2145
2146 if (!p_slot)
2147 return false;
2148
2149 mas_ascend(mas);
2150 mas->offset = p_slot - 1;
2151 mas_descend(mas);
2152 return true;
2153 }
2154
2155 /*
2156 * mas_next_sibling() - Find the next node with the same parent.
2157 * @mas: the maple state
2158 *
2159 * Return: true if there is a next sibling, false otherwise.
2160 */
2161 static inline bool mas_next_sibling(struct ma_state *mas)
2162 {
2163 MA_STATE(parent, mas->tree, mas->index, mas->last);
2164
2165 if (mte_is_root(mas->node))
2166 return false;
2167
2168 parent = *mas;
2169 mas_ascend(&parent);
2170 parent.offset = mte_parent_slot(mas->node) + 1;
2171 if (parent.offset > mas_data_end(&parent))
2172 return false;
2173
2174 *mas = parent;
2175 mas_descend(mas);
2176 return true;
2177 }
2178
2179 /*
2180 * mas_node_or_none() - Set the enode and state.
* @mas: the maple state
2181 * @enode: The encoded maple node.
2182 *
2183 * Set the node to the enode and the status.
2184 */
2185 static inline void mas_node_or_none(struct ma_state *mas,
2186 struct maple_enode *enode)
2187 {
2188 if (enode) {
2189 mas->node = enode;
2190 mas->status = ma_active;
2191 } else {
2192 mas->node = NULL;
2193 mas->status = ma_none;
2194 }
2195 }
2196
2197 /*
2198 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2199 * @wr_mas: The maple write state
2200 *
2201 * Uses mas_slot_locked() and does not need to worry about dead nodes.
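*
* Example: with pivots {10, 20, 30} and mas->index == 15, starting from
* offset 0 the walk stops at offset 1, giving r_min == 11 and r_max == 20.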
2202 */
2203 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2204 {
2205 struct ma_state *mas = wr_mas->mas;
2206 unsigned char count, offset;
2207
2208 if (unlikely(ma_is_dense(wr_mas->type))) {
2209 wr_mas->r_max = wr_mas->r_min = mas->index;
2210 mas->offset = mas->index = mas->min;
2211 return;
2212 }
2213
2214 wr_mas->node = mas_mn(wr_mas->mas);
2215 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2216 count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
2217 wr_mas->pivots, mas->max);
2218 offset = mas->offset;
2219
2220 while (offset < count && mas->index > wr_mas->pivots[offset])
2221 offset++;
2222
2223 wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2224 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2225 wr_mas->offset_end = mas->offset = offset;
2226 }
2227
2228 /*
2229 * mast_rebalance_next() - Rebalance against the next node
2230 * @mast: The maple subtree state
2232 */
2233 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2234 {
2235 unsigned char b_end = mast->bn->b_end;
2236
2237 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2238 mast->bn, b_end);
2239 mast->orig_r->last = mast->orig_r->max;
2240 }
2241
2242 /*
2243 * mast_rebalance_prev() - Rebalance against the previous node
2244 * @mast: The maple subtree state
2246 */
2247 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2248 {
2249 unsigned char end = mas_data_end(mast->orig_l) + 1;
2250 unsigned char b_end = mast->bn->b_end;
2251
2252 mab_shift_right(mast->bn, end);
2253 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2254 mast->l->min = mast->orig_l->min;
2255 mast->orig_l->index = mast->orig_l->min;
2256 mast->bn->b_end = end + b_end;
2257 mast->l->offset += end;
2258 }
2259
2260 /*
2261 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
2262 * favouring the node to the right. The nodes to the right, then the left, are
2263 * checked at each level upwards until the root is reached.
2264 * Data is copied into @mast->bn.
2265 * @mast: The maple_subtree_state.
2266 */
2267 static inline
2268 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2269 {
2270 struct ma_state r_tmp = *mast->orig_r;
2271 struct ma_state l_tmp = *mast->orig_l;
2272 unsigned char depth = 0;
2273
2274 do {
2275 mas_ascend(mast->orig_r);
2276 mas_ascend(mast->orig_l);
2277 depth++;
2278 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2279 mast->orig_r->offset++;
2280 do {
2281 mas_descend(mast->orig_r);
2282 mast->orig_r->offset = 0;
2283 } while (--depth);
2284
2285 mast_rebalance_next(mast);
2286 *mast->orig_l = l_tmp;
2287 return true;
2288 } else if (mast->orig_l->offset != 0) {
2289 mast->orig_l->offset--;
2290 do {
2291 mas_descend(mast->orig_l);
2292 mast->orig_l->offset =
2293 mas_data_end(mast->orig_l);
2294 } while (--depth);
2295
2296 mast_rebalance_prev(mast);
2297 *mast->orig_r = r_tmp;
2298 return true;
2299 }
2300 } while (!mte_is_root(mast->orig_r->node));
2301
2302 *mast->orig_r = r_tmp;
2303 *mast->orig_l = l_tmp;
2304 return false;
2305 }
2306
2307 /*
2308 * mast_ascend() - Ascend the original left and right maple states.
2309 * @mast: the maple subtree state.
2310 *
2311 * Ascend the original left and right sides. Set the offsets to point to the
2312 * data already in the new tree (@mast->l and @mast->r).
2313 */
2314 static inline void mast_ascend(struct maple_subtree_state *mast)
2315 {
2316 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2317 mas_ascend(mast->orig_l);
2318 mas_ascend(mast->orig_r);
2319
2320 mast->orig_r->offset = 0;
2321 mast->orig_r->index = mast->r->max;
2322 /* last should be larger than or equal to index */
2323 if (mast->orig_r->last < mast->orig_r->index)
2324 mast->orig_r->last = mast->orig_r->index;
2325
2326 wr_mas.type = mte_node_type(mast->orig_r->node);
2327 mas_wr_node_walk(&wr_mas);
2328 /* Set up the left side of things */
2329 mast->orig_l->offset = 0;
2330 mast->orig_l->index = mast->l->min;
2331 wr_mas.mas = mast->orig_l;
2332 wr_mas.type = mte_node_type(mast->orig_l->node);
2333 mas_wr_node_walk(&wr_mas);
2334
2335 mast->bn->type = wr_mas.type;
2336 }
2337
2338 /*
2339 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2340 * @mas: the maple state with the allocations.
2341 * @b_node: the maple_big_node with the type encoding.
2342 *
2343 * Use the node type from the maple_big_node to allocate a new node from the
2344 * ma_state. This function exists mainly for code readability.
2345 *
2346 * Return: A new maple encoded node
2347 */
2348 static inline struct maple_enode
2349 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2350 {
2351 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2352 }
2353
2354 /*
2355 * mas_mab_to_node() - Set up right and middle nodes
2356 *
2357 * @mas: the maple state that contains the allocations.
2358 * @b_node: the node which contains the data.
2359 * @left: The pointer which will have the left node
2360 * @right: The pointer which may have the right node
2361 * @middle: the pointer which may have the middle node (rare)
2362 * @mid_split: the split location for the middle node
2363 *
2364 * Return: the split of left.
2365 */
2366 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2367 struct maple_big_node *b_node, struct maple_enode **left,
2368 struct maple_enode **right, struct maple_enode **middle,
2369 unsigned char *mid_split, unsigned long min)
2370 {
2371 unsigned char split = 0;
2372 unsigned char slot_count = mt_slots[b_node->type];
2373
2374 *left = mas_new_ma_node(mas, b_node);
2375 *right = NULL;
2376 *middle = NULL;
2377 *mid_split = 0;
2378
2379 if (b_node->b_end < slot_count) {
2380 split = b_node->b_end;
2381 } else {
2382 split = mab_calc_split(mas, b_node, mid_split, min);
2383 *right = mas_new_ma_node(mas, b_node);
2384 }
2385
2386 if (*mid_split)
2387 *middle = mas_new_ma_node(mas, b_node);
2388
2389 return split;
2391 }
2392
2393 /*
2394 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2395 * pointer.
2396 * @b_node: the big node to add the entry
2397 * @mas: the maple state to get the pivot (mas->max)
2398 * @entry: the entry to add, if NULL nothing happens.
2399 */
2400 static inline void mab_set_b_end(struct maple_big_node *b_node,
2401 struct ma_state *mas,
2402 void *entry)
2403 {
2404 if (!entry)
2405 return;
2406
2407 b_node->slot[b_node->b_end] = entry;
2408 if (mt_is_alloc(mas->tree))
2409 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2410 b_node->pivot[b_node->b_end++] = mas->max;
2411 }
2412
2413 /*
2414 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2415 * of @mas->node to either @left or @right, depending on @slot and @split
2416 *
2417 * @mas: the maple state with the node that needs a parent
2418 * @left: possible parent 1
2419 * @right: possible parent 2
2420 * @slot: the slot the mas->node was placed
2421 * @split: the split location between @left and @right
2422 */
2423 static inline void mas_set_split_parent(struct ma_state *mas,
2424 struct maple_enode *left,
2425 struct maple_enode *right,
2426 unsigned char *slot, unsigned char split)
2427 {
2428 if (mas_is_none(mas))
2429 return;
2430
2431 if ((*slot) <= split)
2432 mas_set_parent(mas, mas->node, left, *slot);
2433 else if (right)
2434 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2435
2436 (*slot)++;
2437 }
2438
2439 /*
2440 * mte_mid_split_check() - Check if the next node passes the mid-split
2441 * @l: Pointer to the left encoded maple node.
2442 * @r: Pointer to the right encoded maple node.
2443 * @right: The encoded maple node to use once the mid-split is passed.
2444 * @slot: The offset
2445 * @split: The split location.
2446 * @mid_split: The middle split.
2447 */
2448 static inline void mte_mid_split_check(struct maple_enode **l,
2449 struct maple_enode **r,
2450 struct maple_enode *right,
2451 unsigned char slot,
2452 unsigned char *split,
2453 unsigned char mid_split)
2454 {
2455 if (*r == right)
2456 return;
2457
2458 if (slot < mid_split)
2459 return;
2460
2461 *l = *r;
2462 *r = right;
2463 *split = mid_split;
2464 }
2465
2466 /*
2467 * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
2468 * is taken from @mast->l.
2469 * @mast: the maple subtree state
2470 * @left: the left node
* @middle: the middle node (may be NULL)
2471 * @right: the right node
2472 * @split: the split location
* @mid_split: the middle split location
2473 */
2474 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2475 struct maple_enode *left,
2476 struct maple_enode *middle,
2477 struct maple_enode *right,
2478 unsigned char split,
2479 unsigned char mid_split)
2480 {
2481 unsigned char slot;
2482 struct maple_enode *l = left;
2483 struct maple_enode *r = right;
2484
2485 if (mas_is_none(mast->l))
2486 return;
2487
2488 if (middle)
2489 r = middle;
2490
2491 slot = mast->l->offset;
2492
2493 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2494 mas_set_split_parent(mast->l, l, r, &slot, split);
2495
2496 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2497 mas_set_split_parent(mast->m, l, r, &slot, split);
2498
2499 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2500 mas_set_split_parent(mast->r, l, r, &slot, split);
2501 }
2502
2503 /*
2504 * mas_topiary_node() - Dispose of a single node
2505 * @mas: The maple state for pushing nodes
2506 * @enode: The encoded maple node
2507 * @in_rcu: If the tree is in rcu mode
2508 *
2509 * The node will either be RCU freed or pushed back on the maple state.
2510 */
2511 static inline void mas_topiary_node(struct ma_state *mas,
2512 struct ma_state *tmp_mas, bool in_rcu)
2513 {
2514 struct maple_node *tmp;
2515 struct maple_enode *enode;
2516
2517 if (mas_is_none(tmp_mas))
2518 return;
2519
2520 enode = tmp_mas->node;
2521 tmp = mte_to_node(enode);
2522 mte_set_node_dead(enode);
2523 if (in_rcu)
2524 ma_free_rcu(tmp);
2525 else
2526 mas_push_node(mas, tmp);
2527 }
2528
2529 /*
2530 * mas_topiary_replace() - Replace the data with new data, then repair the
2531 * parent links within the new tree. Iterate over the old sub-tree, collect
2532 * the dead subtrees, and topiary the nodes that are no longer of use.
2533 *
2534 * The new tree will have up to three children with the correct parent. Keep
2535 * track of the new entries as they need to be followed to find the next level
2536 * of new entries.
2537 *
2538 * The old tree will have up to three children with the old parent. Keep track
2539 * of the old entries as they may have more nodes below replaced. Nodes within
2540 * [index, last] are dead subtrees, others need to be freed and followed.
2541 *
2542 * @mas: The maple state pointing at the new data
2543 * @old_enode: The maple encoded node being replaced
2544 *
2545 */
2546 static inline void mas_topiary_replace(struct ma_state *mas,
2547 struct maple_enode *old_enode)
2548 {
2549 struct ma_state tmp[3], tmp_next[3];
2550 MA_TOPIARY(subtrees, mas->tree);
2551 bool in_rcu;
2552 int i, n;
2553
2554 /* Place data in tree & then mark node as old */
2555 mas_put_in_tree(mas, old_enode);
2556
2557 /* Update the parent pointers in the tree */
2558 tmp[0] = *mas;
2559 tmp[0].offset = 0;
2560 tmp[1].status = ma_none;
2561 tmp[2].status = ma_none;
2562 while (!mte_is_leaf(tmp[0].node)) {
2563 n = 0;
2564 for (i = 0; i < 3; i++) {
2565 if (mas_is_none(&tmp[i]))
2566 continue;
2567
2568 while (n < 3) {
2569 if (!mas_find_child(&tmp[i], &tmp_next[n]))
2570 break;
2571 n++;
2572 }
2573
2574 mas_adopt_children(&tmp[i], tmp[i].node);
2575 }
2576
2577 if (MAS_WARN_ON(mas, n == 0))
2578 break;
2579
2580 while (n < 3)
2581 tmp_next[n++].status = ma_none;
2582
2583 for (i = 0; i < 3; i++)
2584 tmp[i] = tmp_next[i];
2585 }
2586
2587 /* Collect the old nodes that need to be discarded */
2588 if (mte_is_leaf(old_enode))
2589 return mas_free(mas, old_enode);
2590
2591 tmp[0] = *mas;
2592 tmp[0].offset = 0;
2593 tmp[0].node = old_enode;
2594 tmp[1].status = ma_none;
2595 tmp[2].status = ma_none;
2596 in_rcu = mt_in_rcu(mas->tree);
2597 do {
2598 n = 0;
2599 for (i = 0; i < 3; i++) {
2600 if (mas_is_none(&tmp[i]))
2601 continue;
2602
2603 while (n < 3) {
2604 if (!mas_find_child(&tmp[i], &tmp_next[n]))
2605 break;
2606
2607 if ((tmp_next[n].min >= tmp_next->index) &&
2608 (tmp_next[n].max <= tmp_next->last)) {
2609 mat_add(&subtrees, tmp_next[n].node);
2610 tmp_next[n].status = ma_none;
2611 } else {
2612 n++;
2613 }
2614 }
2615 }
2616
2617 if (MAS_WARN_ON(mas, n == 0))
2618 break;
2619
2620 while (n < 3)
2621 tmp_next[n++].status = ma_none;
2622
2623 for (i = 0; i < 3; i++) {
2624 mas_topiary_node(mas, &tmp[i], in_rcu);
2625 tmp[i] = tmp_next[i];
2626 }
2627 } while (!mte_is_leaf(tmp[0].node));
2628
2629 for (i = 0; i < 3; i++)
2630 mas_topiary_node(mas, &tmp[i], in_rcu);
2631
2632 mas_mat_destroy(mas, &subtrees);
2633 }
2634
2635 /*
2636 * mas_wmb_replace() - Write memory barrier and replace
2637 * @mas: The maple state
2638 * @old_enode: The old maple encoded node that is being replaced.
2639 *
2640 * Updates gap as necessary.
2641 */
2642 static inline void mas_wmb_replace(struct ma_state *mas,
2643 struct maple_enode *old_enode)
2644 {
2645 /* Insert the new data in the tree */
2646 mas_topiary_replace(mas, old_enode);
2647
2648 if (mte_is_leaf(mas->node))
2649 return;
2650
2651 mas_update_gap(mas);
2652 }
2653
2654 /*
2655 * mast_cp_to_nodes() - Copy data out to nodes.
2656 * @mast: The maple subtree state
2657 * @left: The left encoded maple node
2658 * @middle: The middle encoded maple node
2659 * @right: The right encoded maple node
2660 * @split: The location to split between left and (middle ? middle : right)
2661 * @mid_split: The location to split between middle and right.
2662 */
2663 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2664 struct maple_enode *left, struct maple_enode *middle,
2665 struct maple_enode *right, unsigned char split, unsigned char mid_split)
2666 {
2667 bool new_lmax = true;
2668
2669 mas_node_or_none(mast->l, left);
2670 mas_node_or_none(mast->m, middle);
2671 mas_node_or_none(mast->r, right);
2672
2673 mast->l->min = mast->orig_l->min;
2674 if (split == mast->bn->b_end) {
2675 mast->l->max = mast->orig_r->max;
2676 new_lmax = false;
2677 }
2678
2679 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2680
2681 if (middle) {
2682 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2683 mast->m->min = mast->bn->pivot[split] + 1;
2684 split = mid_split;
2685 }
2686
2687 mast->r->max = mast->orig_r->max;
2688 if (right) {
2689 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2690 mast->r->min = mast->bn->pivot[split] + 1;
2691 }
2692 }
2693
2694 /*
2695 * mast_combine_cp_left - Copy in the original left side of the tree into the
2696 * combined data set in the maple subtree state big node.
2697 * @mast: The maple subtree state
2698 */
2699 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2700 {
2701 unsigned char l_slot = mast->orig_l->offset;
2702
2703 if (!l_slot)
2704 return;
2705
2706 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2707 }
2708
2709 /*
2710 * mast_combine_cp_right: Copy in the original right side of the tree into the
2711 * combined data set in the maple subtree state big node.
2712 * @mast: The maple subtree state
2713 */
2714 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2715 {
2716 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2717 return;
2718
2719 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2720 mt_slot_count(mast->orig_r->node), mast->bn,
2721 mast->bn->b_end);
2722 mast->orig_r->last = mast->orig_r->max;
2723 }
2724
2725 /*
2726 * mast_sufficient: Check if the maple subtree state has enough data in the big
2727 * node to create at least one sufficient node
2728 * @mast: the maple subtree state
2729 */
2730 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2731 {
2732 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2733 return true;
2734
2735 return false;
2736 }
2737
2738 /*
2739 * mast_overflow: Check if there is too much data in the subtree state for a
2740 * single node.
2741 * @mast: The maple subtree state
2742 */
2743 static inline bool mast_overflow(struct maple_subtree_state *mast)
2744 {
2745 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2746 return true;
2747
2748 return false;
2749 }
2750
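/*
 * mtree_range_walk() - Walk from the root to the leaf holding mas->index,
 * tracking the range limits on the way down.
 * @mas: The maple state
 *
 * On success, mas->index and mas->last are set to the limits of the range
 * containing the returned entry. Returns NULL and resets @mas if a dead node
 * is encountered.
 */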
2751 static inline void *mtree_range_walk(struct ma_state *mas)
2752 {
2753 unsigned long *pivots;
2754 unsigned char offset;
2755 struct maple_node *node;
2756 struct maple_enode *next, *last;
2757 enum maple_type type;
2758 void __rcu **slots;
2759 unsigned char end;
2760 unsigned long max, min;
2761 unsigned long prev_max, prev_min;
2762
2763 next = mas->node;
2764 min = mas->min;
2765 max = mas->max;
2766 do {
2767 last = next;
2768 node = mte_to_node(next);
2769 type = mte_node_type(next);
2770 pivots = ma_pivots(node, type);
2771 end = ma_data_end(node, type, pivots, max);
2772 prev_min = min;
2773 prev_max = max;
2774 if (pivots[0] >= mas->index) {
2775 offset = 0;
2776 max = pivots[0];
2777 goto next;
2778 }
2779
2780 offset = 1;
2781 while (offset < end) {
2782 if (pivots[offset] >= mas->index) {
2783 max = pivots[offset];
2784 break;
2785 }
2786 offset++;
2787 }
2788
2789 min = pivots[offset - 1] + 1;
2790 next:
2791 slots = ma_slots(node, type);
2792 next = mt_slot(mas->tree, slots, offset);
2793 if (unlikely(ma_dead_node(node)))
2794 goto dead_node;
2795 } while (!ma_is_leaf(type));
2796
2797 mas->end = end;
2798 mas->offset = offset;
2799 mas->index = min;
2800 mas->last = max;
2801 mas->min = prev_min;
2802 mas->max = prev_max;
2803 mas->node = last;
2804 return (void *)next;
2805
2806 dead_node:
2807 mas_reset(mas);
2808 return NULL;
2809 }
2810
2811 /*
2812 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2813 * @mas: The starting maple state
2814 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2815 * @count: The estimated count of iterations needed.
2816 *
2817 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2818 * is hit. First @b_node is split into two entries which are inserted into the
2819 * next iteration of the loop. @b_node is returned populated with the final
2820 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
2821 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2822 * to account for what has been copied into the new sub-tree. The update of
2823 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2824 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
2825 * the new sub-tree in case the sub-tree becomes the full tree.
2826 *
2827 * Return: the number of elements in b_node during the last loop.
2828 */
2829 static int mas_spanning_rebalance(struct ma_state *mas,
2830 struct maple_subtree_state *mast, unsigned char count)
2831 {
2832 unsigned char split, mid_split;
2833 unsigned char slot = 0;
2834 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2835 struct maple_enode *old_enode;
2836
2837 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2838 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2839 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2840
2841 /*
2842 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2843 * Rebalancing is done by use of the ``struct maple_topiary``.
2844 */
2845 mast->l = &l_mas;
2846 mast->m = &m_mas;
2847 mast->r = &r_mas;
2848 l_mas.status = r_mas.status = m_mas.status = ma_none;
2849
2850 /* Check if this is not root and has sufficient data. */
2851 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
2852 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
2853 mast_spanning_rebalance(mast);
2854
2855 l_mas.depth = 0;
2856
2857 /*
2858 * Each level of the tree is examined and balanced, pushing data to the left or
2859 * right, or rebalancing against left or right nodes is employed to avoid
2860 * rippling up the tree to limit the amount of churn. Once a new sub-section of
2861 * the tree is created, there may be a mix of new and old nodes. The old nodes
2862 * will have the incorrect parent pointers and currently be in two trees: the
2863 * original tree and the partially new tree. To remedy the parent pointers in
2864 * the old tree, the new data is swapped into the active tree and a walk down
2865 * the tree is performed and the parent pointers are updated.
2866 * See mas_topiary_replace() for more information.
2867 */
2868 while (count--) {
2869 mast->bn->b_end--;
2870 mast->bn->type = mte_node_type(mast->orig_l->node);
2871 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
2872 &mid_split, mast->orig_l->min);
2873 mast_set_split_parents(mast, left, middle, right, split,
2874 mid_split);
2875 mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
2876
2877 /*
2878 * Copy data from next level in the tree to mast->bn from next
2879 * iteration
2880 */
2881 memset(mast->bn, 0, sizeof(struct maple_big_node));
2882 mast->bn->type = mte_node_type(left);
2883 l_mas.depth++;
2884
2885 /* Root already stored in l->node. */
2886 if (mas_is_root_limits(mast->l))
2887 goto new_root;
2888
2889 mast_ascend(mast);
2890 mast_combine_cp_left(mast);
2891 l_mas.offset = mast->bn->b_end;
2892 mab_set_b_end(mast->bn, &l_mas, left);
2893 mab_set_b_end(mast->bn, &m_mas, middle);
2894 mab_set_b_end(mast->bn, &r_mas, right);
2895
2896 /* Copy anything necessary out of the right node. */
2897 mast_combine_cp_right(mast);
2898 mast->orig_l->last = mast->orig_l->max;
2899
2900 if (mast_sufficient(mast))
2901 continue;
2902
2903 if (mast_overflow(mast))
2904 continue;
2905
2906 /* May be a new root stored in mast->bn */
2907 if (mas_is_root_limits(mast->orig_l))
2908 break;
2909
2910 mast_spanning_rebalance(mast);
2911
2912 /* rebalancing from other nodes may require another loop. */
2913 if (!count)
2914 count++;
2915 }
2916
2917 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
2918 mte_node_type(mast->orig_l->node));
2919 l_mas.depth++;
2920 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
2921 mas_set_parent(mas, left, l_mas.node, slot);
2922 if (middle)
2923 mas_set_parent(mas, middle, l_mas.node, ++slot);
2924
2925 if (right)
2926 mas_set_parent(mas, right, l_mas.node, ++slot);
2927
2928 if (mas_is_root_limits(mast->l)) {
2929 new_root:
2930 mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
2931 while (!mte_is_root(mast->orig_l->node))
2932 mast_ascend(mast);
2933 } else {
2934 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
2935 }
2936
2937 old_enode = mast->orig_l->node;
2938 mas->depth = l_mas.depth;
2939 mas->node = l_mas.node;
2940 mas->min = l_mas.min;
2941 mas->max = l_mas.max;
2942 mas->offset = l_mas.offset;
2943 mas_wmb_replace(mas, old_enode);
2944 mtree_range_walk(mas);
2945 return mast->bn->b_end;
2946 }
2947
2948 /*
2949 * mas_rebalance() - Rebalance a given node.
2950 * @mas: The maple state
2951 * @b_node: The big maple node.
2952 *
2953 * Rebalance two nodes into a single node or two new nodes that are sufficient.
2954 * Continue upwards until tree is sufficient.
2955 *
2956 * Return: the number of elements in b_node during the last loop.
2957 */
2958 static inline int mas_rebalance(struct ma_state *mas,
2959 struct maple_big_node *b_node)
2960 {
2961 char empty_count = mas_mt_height(mas);
2962 struct maple_subtree_state mast;
2963 unsigned char shift, b_end = ++b_node->b_end;
2964
2965 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
2966 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2967
2968 trace_ma_op(__func__, mas);
2969
2970 /*
2971 * Rebalancing occurs if a node is insufficient. Data is rebalanced
2972 * against the node to the right if it exists, otherwise the node to the
2973 * left of this node is rebalanced against this node. If rebalancing
2974 * causes just one node to be produced instead of two, then the parent
2975 * is also examined and rebalanced if it is insufficient. Every level
2976 * tries to combine the data in the same way. If one node contains the
2977 * entire range of the tree, then that node is used as a new root node.
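*
* For illustration (hypothetical counts): a node left with 4 entries when
* mt_min_slots is 5 is combined with its right sibling when one exists; if
* the combined data then fits in a single node, the parent loses an entry
* and is itself checked for sufficiency on the next level up.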
2978 */
2979 mas_node_count(mas, empty_count * 2 - 1);
2980 if (mas_is_err(mas))
2981 return 0;
2982
2983 mast.orig_l = &l_mas;
2984 mast.orig_r = &r_mas;
2985 mast.bn = b_node;
2986 mast.bn->type = mte_node_type(mas->node);
2987
2988 l_mas = r_mas = *mas;
2989
2990 if (mas_next_sibling(&r_mas)) {
2991 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
2992 r_mas.last = r_mas.index = r_mas.max;
2993 } else {
2994 mas_prev_sibling(&l_mas);
2995 shift = mas_data_end(&l_mas) + 1;
2996 mab_shift_right(b_node, shift);
2997 mas->offset += shift;
2998 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
2999 b_node->b_end = shift + b_end;
3000 l_mas.index = l_mas.last = l_mas.min;
3001 }
3002
3003 return mas_spanning_rebalance(mas, &mast, empty_count);
3004 }
3005
3006 /*
3007 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3008 * state.
3009 * @mas: The maple state
3010 * @end: The end of the left-most node.
3011 *
3012 * During a mass-insert event (such as forking), it may be necessary to
3013 * rebalance the left-most node when it is not sufficient.
3014 */
3015 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3016 {
3017 enum maple_type mt = mte_node_type(mas->node);
3018 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3019 struct maple_enode *eparent, *old_eparent;
3020 unsigned char offset, tmp, split = mt_slots[mt] / 2;
3021 void __rcu **l_slots, **slots;
3022 unsigned long *l_pivs, *pivs, gap;
3023 bool in_rcu = mt_in_rcu(mas->tree);
3024
3025 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3026
3027 l_mas = *mas;
3028 mas_prev_sibling(&l_mas);
3029
3030 /* set up node. */
3031 if (in_rcu) {
3032 /* Allocate for both left and right as well as parent. */
3033 mas_node_count(mas, 3);
3034 if (mas_is_err(mas))
3035 return;
3036
3037 newnode = mas_pop_node(mas);
3038 } else {
3039 newnode = &reuse;
3040 }
3041
3042 node = mas_mn(mas);
3043 newnode->parent = node->parent;
3044 slots = ma_slots(newnode, mt);
3045 pivs = ma_pivots(newnode, mt);
3046 left = mas_mn(&l_mas);
3047 l_slots = ma_slots(left, mt);
3048 l_pivs = ma_pivots(left, mt);
3049 if (!l_slots[split])
3050 split++;
3051 tmp = mas_data_end(&l_mas) - split;
3052
3053 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3054 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3055 pivs[tmp] = l_mas.max;
3056 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3057 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3058
3059 l_mas.max = l_pivs[split];
3060 mas->min = l_mas.max + 1;
3061 old_eparent = mt_mk_node(mte_parent(l_mas.node),
3062 mas_parent_type(&l_mas, l_mas.node));
3063 tmp += end;
3064 if (!in_rcu) {
3065 unsigned char max_p = mt_pivots[mt];
3066 unsigned char max_s = mt_slots[mt];
3067
3068 if (tmp < max_p)
3069 memset(pivs + tmp, 0,
3070 sizeof(unsigned long) * (max_p - tmp));
3071
3072 if (tmp < mt_slots[mt])
3073 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3074
3075 memcpy(node, newnode, sizeof(struct maple_node));
3076 ma_set_meta(node, mt, 0, tmp - 1);
3077 mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
3078 l_pivs[split]);
3079
3080 /* Remove data from l_pivs. */
3081 tmp = split + 1;
3082 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3083 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3084 ma_set_meta(left, mt, 0, split);
3085 eparent = old_eparent;
3086
3087 goto done;
3088 }
3089
3090 /* RCU requires replacing both l_mas, mas, and parent. */
3091 mas->node = mt_mk_node(newnode, mt);
3092 ma_set_meta(newnode, mt, 0, tmp);
3093
3094 new_left = mas_pop_node(mas);
3095 new_left->parent = left->parent;
3096 mt = mte_node_type(l_mas.node);
3097 slots = ma_slots(new_left, mt);
3098 pivs = ma_pivots(new_left, mt);
3099 memcpy(slots, l_slots, sizeof(void *) * split);
3100 memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3101 ma_set_meta(new_left, mt, 0, split);
3102 l_mas.node = mt_mk_node(new_left, mt);
3103
3104 /* replace parent. */
3105 offset = mte_parent_slot(mas->node);
3106 mt = mas_parent_type(&l_mas, l_mas.node);
3107 parent = mas_pop_node(mas);
3108 slots = ma_slots(parent, mt);
3109 pivs = ma_pivots(parent, mt);
3110 memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
3111 rcu_assign_pointer(slots[offset], mas->node);
3112 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3113 pivs[offset - 1] = l_mas.max;
3114 eparent = mt_mk_node(parent, mt);
3115 done:
3116 gap = mas_leaf_max_gap(mas);
3117 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3118 gap = mas_leaf_max_gap(&l_mas);
3119 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3120 mas_ascend(mas);
3121
3122 if (in_rcu) {
3123 mas_replace_node(mas, old_eparent);
3124 mas_adopt_children(mas, mas->node);
3125 }
3126
3127 mas_update_gap(mas);
3128 }
3129
3130 /*
3131 * mas_split_final_node() - Split the final node in a subtree operation.
3132 * @mast: the maple subtree state
3133 * @mas: The maple state
3134 * @height: The height of the tree in case it's a new root.
3135 */
3136 static inline void mas_split_final_node(struct maple_subtree_state *mast,
3137 struct ma_state *mas, int height)
3138 {
3139 struct maple_enode *ancestor;
3140
3141 if (mte_is_root(mas->node)) {
3142 if (mt_is_alloc(mas->tree))
3143 mast->bn->type = maple_arange_64;
3144 else
3145 mast->bn->type = maple_range_64;
3146 mas->depth = height;
3147 }
3148 /*
3149 * Only a single node is used here; it could be the root.
3150 * The big_node data should just fit in a single node.
3151 */
3152 ancestor = mas_new_ma_node(mas, mast->bn);
3153 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3154 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3155 mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3156
3157 mast->l->node = ancestor;
3158 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3159 mas->offset = mast->bn->b_end - 1;
3160 }
3161
3162 /*
3163 * mast_fill_bnode() - Copy data into the big node in the subtree state
3164 * @mast: The maple subtree state
3165 * @mas: the maple state
3166 * @skip: The number of entries to skip when inserting the new nodes.
3167 */
3168 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3169 struct ma_state *mas,
3170 unsigned char skip)
3171 {
3172 bool cp = true;
3173 unsigned char split;
3174
3175 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3176 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3177 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3178 mast->bn->b_end = 0;
3179
3180 if (mte_is_root(mas->node)) {
3181 cp = false;
3182 } else {
3183 mas_ascend(mas);
3184 mas->offset = mte_parent_slot(mas->node);
3185 }
3186
3187 if (cp && mast->l->offset)
3188 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3189
3190 split = mast->bn->b_end;
3191 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3192 mast->r->offset = mast->bn->b_end;
3193 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3194 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3195 cp = false;
3196
3197 if (cp)
3198 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3199 mast->bn, mast->bn->b_end);
3200
3201 mast->bn->b_end--;
3202 mast->bn->type = mte_node_type(mas->node);
3203 }
3204
3205 /*
3206 * mast_split_data() - Split the data in the subtree state big node into regular
3207 * nodes.
3208 * @mast: The maple subtree state
3209 * @mas: The maple state
3210 * @split: The location to split the big node
3211 */
3212 static inline void mast_split_data(struct maple_subtree_state *mast,
3213 struct ma_state *mas, unsigned char split)
3214 {
3215 unsigned char p_slot;
3216
3217 mab_mas_cp(mast->bn, 0, split, mast->l, true);
3218 mte_set_pivot(mast->r->node, 0, mast->r->max);
3219 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3220 mast->l->offset = mte_parent_slot(mas->node);
3221 mast->l->max = mast->bn->pivot[split];
3222 mast->r->min = mast->l->max + 1;
3223 if (mte_is_leaf(mas->node))
3224 return;
3225
3226 p_slot = mast->orig_l->offset;
3227 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3228 &p_slot, split);
3229 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3230 &p_slot, split);
3231 }
3232
3233 /*
3234 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3235 * data to the right or left node if there is room.
3236 * @mas: The maple state
3237 * @height: The current height of the maple state
3238 * @mast: The maple subtree state
3239 * @left: Push left or not.
3240 *
3241 * Keeping the height of the tree low means faster lookups.
3242 *
3243 * Return: True if pushed, false otherwise.
3244 */
3245 static inline bool mas_push_data(struct ma_state *mas, int height,
3246 struct maple_subtree_state *mast, bool left)
3247 {
3248 unsigned char slot_total = mast->bn->b_end;
3249 unsigned char end, space, split;
3250
3251 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3252 tmp_mas = *mas;
3253 tmp_mas.depth = mast->l->depth;
3254
3255 if (left && !mas_prev_sibling(&tmp_mas))
3256 return false;
3257 else if (!left && !mas_next_sibling(&tmp_mas))
3258 return false;
3259
3260 end = mas_data_end(&tmp_mas);
3261 slot_total += end;
3262 space = 2 * mt_slot_count(mas->node) - 2;
3263 /* -2 instead of -1 to ensure there isn't a triple split */
3264 if (ma_is_leaf(mast->bn->type))
3265 space--;
3266
3267 if (mas->max == ULONG_MAX)
3268 space--;
3269
3270 if (slot_total >= space)
3271 return false;
3272
3273 /* Get the data; Fill mast->bn */
3274 mast->bn->b_end++;
3275 if (left) {
3276 mab_shift_right(mast->bn, end + 1);
3277 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3278 mast->bn->b_end = slot_total + 1;
3279 } else {
3280 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3281 }
3282
3283 /* Configure mast for splitting of mast->bn */
3284 split = mt_slots[mast->bn->type] - 2;
3285 if (left) {
3286 /* Switch mas to prev node */
3287 *mas = tmp_mas;
3288 /* Start using mast->l for the left side. */
3289 tmp_mas.node = mast->l->node;
3290 *mast->l = tmp_mas;
3291 } else {
3292 tmp_mas.node = mast->r->node;
3293 *mast->r = tmp_mas;
3294 split = slot_total - split;
3295 }
3296 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3297 /* Update parent slot for split calculation. */
3298 if (left)
3299 mast->orig_l->offset += end + 1;
3300
3301 mast_split_data(mast, mas, split);
3302 mast_fill_bnode(mast, mas, 2);
3303 mas_split_final_node(mast, mas, height + 1);
3304 return true;
3305 }
3306
3307 /*
3308 * mas_split() - Split data that is too big for one node into two.
3309 * @mas: The maple state
3310 * @b_node: The maple big node
3311 * Return: 1 on success, 0 on failure.
3312 */
3313 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3314 {
3315 struct maple_subtree_state mast;
3316 int height = 0;
3317 unsigned char mid_split, split = 0;
3318 struct maple_enode *old;
3319
3320 /*
3321 * Splitting is handled differently from any other B-tree; the Maple
3322 * Tree splits upwards. Splitting up means that the split operation
3323 * occurs when the walk of the tree hits the leaves and not on the way
3324 * down. The reason for splitting up is that it is impossible to know
3325 * how much space will be needed until the leaf is (or leaves are)
3326 * reached. Since overwriting data is allowed and a range could
3327 * overwrite more than one range or result in changing one entry into 3
3328 * entries, it is impossible to know if a split is required until the
3329 * data is examined.
3330 *
3331 * Splitting is a balancing act between keeping allocations to a minimum
3332 * and avoiding a 'jitter' event where a tree is expanded to make room
3333 * for an entry followed by a contraction when the entry is removed. To
3334 * accomplish the balance, there are empty slots remaining in both left
3335 * and right nodes after a split.
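*
* Example (illustrative): splitting a full 16-slot leaf near the middle
* leaves several free slots in each resulting leaf, so an insert followed
* by a remove in the same range does not oscillate between a split and a
* rebalance.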
3336 */
3337 MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3338 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3339 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3340 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3341
3342 trace_ma_op(__func__, mas);
3343 mas->depth = mas_mt_height(mas);
3344 /* Allocation failures will happen early. */
3345 mas_node_count(mas, 1 + mas->depth * 2);
3346 if (mas_is_err(mas))
3347 return 0;
3348
3349 mast.l = &l_mas;
3350 mast.r = &r_mas;
3351 mast.orig_l = &prev_l_mas;
3352 mast.orig_r = &prev_r_mas;
3353 mast.bn = b_node;
3354
3355 while (height++ <= mas->depth) {
3356 if (mt_slots[b_node->type] > b_node->b_end) {
3357 mas_split_final_node(&mast, mas, height);
3358 break;
3359 }
3360
3361 l_mas = r_mas = *mas;
3362 l_mas.node = mas_new_ma_node(mas, b_node);
3363 r_mas.node = mas_new_ma_node(mas, b_node);
3364 /*
3365 * Another way that 'jitter' is avoided is to terminate a split up early if the
3366 * left or right node has space to spare. This is referred to as "pushing left"
3367 * or "pushing right" and is similar to the B* tree, except the nodes left or
3368 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3369 * is a significant savings.
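*
* For illustration (hypothetical state): if the left sibling has spare
* room, the sibling's data and the overflowing data are recombined and
* redistributed across the two nodes, and the split terminates at the
* shared parent instead of rippling towards the root.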
3370 */
3371 /* Try to push left. */
3372 if (mas_push_data(mas, height, &mast, true))
3373 break;
3374 /* Try to push right. */
3375 if (mas_push_data(mas, height, &mast, false))
3376 break;
3377
3378 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3379 mast_split_data(&mast, mas, split);
3380 /*
3381 * Usually correct, mab_mas_cp in the above call overwrites
3382 * r->max.
3383 */
3384 mast.r->max = mas->max;
3385 mast_fill_bnode(&mast, mas, 1);
3386 prev_l_mas = *mast.l;
3387 prev_r_mas = *mast.r;
3388 }
3389
3390 /* Set the original node as dead */
3391 old = mas->node;
3392 mas->node = l_mas.node;
3393 mas_wmb_replace(mas, old);
3394 mtree_range_walk(mas);
3395 return 1;
3396 }
3397
3398 /*
3399 * mas_reuse_node() - Reuse the node to store the data.
3400 * @wr_mas: The maple write state
3401 * @bn: The maple big node
3402 * @end: The end of the data.
3403 *
3404 * Will always return false in RCU mode.
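* Nodes under RCU may still be accessed by concurrent readers, so they
* cannot be rewritten in place.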
3405 *
3406 * Return: True if node was reused, false otherwise.
3407 */
3408 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3409 struct maple_big_node *bn, unsigned char end)
3410 {
3411 /* Need to be rcu safe. */
3412 if (mt_in_rcu(wr_mas->mas->tree))
3413 return false;
3414
3415 if (end > bn->b_end) {
3416 int clear = mt_slots[wr_mas->type] - bn->b_end;
3417
3418 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3419 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3420 }
3421 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3422 return true;
3423 }
3424
3425 /*
3426 * mas_commit_b_node() - Commit the big node into the tree.
3427 * @wr_mas: The maple write state
3428 * @b_node: The maple big node
3429 * @end: The end of the data.
3430 */
3431 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3432 struct maple_big_node *b_node, unsigned char end)
3433 {
3434 struct maple_node *node;
3435 struct maple_enode *old_enode;
3436 unsigned char b_end = b_node->b_end;
3437 enum maple_type b_type = b_node->type;
3438
3439 old_enode = wr_mas->mas->node;
3440 if ((b_end < mt_min_slots[b_type]) &&
3441 (!mte_is_root(old_enode)) &&
3442 (mas_mt_height(wr_mas->mas) > 1))
3443 return mas_rebalance(wr_mas->mas, b_node);
3444
3445 if (b_end >= mt_slots[b_type])
3446 return mas_split(wr_mas->mas, b_node);
3447
3448 if (mas_reuse_node(wr_mas, b_node, end))
3449 goto reuse_node;
3450
3451 mas_node_count(wr_mas->mas, 1);
3452 if (mas_is_err(wr_mas->mas))
3453 return 0;
3454
3455 node = mas_pop_node(wr_mas->mas);
3456 node->parent = mas_mn(wr_mas->mas)->parent;
3457 wr_mas->mas->node = mt_mk_node(node, b_type);
3458 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3459 mas_replace_node(wr_mas->mas, old_enode);
3460 reuse_node:
3461 mas_update_gap(wr_mas->mas);
3462 wr_mas->mas->end = b_end;
3463 return 1;
3464 }
3465
3466 /*
3467 * mas_root_expand() - Expand a root to a node
3468 * @mas: The maple state
3469 * @entry: The entry to store into the tree
3470 */
3471 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3472 {
3473 void *contents = mas_root_locked(mas);
3474 enum maple_type type = maple_leaf_64;
3475 struct maple_node *node;
3476 void __rcu **slots;
3477 unsigned long *pivots;
3478 int slot = 0;
3479
3480 mas_node_count(mas, 1);
3481 if (unlikely(mas_is_err(mas)))
3482 return 0;
3483
3484 node = mas_pop_node(mas);
3485 pivots = ma_pivots(node, type);
3486 slots = ma_slots(node, type);
3487 node->parent = ma_parent_ptr(mas_tree_parent(mas));
3488 mas->node = mt_mk_node(node, type);
3489 mas->status = ma_active;
3490
3491 if (mas->index) {
3492 if (contents) {
3493 rcu_assign_pointer(slots[slot], contents);
3494 if (likely(mas->index > 1))
3495 slot++;
3496 }
3497 pivots[slot++] = mas->index - 1;
3498 }
3499
3500 rcu_assign_pointer(slots[slot], entry);
3501 mas->offset = slot;
3502 pivots[slot] = mas->last;
3503 if (mas->last != ULONG_MAX)
3504 pivots[++slot] = ULONG_MAX;
3505
3506 mas->depth = 1;
3507 mas_set_height(mas);
3508 ma_set_meta(node, maple_leaf_64, 0, slot);
3509 /* swap the new root into the tree */
3510 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3511 return slot;
3512 }
3513
3514 static inline void mas_store_root(struct ma_state *mas, void *entry)
3515 {
3516 if (likely((mas->last != 0) || (mas->index != 0)))
3517 mas_root_expand(mas, entry);
3518 else if (((unsigned long) (entry) & 3) == 2)
3519 mas_root_expand(mas, entry);
3520 else {
3521 rcu_assign_pointer(mas->tree->ma_root, entry);
3522 mas->status = ma_start;
3523 }
3524 }
3525
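/*
 * Illustrative sketch (hypothetical tree name and values): storing a single
 * entry at index 0 into an empty tree keeps ma_root as a direct pointer,
 * while any other range goes through mas_root_expand() and allocates a leaf.
 *
 *    DEFINE_MTREE(example_mt);
 *
 *    // Empty tree, non-zero index: mas_root_expand() builds a leaf with
 *    // slot 0 = NULL covering [0, 9] and slot 1 = the entry over [10, 20].
 *    mtree_store_range(&example_mt, 10, 20, xa_mk_value(2), GFP_KERNEL);
 */
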
3526 /*
3527 * mas_is_span_wr() - Check if the write needs to be treated as a write that
3528 * spans the node.
3529 * @wr_mas: The maple write state
3530 *
3531 * Spanning writes are writes that start in one node and end in another, or
3532 * writes of a %NULL that will cause the node to end with a %NULL.
3533 *
3534 * A %NULL in the last slot of a node requires a rebalance with the next
3535 * node, so it must be treated as spanning as well.
3536 *
3537 * Return: True if this is a spanning write, false otherwise.
3538 */
3539 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3540 {
3541 unsigned long max = wr_mas->r_max;
3542 unsigned long last = wr_mas->mas->last;
3543 enum maple_type type = wr_mas->type;
3544 void *entry = wr_mas->entry;
3545
3546 /* Contained in this pivot, fast path */
3547 if (last < max)
3548 return false;
3549
3550 if (ma_is_leaf(type)) {
3551 max = wr_mas->mas->max;
3552 if (last < max)
3553 return false;
3554 }
3555
3556 if (last == max) {
3557 /*
3558 * The last entry of leaf node cannot be NULL unless it is the
3559 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3560 */
3561 if (entry || last == ULONG_MAX)
3562 return false;
3563 }
3564
3565 trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3566 return true;
3567 }
3568
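/*
 * Illustrative sketch (hypothetical tree and values): a store whose range
 * runs past the maximum of the leaf found by the walk cannot be handled by
 * the single-node paths and is routed to mas_wr_spanning_store().
 *
 *    // With many small ranges already stored, this write starts in one
 *    // leaf and ends in another, so mas_is_span_wr() returns true.
 *    mtree_store_range(&example_mt, 100, 100000, xa_mk_value(3), GFP_KERNEL);
 */
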
3569 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3570 {
3571 wr_mas->type = mte_node_type(wr_mas->mas->node);
3572 mas_wr_node_walk(wr_mas);
3573 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3574 }
3575
3576 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3577 {
3578 wr_mas->mas->max = wr_mas->r_max;
3579 wr_mas->mas->min = wr_mas->r_min;
3580 wr_mas->mas->node = wr_mas->content;
3581 wr_mas->mas->offset = 0;
3582 wr_mas->mas->depth++;
3583 }
3584 /*
3585 * mas_wr_walk() - Walk the tree for a write.
3586 * @wr_mas: The maple write state
3587 *
3588 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3589 *
3590 * Return: True if it's contained in a node, false on spanning write.
3591 */
3592 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3593 {
3594 struct ma_state *mas = wr_mas->mas;
3595
3596 while (true) {
3597 mas_wr_walk_descend(wr_mas);
3598 if (unlikely(mas_is_span_wr(wr_mas)))
3599 return false;
3600
3601 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3602 mas->offset);
3603 if (ma_is_leaf(wr_mas->type))
3604 return true;
3605
3606 mas_wr_walk_traverse(wr_mas);
3607 }
3608
3609 return true;
3610 }
3611
3612 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3613 {
3614 struct ma_state *mas = wr_mas->mas;
3615
3616 while (true) {
3617 mas_wr_walk_descend(wr_mas);
3618 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3619 mas->offset);
3620 if (ma_is_leaf(wr_mas->type))
3621 return true;
3622 mas_wr_walk_traverse(wr_mas);
3623
3624 }
3625 return true;
3626 }
3627 /*
3628 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3629 * @l_wr_mas: The left maple write state
3630 * @r_wr_mas: The right maple write state
3631 */
3632 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3633 struct ma_wr_state *r_wr_mas)
3634 {
3635 struct ma_state *r_mas = r_wr_mas->mas;
3636 struct ma_state *l_mas = l_wr_mas->mas;
3637 unsigned char l_slot;
3638
3639 l_slot = l_mas->offset;
3640 if (!l_wr_mas->content)
3641 l_mas->index = l_wr_mas->r_min;
3642
3643 if ((l_mas->index == l_wr_mas->r_min) &&
3644 (l_slot &&
3645 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3646 if (l_slot > 1)
3647 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3648 else
3649 l_mas->index = l_mas->min;
3650
3651 l_mas->offset = l_slot - 1;
3652 }
3653
3654 if (!r_wr_mas->content) {
3655 if (r_mas->last < r_wr_mas->r_max)
3656 r_mas->last = r_wr_mas->r_max;
3657 r_mas->offset++;
3658 } else if ((r_mas->last == r_wr_mas->r_max) &&
3659 (r_mas->last < r_mas->max) &&
3660 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3661 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3662 r_wr_mas->type, r_mas->offset + 1);
3663 r_mas->offset++;
3664 }
3665 }
3666
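/*
 * Illustrative sketch (hypothetical indices): spanning %NULL stores absorb
 * neighbouring %NULL slots so that no two adjacent %NULL ranges remain.
 *
 *    // If [0, 99] and [200, 299] are already NULL, storing NULL over
 *    // [100, 199] leaves one NULL range covering [0, 299].
 *    mtree_store_range(&example_mt, 100, 199, NULL, GFP_KERNEL);
 */
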
3667 static inline void *mas_state_walk(struct ma_state *mas)
3668 {
3669 void *entry;
3670
3671 entry = mas_start(mas);
3672 if (mas_is_none(mas))
3673 return NULL;
3674
3675 if (mas_is_ptr(mas))
3676 return entry;
3677
3678 return mtree_range_walk(mas);
3679 }
3680
3681 /*
3682 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3683 * to date.
3684 *
3685 * @mas: The maple state.
3686 *
3687 * Note: Leaves @mas in an undesirable state.
3688 * Return: The entry for @mas->index or %NULL on dead node.
3689 */
3690 static inline void *mtree_lookup_walk(struct ma_state *mas)
3691 {
3692 unsigned long *pivots;
3693 unsigned char offset;
3694 struct maple_node *node;
3695 struct maple_enode *next;
3696 enum maple_type type;
3697 void __rcu **slots;
3698 unsigned char end;
3699
3700 next = mas->node;
3701 do {
3702 node = mte_to_node(next);
3703 type = mte_node_type(next);
3704 pivots = ma_pivots(node, type);
3705 end = mt_pivots[type];
3706 offset = 0;
3707 do {
3708 if (pivots[offset] >= mas->index)
3709 break;
3710 } while (++offset < end);
3711
3712 slots = ma_slots(node, type);
3713 next = mt_slot(mas->tree, slots, offset);
3714 if (unlikely(ma_dead_node(node)))
3715 goto dead_node;
3716 } while (!ma_is_leaf(type));
3717
3718 return (void *)next;
3719
3720 dead_node:
3721 mas_reset(mas);
3722 return NULL;
3723 }
3724
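/*
 * Illustrative sketch (hypothetical tree): this is the core of the fast read
 * path used by mtree_load(), which handles the RCU locking and the dead-node
 * retry itself:
 *
 *    void *entry = mtree_load(&example_mt, 15);    // NULL if nothing stored
 */
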
3725 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
3726 /*
3727 * mas_new_root() - Create a new root node that only contains the entry passed
3728 * in.
3729 * @mas: The maple state
3730 * @entry: The entry to store.
3731 *
3732 * Only valid when @mas->index == 0 and @mas->last == ULONG_MAX.
3733 *
3734 * Return: 0 on error, 1 on success.
3735 */
3736 static inline int mas_new_root(struct ma_state *mas, void *entry)
3737 {
3738 struct maple_enode *root = mas_root_locked(mas);
3739 enum maple_type type = maple_leaf_64;
3740 struct maple_node *node;
3741 void __rcu **slots;
3742 unsigned long *pivots;
3743
3744 if (!entry && !mas->index && mas->last == ULONG_MAX) {
3745 mas->depth = 0;
3746 mas_set_height(mas);
3747 rcu_assign_pointer(mas->tree->ma_root, entry);
3748 mas->status = ma_start;
3749 goto done;
3750 }
3751
3752 mas_node_count(mas, 1);
3753 if (mas_is_err(mas))
3754 return 0;
3755
3756 node = mas_pop_node(mas);
3757 pivots = ma_pivots(node, type);
3758 slots = ma_slots(node, type);
3759 node->parent = ma_parent_ptr(mas_tree_parent(mas));
3760 mas->node = mt_mk_node(node, type);
3761 mas->status = ma_active;
3762 rcu_assign_pointer(slots[0], entry);
3763 pivots[0] = mas->last;
3764 mas->depth = 1;
3765 mas_set_height(mas);
3766 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3767
3768 done:
3769 if (xa_is_node(root))
3770 mte_destroy_walk(root, mas->tree);
3771
3772 return 1;
3773 }
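
/*
 * Illustrative sketch (hypothetical tree): a store covering the entire range
 * takes this path, destroying the old tree and installing either a direct
 * root pointer or a fresh single-entry leaf.
 *
 *    mtree_store_range(&example_mt, 0, ULONG_MAX, xa_mk_value(9), GFP_KERNEL);
 */
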
3774 /*
3775 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3776 * and new nodes where necessary, then place the sub-tree in the actual tree.
3777 * Note that mas is expected to point to the node which caused the store to
3778 * span.
3779 * @wr_mas: The maple write state
3780 *
3781 * Return: 0 on error, positive on success.
3782 */
3783 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3784 {
3785 struct maple_subtree_state mast;
3786 struct maple_big_node b_node;
3787 struct ma_state *mas;
3788 unsigned char height;
3789
3790 /* Left and Right side of spanning store */
3791 MA_STATE(l_mas, NULL, 0, 0);
3792 MA_STATE(r_mas, NULL, 0, 0);
3793 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3794 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3795
3796 /*
3797 * A store operation that spans multiple nodes is called a spanning
3798 * store and is handled early in the store call stack by the function
3799 * mas_is_span_wr(). When a spanning store is identified, the maple
3800 * state is duplicated. The first maple state walks the left tree path
3801 * to ``index``, the duplicate walks the right tree path to ``last``.
3802 * The data in the two nodes are combined into a single node, two nodes,
3803 * or possibly three nodes (see the 3-way split above). A ``NULL``
3804 * written to the last entry of a node is considered a spanning store as
3805 * a rebalance is required for the operation to complete and an overflow
3806 * of data may happen.
3807 */
3808 mas = wr_mas->mas;
3809 trace_ma_op(__func__, mas);
3810
3811 if (unlikely(!mas->index && mas->last == ULONG_MAX))
3812 return mas_new_root(mas, wr_mas->entry);
3813 /*
3814 * Node rebalancing may occur due to this store, so there may be three new
3815 * entries per level plus a new root.
3816 */
3817 height = mas_mt_height(mas);
3818 mas_node_count(mas, 1 + height * 3);
3819 if (mas_is_err(mas))
3820 return 0;
3821
3822 /*
3823 * Set up right side. Need to get to the next offset after the spanning
3824 * store to ensure it's not NULL and to combine both the next node and
3825 * the node with the start together.
3826 */
3827 r_mas = *mas;
3828 /* Avoid overflow, walk to next slot in the tree. */
3829 if (r_mas.last + 1)
3830 r_mas.last++;
3831
3832 r_mas.index = r_mas.last;
3833 mas_wr_walk_index(&r_wr_mas);
3834 r_mas.last = r_mas.index = mas->last;
3835
3836 /* Set up left side. */
3837 l_mas = *mas;
3838 mas_wr_walk_index(&l_wr_mas);
3839
3840 if (!wr_mas->entry) {
3841 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
3842 mas->offset = l_mas.offset;
3843 mas->index = l_mas.index;
3844 mas->last = l_mas.last = r_mas.last;
3845 }
3846
3847 /* expanding NULLs may make this cover the entire range */
3848 if (!l_mas.index && r_mas.last == ULONG_MAX) {
3849 mas_set_range(mas, 0, ULONG_MAX);
3850 return mas_new_root(mas, wr_mas->entry);
3851 }
3852
3853 memset(&b_node, 0, sizeof(struct maple_big_node));
3854 /* Copy l_mas and store the value in b_node. */
3855 mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
3856 /* Copy r_mas into b_node. */
3857 if (r_mas.offset <= r_mas.end)
3858 mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
3859 &b_node, b_node.b_end + 1);
3860 else
3861 b_node.b_end++;
3862
3863 /* Stop spanning searches by searching for just index. */
3864 l_mas.index = l_mas.last = mas->index;
3865
3866 mast.bn = &b_node;
3867 mast.orig_l = &l_mas;
3868 mast.orig_r = &r_mas;
3869 /* Combine l_mas and r_mas and split them up evenly again. */
3870 return mas_spanning_rebalance(mas, &mast, height + 1);
3871 }
3872
3873 /*
3874 * mas_wr_node_store() - Attempt to store the value in a node
3875 * @wr_mas: The maple write state
3876 *
3877 * Attempts to reuse the node, but may allocate.
3878 *
3879 * Return: True if stored, false otherwise
3880 */
3881 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
3882 unsigned char new_end)
3883 {
3884 struct ma_state *mas = wr_mas->mas;
3885 void __rcu **dst_slots;
3886 unsigned long *dst_pivots;
3887 unsigned char dst_offset, offset_end = wr_mas->offset_end;
3888 struct maple_node reuse, *newnode;
3889 unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
3890 bool in_rcu = mt_in_rcu(mas->tree);
3891
3892 /* Check if there is enough data; an underfull node falls back to the slow path. */
3893 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3894 !(mas->mas_flags & MA_STATE_BULK))
3895 return false;
3896
3897 if (mas->last == wr_mas->end_piv)
3898 offset_end++; /* don't copy this offset */
3899 else if (unlikely(wr_mas->r_max == ULONG_MAX))
3900 mas_bulk_rebalance(mas, mas->end, wr_mas->type);
3901
3902 /* set up node. */
3903 if (in_rcu) {
3904 mas_node_count(mas, 1);
3905 if (mas_is_err(mas))
3906 return false;
3907
3908 newnode = mas_pop_node(mas);
3909 } else {
3910 memset(&reuse, 0, sizeof(struct maple_node));
3911 newnode = &reuse;
3912 }
3913
3914 newnode->parent = mas_mn(mas)->parent;
3915 dst_pivots = ma_pivots(newnode, wr_mas->type);
3916 dst_slots = ma_slots(newnode, wr_mas->type);
3917 /* Copy from start to insert point */
3918 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
3919 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
3920
3921 /* Handle insert of new range starting after old range */
3922 if (wr_mas->r_min < mas->index) {
3923 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
3924 dst_pivots[mas->offset++] = mas->index - 1;
3925 }
3926
3927 /* Store the new entry and range end. */
3928 if (mas->offset < node_pivots)
3929 dst_pivots[mas->offset] = mas->last;
3930 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
3931
3932 /*
3933 * This range wrote to the end of the node or overwrote the rest of the
3934 * data.
3935 */
3936 if (offset_end > mas->end)
3937 goto done;
3938
3939 dst_offset = mas->offset + 1;
3940 /* Copy to the end of node if necessary. */
3941 copy_size = mas->end - offset_end + 1;
3942 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
3943 sizeof(void *) * copy_size);
3944 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
3945 sizeof(unsigned long) * (copy_size - 1));
3946
3947 if (new_end < node_pivots)
3948 dst_pivots[new_end] = mas->max;
3949
3950 done:
3951 mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
3952 if (in_rcu) {
3953 struct maple_enode *old_enode = mas->node;
3954
3955 mas->node = mt_mk_node(newnode, wr_mas->type);
3956 mas_replace_node(mas, old_enode);
3957 } else {
3958 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
3959 }
3960 trace_ma_write(__func__, mas, 0, wr_mas->entry);
3961 mas_update_gap(mas);
3962 mas->end = new_end;
3963 return true;
3964 }
3965
3966 /*
3967 * mas_wr_slot_store() - Attempt to store a value in a slot.
3968 * @wr_mas: the maple write state
3969 *
3970 * Return: True if stored, false otherwise
3971 */
3972 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
3973 {
3974 struct ma_state *mas = wr_mas->mas;
3975 unsigned char offset = mas->offset;
3976 void __rcu **slots = wr_mas->slots;
3977 bool gap = false;
3978
3979 gap |= !mt_slot_locked(mas->tree, slots, offset);
3980 gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
3981
3982 if (wr_mas->offset_end - offset == 1) {
3983 if (mas->index == wr_mas->r_min) {
3984 /* Overwriting the range and a part of the next one */
3985 rcu_assign_pointer(slots[offset], wr_mas->entry);
3986 wr_mas->pivots[offset] = mas->last;
3987 } else {
3988 /* Overwriting a part of the range and the next one */
3989 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
3990 wr_mas->pivots[offset] = mas->index - 1;
3991 mas->offset++; /* Keep mas accurate. */
3992 }
3993 } else if (!mt_in_rcu(mas->tree)) {
3994 /*
3995 * Expand the range, only partially overwriting the previous and
3996 * next ranges
3997 */
3998 gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
3999 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4000 wr_mas->pivots[offset] = mas->index - 1;
4001 wr_mas->pivots[offset + 1] = mas->last;
4002 mas->offset++; /* Keep mas accurate. */
4003 } else {
4004 return false;
4005 }
4006
4007 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4008 /*
4009 * Only update gap when the new entry is empty or there is an empty
4010 * entry in the original two ranges.
4011 */
4012 if (!wr_mas->entry || gap)
4013 mas_update_gap(mas);
4014
4015 return true;
4016 }
4017
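/*
 * Illustrative sketch (hypothetical indices): a slot store happens when the
 * write only moves a boundary between existing ranges. With [0, 9] and
 * [10, 19] stored, the following rewrites one slot and one pivot instead of
 * rebuilding the node:
 *
 *    mtree_store_range(&example_mt, 5, 19, xa_mk_value(6), GFP_KERNEL);
 */
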
4018 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4019 {
4020 struct ma_state *mas = wr_mas->mas;
4021
4022 if (!wr_mas->slots[wr_mas->offset_end]) {
4023 /* If this one is null, the next and prev are not */
4024 mas->last = wr_mas->end_piv;
4025 } else {
4026 /* Check next slot(s) if we are overwriting the end */
4027 if ((mas->last == wr_mas->end_piv) &&
4028 (mas->end != wr_mas->offset_end) &&
4029 !wr_mas->slots[wr_mas->offset_end + 1]) {
4030 wr_mas->offset_end++;
4031 if (wr_mas->offset_end == mas->end)
4032 mas->last = mas->max;
4033 else
4034 mas->last = wr_mas->pivots[wr_mas->offset_end];
4035 wr_mas->end_piv = mas->last;
4036 }
4037 }
4038
4039 if (!wr_mas->content) {
4040 /* If this one is null, the next and prev are not */
4041 mas->index = wr_mas->r_min;
4042 } else {
4043 /* Check prev slot if we are overwriting the start */
4044 if (mas->index == wr_mas->r_min && mas->offset &&
4045 !wr_mas->slots[mas->offset - 1]) {
4046 mas->offset--;
4047 wr_mas->r_min = mas->index =
4048 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4049 wr_mas->r_max = wr_mas->pivots[mas->offset];
4050 }
4051 }
4052 }
4053
4054 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4055 {
4056 while ((wr_mas->offset_end < wr_mas->mas->end) &&
4057 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4058 wr_mas->offset_end++;
4059
4060 if (wr_mas->offset_end < wr_mas->mas->end)
4061 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4062 else
4063 wr_mas->end_piv = wr_mas->mas->max;
4064
4065 if (!wr_mas->entry)
4066 mas_wr_extend_null(wr_mas);
4067 }
4068
4069 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4070 {
4071 struct ma_state *mas = wr_mas->mas;
4072 unsigned char new_end = mas->end + 2;
4073
4074 new_end -= wr_mas->offset_end - mas->offset;
4075 if (wr_mas->r_min == mas->index)
4076 new_end--;
4077
4078 if (wr_mas->end_piv == mas->last)
4079 new_end--;
4080
4081 return new_end;
4082 }
4083
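/*
 * Worked example (hypothetical values): with mas->end = 5, mas->offset = 2
 * and wr_mas->offset_end = 3, new_end starts at 5 + 2 = 7 and drops to
 * 7 - (3 - 2) = 6. If the new range also begins exactly at r_min it drops
 * to 5, and if it also ends exactly on end_piv it drops to 4, i.e. the
 * write merged two old slots into one.
 */
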
4084 /*
4085 * mas_wr_append() - Attempt to append
4086 * @wr_mas: the maple write state
4087 * @new_end: The end of the node after the modification
4088 *
4089 * This is currently unsafe in RCU mode since the end of the node may be
4090 * cached by readers while the node contents are updated, which could result
4091 * in readers seeing inaccurate information.
4092 *
4093 * Return: True if appended, false otherwise
4094 */
4095 static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4096 unsigned char new_end)
4097 {
4098 struct ma_state *mas;
4099 void __rcu **slots;
4100 unsigned char end;
4101
4102 mas = wr_mas->mas;
4103 if (mt_in_rcu(mas->tree))
4104 return false;
4105
4106 end = mas->end;
4107 if (mas->offset != end)
4108 return false;
4109
4110 if (new_end < mt_pivots[wr_mas->type]) {
4111 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4112 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4113 }
4114
4115 slots = wr_mas->slots;
4116 if (new_end == end + 1) {
4117 if (mas->last == wr_mas->r_max) {
4118 /* Append to end of range */
4119 rcu_assign_pointer(slots[new_end], wr_mas->entry);
4120 wr_mas->pivots[end] = mas->index - 1;
4121 mas->offset = new_end;
4122 } else {
4123 /* Append to start of range */
4124 rcu_assign_pointer(slots[new_end], wr_mas->content);
4125 wr_mas->pivots[end] = mas->last;
4126 rcu_assign_pointer(slots[end], wr_mas->entry);
4127 }
4128 } else {
4129 /* Append to the range without touching any boundaries. */
4130 rcu_assign_pointer(slots[new_end], wr_mas->content);
4131 wr_mas->pivots[end + 1] = mas->last;
4132 rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4133 wr_mas->pivots[end] = mas->index - 1;
4134 mas->offset = end + 1;
4135 }
4136
4137 if (!wr_mas->content || !wr_mas->entry)
4138 mas_update_gap(mas);
4139
4140 mas->end = new_end;
4141 trace_ma_write(__func__, mas, new_end, wr_mas->entry);
4142 return true;
4143 }
4144
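/*
 * Illustrative sketch (hypothetical indices, tree not in RCU mode): appends
 * land in the last slot of a leaf and reuse the node in place.
 *
 *    // If the highest existing range is [0, 99], this fills part of the
 *    // trailing NULL slot without allocating a replacement node.
 *    mtree_store_range(&example_mt, 100, 199, xa_mk_value(4), GFP_KERNEL);
 */
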
4145 /*
4146 * mas_wr_bnode() - Slow path for a modification.
4147 * @wr_mas: The write maple state
4148 *
4149 * This is where splits and rebalances end up.
4150 */
4151 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4152 {
4153 struct maple_big_node b_node;
4154
4155 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4156 memset(&b_node, 0, sizeof(struct maple_big_node));
4157 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4158 mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
4159 }
4160
4161 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4162 {
4163 struct ma_state *mas = wr_mas->mas;
4164 unsigned char new_end;
4165
4166 /* Direct replacement */
4167 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4168 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4169 if (!!wr_mas->entry ^ !!wr_mas->content)
4170 mas_update_gap(mas);
4171 return;
4172 }
4173
4174 /*
4175 * new_end exceeds the size of the maple node and cannot enter the fast
4176 * path.
4177 */
4178 new_end = mas_wr_new_end(wr_mas);
4179 if (new_end >= mt_slots[wr_mas->type])
4180 goto slow_path;
4181
4182 /* Attempt to append */
4183 if (mas_wr_append(wr_mas, new_end))
4184 return;
4185
4186 if (new_end == mas->end && mas_wr_slot_store(wr_mas))
4187 return;
4188
4189 if (mas_wr_node_store(wr_mas, new_end))
4190 return;
4191
4192 if (mas_is_err(mas))
4193 return;
4194
4195 slow_path:
4196 mas_wr_bnode(wr_mas);
4197 }
4198
4199 /*
4200 * mas_wr_store_entry() - Internal call to store a value
4201 * @wr_mas: The maple write state, holding the entry to store
4202 *
4203 * The entry being overwritten is left in @wr_mas->content so callers can
4204 * retrieve the previous value.
4205 */
4206 static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
4207 {
4208 struct ma_state *mas = wr_mas->mas;
4209
4210 wr_mas->content = mas_start(mas);
4211 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4212 mas_store_root(mas, wr_mas->entry);
4213 return;
4214 }
4215
4216 if (unlikely(!mas_wr_walk(wr_mas))) {
4217 mas_wr_spanning_store(wr_mas);
4218 return;
4219 }
4220
4221 /* At this point, we are at the leaf node that needs to be altered. */
4222 mas_wr_end_piv(wr_mas);
4223 /* New root for a single pointer */
4224 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4225 mas_new_root(mas, wr_mas->entry);
4226 else
4227 mas_wr_modify(wr_mas);
4228 }
4229
4230 /**
4231 * mas_insert() - Internal call to insert a value
4232 * @mas: The maple state
4233 * @entry: The entry to store
4234 *
4235 * Return: %NULL if stored, otherwise the contents that already exist at the
4236 * requested index. The maple state needs to be checked for error conditions.
4237 */
4238 static inline void *mas_insert(struct ma_state *mas, void *entry)
4239 {
4240 MA_WR_STATE(wr_mas, mas, entry);
4241
4242 /*
4243 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4244 * tree. If the insert fits exactly into an existing gap with a value
4245 * of NULL, then the slot only needs to be written with the new value.
4246 * If the range being inserted is adjacent to another range, then only a
4247 * single pivot needs to be inserted (as well as writing the entry). If
4248 * the new range is within a gap but does not touch any other ranges,
4249 * then two pivots need to be inserted: the start - 1, and the end. As
4250 * usual, the entry must be written. Most operations require a new node
4251 * to be allocated and replace an existing node to ensure RCU safety,
4252 * when in RCU mode. The exception to requiring a newly allocated node
4253 * is when inserting at the end of a node (appending). When done
4254 * carefully, appending can reuse the node in place.
4255 */
4256 wr_mas.content = mas_start(mas);
4257 if (wr_mas.content)
4258 goto exists;
4259
4260 if (mas_is_none(mas) || mas_is_ptr(mas)) {
4261 mas_store_root(mas, entry);
4262 return NULL;
4263 }
4264
4265 /* spanning writes always overwrite something */
4266 if (!mas_wr_walk(&wr_mas))
4267 goto exists;
4268
4269 /* At this point, we are at the leaf node that needs to be altered. */
4270 wr_mas.offset_end = mas->offset;
4271 wr_mas.end_piv = wr_mas.r_max;
4272
4273 if (wr_mas.content || (mas->last > wr_mas.r_max))
4274 goto exists;
4275
4276 if (!entry)
4277 return NULL;
4278
4279 mas_wr_modify(&wr_mas);
4280 return wr_mas.content;
4281
4282 exists:
4283 mas_set_err(mas, -EEXIST);
4284 return wr_mas.content;
4285
4286 }
4287
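/*
 * Illustrative sketch (hypothetical tree and values) using the external
 * insert interface, which maps onto this function:
 *
 *    int ret;
 *
 *    ret = mtree_insert_range(&example_mt, 10, 20, xa_mk_value(1), GFP_KERNEL);
 *    // ret == 0; an overlapping insert then fails with -EEXIST:
 *    ret = mtree_insert_range(&example_mt, 15, 25, xa_mk_value(2), GFP_KERNEL);
 */
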
4288 /**
4289 * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
4290 * @mas: The maple state.
4291 * @startp: Pointer to ID.
4292 * @range_lo: Lower bound of range to search.
4293 * @range_hi: Upper bound of range to search.
4294 * @entry: The entry to store.
4295 * @next: Pointer to next ID to allocate.
4296 * @gfp: The GFP_FLAGS to use for allocations.
4297 *
4298 * Return: 0 if the allocation succeeded without wrapping, 1 if the
4299 * allocation succeeded after wrapping, or -EBUSY if there are no
4300 * free entries.
4301 */
4302 int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
4303 void *entry, unsigned long range_lo, unsigned long range_hi,
4304 unsigned long *next, gfp_t gfp)
4305 {
4306 unsigned long min = range_lo;
4307 int ret = 0;
4308
4309 range_lo = max(min, *next);
4310 ret = mas_empty_area(mas, range_lo, range_hi, 1);
4311 if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
4312 mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
4313 ret = 1;
4314 }
4315 if (ret < 0 && range_lo > min) {
4316 ret = mas_empty_area(mas, min, range_hi, 1);
4317 if (ret == 0)
4318 ret = 1;
4319 }
4320 if (ret < 0)
4321 return ret;
4322
4323 do {
4324 mas_insert(mas, entry);
4325 } while (mas_nomem(mas, gfp));
4326 if (mas_is_err(mas))
4327 return xa_err(mas->node);
4328
4329 *startp = mas->index;
4330 *next = *startp + 1;
4331 if (*next == 0)
4332 mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
4333
4334 return ret;
4335 }
4336 EXPORT_SYMBOL(mas_alloc_cyclic);
4337
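/*
 * Illustrative sketch (hypothetical tree, entry and bounds) via the
 * mtree_alloc_cyclic() wrapper, which supplies the locking:
 *
 *    unsigned long id, next = 0;
 *    int ret;
 *
 *    ret = mtree_alloc_cyclic(&example_mt, &id, ptr, 1, 1023, &next,
 *                             GFP_KERNEL);
 *    // ret == 0 and id == 1 on an empty tree; ret == 1 after wrapping,
 *    // -EBUSY once [1, 1023] is full.
 */
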
4338 static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4339 {
4340 retry:
4341 mas_set(mas, index);
4342 mas_state_walk(mas);
4343 if (mas_is_start(mas))
4344 goto retry;
4345 }
4346
4347 static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
4348 struct maple_node *node, const unsigned long index)
4349 {
4350 if (unlikely(ma_dead_node(node))) {
4351 mas_rewalk(mas, index);
4352 return true;
4353 }
4354 return false;
4355 }
4356
4357 /*
4358 * mas_prev_node() - Find the previous non-null entry at the same level in
4359 * the tree.
4361 * @mas: The maple state
4362 * @min: The lower limit to search
4363 *
4364 * The prev node value will be mas->node[mas->offset] or the status will be
4365 * ma_none.
4366 * Return: 1 if the node is dead, 0 otherwise.
4367 */
4368 static int mas_prev_node(struct ma_state *mas, unsigned long min)
4369 {
4370 enum maple_type mt;
4371 int offset, level;
4372 void __rcu **slots;
4373 struct maple_node *node;
4374 unsigned long *pivots;
4375 unsigned long max;
4376
4377 node = mas_mn(mas);
4378 if (!mas->min)
4379 goto no_entry;
4380
4381 max = mas->min - 1;
4382 if (max < min)
4383 goto no_entry;
4384
4385 level = 0;
4386 do {
4387 if (ma_is_root(node))
4388 goto no_entry;
4389
4390 /* Walk up. */
4391 if (unlikely(mas_ascend(mas)))
4392 return 1;
4393 offset = mas->offset;
4394 level++;
4395 node = mas_mn(mas);
4396 } while (!offset);
4397
4398 offset--;
4399 mt = mte_node_type(mas->node);
4400 while (level > 1) {
4401 level--;
4402 slots = ma_slots(node, mt);
4403 mas->node = mas_slot(mas, slots, offset);
4404 if (unlikely(ma_dead_node(node)))
4405 return 1;
4406
4407 mt = mte_node_type(mas->node);
4408 node = mas_mn(mas);
4409 pivots = ma_pivots(node, mt);
4410 offset = ma_data_end(node, mt, pivots, max);
4411 if (unlikely(ma_dead_node(node)))
4412 return 1;
4413 }
4414
4415 slots = ma_slots(node, mt);
4416 mas->node = mas_slot(mas, slots, offset);
4417 pivots = ma_pivots(node, mt);
4418 if (unlikely(ma_dead_node(node)))
4419 return 1;
4420
4421 if (likely(offset))
4422 mas->min = pivots[offset - 1] + 1;
4423 mas->max = max;
4424 mas->offset = mas_data_end(mas);
4425 if (unlikely(mte_dead_node(mas->node)))
4426 return 1;
4427
4428 mas->end = mas->offset;
4429 return 0;
4430
4431 no_entry:
4432 if (unlikely(ma_dead_node(node)))
4433 return 1;
4434
4435 mas->status = ma_underflow;
4436 return 0;
4437 }
4438
4439 /*
4440 * mas_prev_slot() - Get the entry in the previous slot
4441 *
4442 * @mas: The maple state
4443 * @min: The minimum starting range
4444 * @empty: Can be empty
4446 *
4447 * Return: The entry in the previous slot which is possibly NULL
4448 */
4449 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4450 {
4451 void *entry;
4452 void __rcu **slots;
4453 unsigned long pivot;
4454 enum maple_type type;
4455 unsigned long *pivots;
4456 struct maple_node *node;
4457 unsigned long save_point = mas->index;
4458
4459 retry:
4460 node = mas_mn(mas);
4461 type = mte_node_type(mas->node);
4462 pivots = ma_pivots(node, type);
4463 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4464 goto retry;
4465
4466 if (mas->min <= min) {
4467 pivot = mas_safe_min(mas, pivots, mas->offset);
4468
4469 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4470 goto retry;
4471
4472 if (pivot <= min)
4473 goto underflow;
4474 }
4475
4476 again:
4477 if (likely(mas->offset)) {
4478 mas->offset--;
4479 mas->last = mas->index - 1;
4480 mas->index = mas_safe_min(mas, pivots, mas->offset);
4481 } else {
4482 if (mas->index <= min)
4483 goto underflow;
4484
4485 if (mas_prev_node(mas, min)) {
4486 mas_rewalk(mas, save_point);
4487 goto retry;
4488 }
4489
4490 if (WARN_ON_ONCE(mas_is_underflow(mas)))
4491 return NULL;
4492
4493 mas->last = mas->max;
4494 node = mas_mn(mas);
4495 type = mte_node_type(mas->node);
4496 pivots = ma_pivots(node, type);
4497 mas->index = pivots[mas->offset - 1] + 1;
4498 }
4499
4500 slots = ma_slots(node, type);
4501 entry = mas_slot(mas, slots, mas->offset);
4502 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4503 goto retry;
4504
4506 if (likely(entry))
4507 return entry;
4508
4509 if (!empty) {
4510 if (mas->index <= min) {
4511 mas->status = ma_underflow;
4512 return NULL;
4513 }
4514
4515 goto again;
4516 }
4517
4518 return entry;
4519
4520 underflow:
4521 mas->status = ma_underflow;
4522 return NULL;
4523 }
4524
4525 /*
4526 * mas_next_node() - Get the next node at the same level in the tree.
4527 * @mas: The maple state
4528 * @max: The maximum pivot value to check.
4529 *
4530 * The next value will be mas->node[mas->offset] or the status will have
4531 * overflowed.
4532 * Return: 1 on dead node, 0 otherwise.
4533 */
4534 static int mas_next_node(struct ma_state *mas, struct maple_node *node,
4535 unsigned long max)
4536 {
4537 unsigned long min;
4538 unsigned long *pivots;
4539 struct maple_enode *enode;
4540 struct maple_node *tmp;
4541 int level = 0;
4542 unsigned char node_end;
4543 enum maple_type mt;
4544 void __rcu **slots;
4545
4546 if (mas->max >= max)
4547 goto overflow;
4548
4549 min = mas->max + 1;
4550 level = 0;
4551 do {
4552 if (ma_is_root(node))
4553 goto overflow;
4554
4555 /* Walk up. */
4556 if (unlikely(mas_ascend(mas)))
4557 return 1;
4558
4559 level++;
4560 node = mas_mn(mas);
4561 mt = mte_node_type(mas->node);
4562 pivots = ma_pivots(node, mt);
4563 node_end = ma_data_end(node, mt, pivots, mas->max);
4564 if (unlikely(ma_dead_node(node)))
4565 return 1;
4566
4567 } while (unlikely(mas->offset == node_end));
4568
4569 slots = ma_slots(node, mt);
4570 mas->offset++;
4571 enode = mas_slot(mas, slots, mas->offset);
4572 if (unlikely(ma_dead_node(node)))
4573 return 1;
4574
4575 if (level > 1)
4576 mas->offset = 0;
4577
4578 while (unlikely(level > 1)) {
4579 level--;
4580 mas->node = enode;
4581 node = mas_mn(mas);
4582 mt = mte_node_type(mas->node);
4583 slots = ma_slots(node, mt);
4584 enode = mas_slot(mas, slots, 0);
4585 if (unlikely(ma_dead_node(node)))
4586 return 1;
4587 }
4588
4589 if (!mas->offset)
4590 pivots = ma_pivots(node, mt);
4591
4592 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4593 tmp = mte_to_node(enode);
4594 mt = mte_node_type(enode);
4595 pivots = ma_pivots(tmp, mt);
4596 mas->end = ma_data_end(tmp, mt, pivots, mas->max);
4597 if (unlikely(ma_dead_node(node)))
4598 return 1;
4599
4600 mas->node = enode;
4601 mas->min = min;
4602 return 0;
4603
4604 overflow:
4605 if (unlikely(ma_dead_node(node)))
4606 return 1;
4607
4608 mas->status = ma_overflow;
4609 return 0;
4610 }
4611
4612 /*
4613 * mas_next_slot() - Get the entry in the next slot
4614 *
4615 * @mas: The maple state
4616 * @max: The maximum starting range
4617 * @empty: Can be empty
4620 *
4621 * Return: The entry in the next slot which is possibly NULL
4622 */
4623 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4624 {
4625 void __rcu **slots;
4626 unsigned long *pivots;
4627 unsigned long pivot;
4628 enum maple_type type;
4629 struct maple_node *node;
4630 unsigned long save_point = mas->last;
4631 void *entry;
4632
4633 retry:
4634 node = mas_mn(mas);
4635 type = mte_node_type(mas->node);
4636 pivots = ma_pivots(node, type);
4637 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4638 goto retry;
4639
4640 if (mas->max >= max) {
4641 if (likely(mas->offset < mas->end))
4642 pivot = pivots[mas->offset];
4643 else
4644 pivot = mas->max;
4645
4646 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4647 goto retry;
4648
4649 if (pivot >= max) { /* Was at the limit, next will extend beyond */
4650 mas->status = ma_overflow;
4651 return NULL;
4652 }
4653 }
4654
4655 if (likely(mas->offset < mas->end)) {
4656 mas->index = pivots[mas->offset] + 1;
4657 again:
4658 mas->offset++;
4659 if (likely(mas->offset < mas->end))
4660 mas->last = pivots[mas->offset];
4661 else
4662 mas->last = mas->max;
4663 } else {
4664 if (mas->last >= max) {
4665 mas->status = ma_overflow;
4666 return NULL;
4667 }
4668
4669 if (mas_next_node(mas, node, max)) {
4670 mas_rewalk(mas, save_point);
4671 goto retry;
4672 }
4673
4674 if (WARN_ON_ONCE(mas_is_overflow(mas)))
4675 return NULL;
4676
4677 mas->offset = 0;
4678 mas->index = mas->min;
4679 node = mas_mn(mas);
4680 type = mte_node_type(mas->node);
4681 pivots = ma_pivots(node, type);
4682 mas->last = pivots[0];
4683 }
4684
4685 slots = ma_slots(node, type);
4686 entry = mt_slot(mas->tree, slots, mas->offset);
4687 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4688 goto retry;
4689
4690 if (entry)
4691 return entry;
4692
4694 if (!empty) {
4695 if (mas->last >= max) {
4696 mas->status = ma_overflow;
4697 return NULL;
4698 }
4699
4700 mas->index = mas->last + 1;
4701 goto again;
4702 }
4703
4704 return entry;
4705 }
4706
4707 /*
4708 * mas_next_entry() - Internal function to get the next entry.
4709 * @mas: The maple state
4710 * @limit: The maximum range start.
4711 *
4712 * Set the @mas->node to the next entry and the range_start to
4713 * the beginning value for the entry. Does not check beyond @limit.
4714 * Sets @mas->index and @mas->last to the range; does not update them on
4715 * overflow.
4716 * Restarts on dead nodes.
4717 *
4718 * Return: the next entry or %NULL.
4719 */
4720 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4721 {
4722 if (mas->last >= limit) {
4723 mas->status = ma_overflow;
4724 return NULL;
4725 }
4726
4727 return mas_next_slot(mas, limit, false);
4728 }
4729
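/*
 * Illustrative sketch (hypothetical tree): the slot/node iteration above
 * backs the public iterators, e.g. walking all entries under RCU:
 *
 *    MA_STATE(mas, &example_mt, 0, 0);
 *    void *entry;
 *
 *    rcu_read_lock();
 *    mas_for_each(&mas, entry, ULONG_MAX)
 *        pr_debug("[%lx, %lx] -> %p\n", mas.index, mas.last, entry);
 *    rcu_read_unlock();
 */
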
4730 /*
4731 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4732 * highest gap address of a given size in a given node and descend.
4733 * @mas: The maple state
4734 * @size: The needed size.
4735 *
4736 * Return: True if found in a leaf, false otherwise.
4737 *
4738 */
4739 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4740 unsigned long *gap_min, unsigned long *gap_max)
4741 {
4742 enum maple_type type = mte_node_type(mas->node);
4743 struct maple_node *node = mas_mn(mas);
4744 unsigned long *pivots, *gaps;
4745 void __rcu **slots;
4746 unsigned long gap = 0;
4747 unsigned long max, min;
4748 unsigned char offset;
4749
4750 if (unlikely(mas_is_err(mas)))
4751 return true;
4752
4753 if (ma_is_dense(type)) {
4754 /* dense nodes. */
4755 mas->offset = (unsigned char)(mas->index - mas->min);
4756 return true;
4757 }
4758
4759 pivots = ma_pivots(node, type);
4760 slots = ma_slots(node, type);
4761 gaps = ma_gaps(node, type);
4762 offset = mas->offset;
4763 min = mas_safe_min(mas, pivots, offset);
4764 /* Skip out of bounds. */
4765 while (mas->last < min)
4766 min = mas_safe_min(mas, pivots, --offset);
4767
4768 max = mas_safe_pivot(mas, pivots, offset, type);
4769 while (mas->index <= max) {
4770 gap = 0;
4771 if (gaps)
4772 gap = gaps[offset];
4773 else if (!mas_slot(mas, slots, offset))
4774 gap = max - min + 1;
4775
4776 if (gap) {
4777 if ((size <= gap) && (size <= mas->last - min + 1))
4778 break;
4779
4780 if (!gaps) {
4781 /* Skip the next slot, it cannot be a gap. */
4782 if (offset < 2)
4783 goto ascend;
4784
4785 offset -= 2;
4786 max = pivots[offset];
4787 min = mas_safe_min(mas, pivots, offset);
4788 continue;
4789 }
4790 }
4791
4792 if (!offset)
4793 goto ascend;
4794
4795 offset--;
4796 max = min - 1;
4797 min = mas_safe_min(mas, pivots, offset);
4798 }
4799
4800 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4801 goto no_space;
4802
4803 if (unlikely(ma_is_leaf(type))) {
4804 mas->offset = offset;
4805 *gap_min = min;
4806 *gap_max = min + gap - 1;
4807 return true;
4808 }
4809
4810 /* descend, only happens under lock. */
4811 mas->node = mas_slot(mas, slots, offset);
4812 mas->min = min;
4813 mas->max = max;
4814 mas->offset = mas_data_end(mas);
4815 return false;
4816
4817 ascend:
4818 if (!mte_is_root(mas->node))
4819 return false;
4820
4821 no_space:
4822 mas_set_err(mas, -EBUSY);
4823 return false;
4824 }
4825
4826 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4827 {
4828 enum maple_type type = mte_node_type(mas->node);
4829 unsigned long pivot, min, gap = 0;
4830 unsigned char offset, data_end;
4831 unsigned long *gaps, *pivots;
4832 void __rcu **slots;
4833 struct maple_node *node;
4834 bool found = false;
4835
4836 if (ma_is_dense(type)) {
4837 mas->offset = (unsigned char)(mas->index - mas->min);
4838 return true;
4839 }
4840
4841 node = mas_mn(mas);
4842 pivots = ma_pivots(node, type);
4843 slots = ma_slots(node, type);
4844 gaps = ma_gaps(node, type);
4845 offset = mas->offset;
4846 min = mas_safe_min(mas, pivots, offset);
4847 data_end = ma_data_end(node, type, pivots, mas->max);
4848 for (; offset <= data_end; offset++) {
4849 pivot = mas_safe_pivot(mas, pivots, offset, type);
4850
4851 /* Not within lower bounds */
4852 if (mas->index > pivot)
4853 goto next_slot;
4854
4855 if (gaps)
4856 gap = gaps[offset];
4857 else if (!mas_slot(mas, slots, offset))
4858 gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4859 else
4860 goto next_slot;
4861
4862 if (gap >= size) {
4863 if (ma_is_leaf(type)) {
4864 found = true;
4865 goto done;
4866 }
4867 if (mas->index <= pivot) {
4868 mas->node = mas_slot(mas, slots, offset);
4869 mas->min = min;
4870 mas->max = pivot;
4871 offset = 0;
4872 break;
4873 }
4874 }
4875 next_slot:
4876 min = pivot + 1;
4877 if (mas->last <= pivot) {
4878 mas_set_err(mas, -EBUSY);
4879 return true;
4880 }
4881 }
4882
4883 if (mte_is_root(mas->node))
4884 found = true;
4885 done:
4886 mas->offset = offset;
4887 return found;
4888 }
4889
4890 /**
4891 * mas_walk() - Search for @mas->index in the tree.
4892 * @mas: The maple state.
4893 *
4894 * mas->index and mas->last will be set to the range if there is a value. If
4895 * mas->status is ma_none, it is reset to ma_start.
4896 *
4897 * Return: the entry at the location or %NULL.
4898 */
4899 void *mas_walk(struct ma_state *mas)
4900 {
4901 void *entry;
4902
4903 if (!mas_is_active(mas) || !mas_is_start(mas))
4904 mas->status = ma_start;
4905 retry:
4906 entry = mas_state_walk(mas);
4907 if (mas_is_start(mas)) {
4908 goto retry;
4909 } else if (mas_is_none(mas)) {
4910 mas->index = 0;
4911 mas->last = ULONG_MAX;
4912 } else if (mas_is_ptr(mas)) {
4913 if (!mas->index) {
4914 mas->last = 0;
4915 return entry;
4916 }
4917
4918 mas->index = 1;
4919 mas->last = ULONG_MAX;
4920 mas->status = ma_none;
4921 return NULL;
4922 }
4923
4924 return entry;
4925 }
4926 EXPORT_SYMBOL_GPL(mas_walk);
4927
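/*
 * Illustrative sketch (hypothetical tree): mas_walk() can run under the RCU
 * read lock, and leaves the matched range in mas.index/mas.last.
 *
 *    MA_STATE(mas, &example_mt, 12, 12);
 *    void *entry;
 *
 *    rcu_read_lock();
 *    entry = mas_walk(&mas);    // entry stored at index 12, or NULL
 *    rcu_read_unlock();
 */
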
4928 static inline bool mas_rewind_node(struct ma_state *mas)
4929 {
4930 unsigned char slot;
4931
4932 do {
4933 if (mte_is_root(mas->node)) {
4934 slot = mas->offset;
4935 if (!slot)
4936 return false;
4937 } else {
4938 mas_ascend(mas);
4939 slot = mas->offset;
4940 }
4941 } while (!slot);
4942
4943 mas->offset = --slot;
4944 return true;
4945 }
4946
4947 /*
4948 * mas_skip_node() - Internal function. Skip over a node.
4949 * @mas: The maple state.
4950 *
4951 * Return: true if there is another node, false otherwise.
4952 */
4953 static inline bool mas_skip_node(struct ma_state *mas)
4954 {
4955 if (mas_is_err(mas))
4956 return false;
4957
4958 do {
4959 if (mte_is_root(mas->node)) {
4960 if (mas->offset >= mas_data_end(mas)) {
4961 mas_set_err(mas, -EBUSY);
4962 return false;
4963 }
4964 } else {
4965 mas_ascend(mas);
4966 }
4967 } while (mas->offset >= mas_data_end(mas));
4968
4969 mas->offset++;
4970 return true;
4971 }
4972
4973 /*
4974 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of
4975 * @size
4976 * @mas: The maple state
4977 * @size: The size of the gap required
4978 *
4979 * Search between @mas->index and @mas->last for a gap of @size.
4980 */
4981 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
4982 {
4983 struct maple_enode *last = NULL;
4984
4985 /*
4986 * There are 4 options:
4987 * go to child (descend)
4988 * go back to parent (ascend)
4989 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
4990 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
4991 */
4992 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
4993 if (last == mas->node)
4994 mas_skip_node(mas);
4995 else
4996 last = mas->node;
4997 }
4998 }
4999
5000 /*
5001 * mas_sparse_area() - Internal function. Return upper or lower limit when
5002 * searching for a gap in an empty tree.
5003 * @mas: The maple state
5004 * @min: the minimum range
5005 * @max: The maximum range
5006 * @size: The size of the gap
5007 * @fwd: Searching forward or back
5008 */
5009 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5010 unsigned long max, unsigned long size, bool fwd)
5011 {
5012 if (!unlikely(mas_is_none(mas)) && min == 0) {
5013 min++;
5014 /*
5015 * min was just increased, so recheck whether the requested size
5016 * still fits.
5017 */
5018 if (min > max || max - min + 1 < size)
5019 return -EBUSY;
5020 }
5021 /* mas_is_ptr */
5022
5023 if (fwd) {
5024 mas->index = min;
5025 mas->last = min + size - 1;
5026 } else {
5027 mas->last = max;
5028 mas->index = max - size + 1;
5029 }
5030 return 0;
5031 }
5032
5033 /*
5034 * mas_empty_area() - Get the lowest address within the range that is
5035 * sufficient for the size requested.
5036 * @mas: The maple state
5037 * @min: The lowest value of the range
5038 * @max: The highest value of the range
5039 * @size: The size needed
5040 */
5041 int mas_empty_area(struct ma_state *mas, unsigned long min,
5042 unsigned long max, unsigned long size)
5043 {
5044 unsigned char offset;
5045 unsigned long *pivots;
5046 enum maple_type mt;
5047 struct maple_node *node;
5048
5049 if (min > max)
5050 return -EINVAL;
5051
5052 if (size == 0 || max - min < size - 1)
5053 return -EINVAL;
5054
5055 if (mas_is_start(mas))
5056 mas_start(mas);
5057 else if (mas->offset >= 2)
5058 mas->offset -= 2;
5059 else if (!mas_skip_node(mas))
5060 return -EBUSY;
5061
5062 /* Empty set */
5063 if (mas_is_none(mas) || mas_is_ptr(mas))
5064 return mas_sparse_area(mas, min, max, size, true);
5065
5066 /* The start of the window can only be within these values */
5067 mas->index = min;
5068 mas->last = max;
5069 mas_awalk(mas, size);
5070
5071 if (unlikely(mas_is_err(mas)))
5072 return xa_err(mas->node);
5073
5074 offset = mas->offset;
5075 if (unlikely(offset == MAPLE_NODE_SLOTS))
5076 return -EBUSY;
5077
5078 node = mas_mn(mas);
5079 mt = mte_node_type(mas->node);
5080 pivots = ma_pivots(node, mt);
5081 min = mas_safe_min(mas, pivots, offset);
5082 if (mas->index < min)
5083 mas->index = min;
5084 mas->last = mas->index + size - 1;
5085 mas->end = ma_data_end(node, mt, pivots, mas->max);
5086 return 0;
5087 }
5088 EXPORT_SYMBOL_GPL(mas_empty_area);
5089
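/*
 * Illustrative sketch (hypothetical bounds, caller holds the lock): find the
 * lowest 0x1000-wide gap in [0x1000, 0x100000] and claim it.
 *
 *    MA_STATE(mas, &example_mt, 0, 0);
 *
 *    mas_lock(&mas);
 *    if (!mas_empty_area(&mas, 0x1000, 0x100000, 0x1000))
 *        // gap found: mas.index..mas.last covers exactly 0x1000 slots
 *        mas_store_gfp(&mas, xa_mk_value(1), GFP_KERNEL);
 *    mas_unlock(&mas);
 */
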
5090 /*
5091 * mas_empty_area_rev() - Get the highest address within the range that is
5092 * sufficient for the size requested.
5093 * @mas: The maple state
5094 * @min: The lowest value of the range
5095 * @max: The highest value of the range
5096 * @size: The size needed
5097 */
5098 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5099 unsigned long max, unsigned long size)
5100 {
5101 struct maple_enode *last = mas->node;
5102
5103 if (min > max)
5104 return -EINVAL;
5105
5106 if (size == 0 || max - min < size - 1)
5107 return -EINVAL;
5108
5109 if (mas_is_start(mas))
5110 mas_start(mas);
5111 else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
5112 return -EBUSY;
5113
5114 if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5115 return mas_sparse_area(mas, min, max, size, false);
5116 else if (mas->offset >= 2)
5117 mas->offset -= 2;
5118 else
5119 mas->offset = mas_data_end(mas);
5120
5122 /* The start of the window can only be within these values. */
5123 mas->index = min;
5124 mas->last = max;
5125
5126 while (!mas_rev_awalk(mas, size, &min, &max)) {
5127 if (last == mas->node) {
5128 if (!mas_rewind_node(mas))
5129 return -EBUSY;
5130 } else {
5131 last = mas->node;
5132 }
5133 }
5134
5135 if (mas_is_err(mas))
5136 return xa_err(mas->node);
5137
5138 if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5139 return -EBUSY;
5140
5141 /* Trim the upper limit to the max. */
5142 if (max < mas->last)
5143 mas->last = max;
5144
5145 mas->index = mas->last - size + 1;
5146 mas->end = mas_data_end(mas);
5147 return 0;
5148 }
5149 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5150
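/*
 * Illustrative sketch (same hypothetical bounds as above): the reverse
 * search returns the highest fitting gap instead, with
 * mas.index == mas.last - size + 1 on success.
 *
 *    ret = mas_empty_area_rev(&mas, 0x1000, 0x100000, 0x1000);
 */
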
5151 /*
5152 * mte_dead_leaves() - Mark all leaves of a node as dead.
5153 * @enode: The maple encoded node
5154 * @mt: The maple tree
5155 * @slots: Pointer to the slot array
5156 *
5157 * Must hold the write lock.
5158 *
5159 * Return: The number of leaves marked as dead.
5160 */
5161 static inline
5162 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5163 void __rcu **slots)
5164 {
5165 struct maple_node *node;
5166 enum maple_type type;
5167 void *entry;
5168 int offset;
5169
5170 for (offset = 0; offset < mt_slot_count(enode); offset++) {
5171 entry = mt_slot(mt, slots, offset);
5172 type = mte_node_type(entry);
5173 node = mte_to_node(entry);
5174 /* Use both node and type to catch LE & BE metadata */
5175 if (!node || !type)
5176 break;
5177
5178 mte_set_node_dead(entry);
5179 node->type = type;
5180 rcu_assign_pointer(slots[offset], node);
5181 }
5182
5183 return offset;
5184 }
5185
5186 /**
5187 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5188 * @enode: The maple encoded node
5189 * @offset: The starting offset
5190 *
5191 * Note: This can only be used from the RCU callback context.
5192 */
5193 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5194 {
5195 struct maple_node *node, *next;
5196 void __rcu **slots = NULL;
5197
5198 next = mte_to_node(*enode);
5199 do {
5200 *enode = ma_enode_ptr(next);
5201 node = mte_to_node(*enode);
5202 slots = ma_slots(node, node->type);
5203 next = rcu_dereference_protected(slots[offset],
5204 lock_is_held(&rcu_callback_map));
5205 offset = 0;
5206 } while (!ma_is_leaf(next->type));
5207
5208 return slots;
5209 }
5210
5211 /**
5212 * mt_free_walk() - Walk & free a tree in the RCU callback context
5213 * @head: The RCU head that's within the node.
5214 *
5215 * Note: This can only be used from the RCU callback context.
5216 */
5217 static void mt_free_walk(struct rcu_head *head)
5218 {
5219 void __rcu **slots;
5220 struct maple_node *node, *start;
5221 struct maple_enode *enode;
5222 unsigned char offset;
5223 enum maple_type type;
5224
5225 node = container_of(head, struct maple_node, rcu);
5226
5227 if (ma_is_leaf(node->type))
5228 goto free_leaf;
5229
5230 start = node;
5231 enode = mt_mk_node(node, node->type);
5232 slots = mte_dead_walk(&enode, 0);
5233 node = mte_to_node(enode);
5234 do {
5235 mt_free_bulk(node->slot_len, slots);
5236 offset = node->parent_slot + 1;
5237 enode = node->piv_parent;
5238 if (mte_to_node(enode) == node)
5239 goto free_leaf;
5240
5241 type = mte_node_type(enode);
5242 slots = ma_slots(mte_to_node(enode), type);
5243 if ((offset < mt_slots[type]) &&
5244 rcu_dereference_protected(slots[offset],
5245 lock_is_held(&rcu_callback_map)))
5246 slots = mte_dead_walk(&enode, offset);
5247 node = mte_to_node(enode);
5248 } while ((node != start) || (node->slot_len < offset));
5249
5250 slots = ma_slots(node, node->type);
5251 mt_free_bulk(node->slot_len, slots);
5252
5253 free_leaf:
5254 mt_free_rcu(&node->rcu);
5255 }
5256
5257 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5258 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5259 {
5260 struct maple_node *node;
5261 struct maple_enode *next = *enode;
5262 void __rcu **slots = NULL;
5263 enum maple_type type;
5264 unsigned char next_offset = 0;
5265
5266 do {
5267 *enode = next;
5268 node = mte_to_node(*enode);
5269 type = mte_node_type(*enode);
5270 slots = ma_slots(node, type);
5271 next = mt_slot_locked(mt, slots, next_offset);
5272 if ((mte_dead_node(next)))
5273 next = mt_slot_locked(mt, slots, ++next_offset);
5274
5275 mte_set_node_dead(*enode);
5276 node->type = type;
5277 node->piv_parent = prev;
5278 node->parent_slot = offset;
5279 offset = next_offset;
5280 next_offset = 0;
5281 prev = *enode;
5282 } while (!mte_is_leaf(next));
5283
5284 return slots;
5285 }
5286
5287 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5288 bool free)
5289 {
5290 void __rcu **slots;
5291 struct maple_node *node = mte_to_node(enode);
5292 struct maple_enode *start;
5293
5294 if (mte_is_leaf(enode)) {
5295 node->type = mte_node_type(enode);
5296 goto free_leaf;
5297 }
5298
5299 start = enode;
5300 slots = mte_destroy_descend(&enode, mt, start, 0);
5301 node = mte_to_node(enode); // Updated in the above call.
5302 do {
5303 enum maple_type type;
5304 unsigned char offset;
5305 struct maple_enode *parent, *tmp;
5306
5307 node->slot_len = mte_dead_leaves(enode, mt, slots);
5308 if (free)
5309 mt_free_bulk(node->slot_len, slots);
5310 offset = node->parent_slot + 1;
5311 enode = node->piv_parent;
5312 if (mte_to_node(enode) == node)
5313 goto free_leaf;
5314
5315 type = mte_node_type(enode);
5316 slots = ma_slots(mte_to_node(enode), type);
5317 if (offset >= mt_slots[type])
5318 goto next;
5319
5320 tmp = mt_slot_locked(mt, slots, offset);
5321 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5322 parent = enode;
5323 enode = tmp;
5324 slots = mte_destroy_descend(&enode, mt, parent, offset);
5325 }
5326 next:
5327 node = mte_to_node(enode);
5328 } while (start != enode);
5329
5330 node = mte_to_node(enode);
5331 node->slot_len = mte_dead_leaves(enode, mt, slots);
5332 if (free)
5333 mt_free_bulk(node->slot_len, slots);
5334
5335 free_leaf:
5336 if (free)
5337 mt_free_rcu(&node->rcu);
5338 else
5339 mt_clear_meta(mt, node, node->type);
5340 }
5341
5342 /*
5343 * mte_destroy_walk() - Free a tree or sub-tree.
5344 * @enode: the encoded maple node (maple_enode) to start
5345 * @mt: the tree to free - needed for node types.
5346 *
5347 * Must hold the write lock.
5348 */
5349 static inline void mte_destroy_walk(struct maple_enode *enode,
5350 struct maple_tree *mt)
5351 {
5352 struct maple_node *node = mte_to_node(enode);
5353
5354 if (mt_in_rcu(mt)) {
5355 mt_destroy_walk(enode, mt, false);
5356 call_rcu(&node->rcu, mt_free_walk);
5357 } else {
5358 mt_destroy_walk(enode, mt, true);
5359 }
5360 }
5361
5362 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5363 {
5364 if (!mas_is_active(wr_mas->mas)) {
5365 if (mas_is_start(wr_mas->mas))
5366 return;
5367
5368 if (unlikely(mas_is_paused(wr_mas->mas)))
5369 goto reset;
5370
5371 if (unlikely(mas_is_none(wr_mas->mas)))
5372 goto reset;
5373
5374 if (unlikely(mas_is_overflow(wr_mas->mas)))
5375 goto reset;
5376
5377 if (unlikely(mas_is_underflow(wr_mas->mas)))
5378 goto reset;
5379 }
5380
5381 /*
5382 * A less strict version of mas_is_span_wr() where we allow spanning
5383 * writes within this node. This is to stop partial walks in
5384 * mas_preallocate() from being reset.
5385 */
5386 if (wr_mas->mas->last > wr_mas->mas->max)
5387 goto reset;
5388
5389 if (wr_mas->entry)
5390 return;
5391
5392 if (mte_is_leaf(wr_mas->mas->node) &&
5393 wr_mas->mas->last == wr_mas->mas->max)
5394 goto reset;
5395
5396 return;
5397
5398 reset:
5399 mas_reset(wr_mas->mas);
5400 }
5401
5402 /* Interface */
5403
5404 /**
5405 * mas_store() - Store an @entry.
5406 * @mas: The maple state.
5407 * @entry: The entry to store.
5408 *
5409 * The @mas->index and @mas->last are used to set the range for the @entry.
5410 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5411 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5412 *
5413 * Return: the first entry between mas->index and mas->last or %NULL.
5414 */
5415 void *mas_store(struct ma_state *mas, void *entry)
5416 {
5417 MA_WR_STATE(wr_mas, mas, entry);
5418
5419 trace_ma_write(__func__, mas, 0, entry);
5420 #ifdef CONFIG_DEBUG_MAPLE_TREE
5421 if (MAS_WARN_ON(mas, mas->index > mas->last))
5422 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5423
5424 if (mas->index > mas->last) {
5425 mas_set_err(mas, -EINVAL);
5426 return NULL;
5427 }
5428
5429 #endif
5430
5431 /*
5432 * Storing is the same operation as insert with the added caveat that it
5433 * can overwrite entries. Although this seems simple enough, one may
5434 * want to examine what happens if a single store operation was to
5435 * overwrite multiple entries within a self-balancing B-Tree.
5436 */
5437 mas_wr_store_setup(&wr_mas);
5438 mas_wr_store_entry(&wr_mas);
5439 return wr_mas.content;
5440 }
5441 EXPORT_SYMBOL_GPL(mas_store);
5442
5443 /**
5444 * mas_store_gfp() - Store a value into the tree.
5445 * @mas: The maple state
5446 * @entry: The entry to store
5447 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5448 *
5449 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5450 * be allocated.
5451 */
5452 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5453 {
5454 MA_WR_STATE(wr_mas, mas, entry);
5455
5456 mas_wr_store_setup(&wr_mas);
5457 trace_ma_write(__func__, mas, 0, entry);
5458 retry:
5459 mas_wr_store_entry(&wr_mas);
5460 if (unlikely(mas_nomem(mas, gfp)))
5461 goto retry;
5462
5463 if (unlikely(mas_is_err(mas)))
5464 return xa_err(mas->node);
5465
5466 return 0;
5467 }
5468 EXPORT_SYMBOL_GPL(mas_store_gfp);
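
/*
 * Example (illustrative sketch, not part of the original source): a single
 * range store through the advanced API. The tree "tree" and pointer "item"
 * are hypothetical. mas_nomem() may drop the internal spinlock if the
 * allocation needs to sleep, which is why GFP_KERNEL is safe here.
 *
 *	MA_STATE(mas, &tree, 10, 19);
 *	int err;
 *
 *	mas_lock(&mas);
 *	err = mas_store_gfp(&mas, item, GFP_KERNEL);
 *	mas_unlock(&mas);
 *	if (err)
 *		return err;
 */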
5469
5470 /**
5471 * mas_store_prealloc() - Store a value into the tree using memory
5472 * preallocated in the maple state.
5473 * @mas: The maple state
5474 * @entry: The entry to store.
5475 */
5476 void mas_store_prealloc(struct ma_state *mas, void *entry)
5477 {
5478 MA_WR_STATE(wr_mas, mas, entry);
5479
5480 mas_wr_store_setup(&wr_mas);
5481 trace_ma_write(__func__, mas, 0, entry);
5482 mas_wr_store_entry(&wr_mas);
5483 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5484 mas_destroy(mas);
5485 }
5486 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5487
5488 /**
5489 * mas_preallocate() - Preallocate enough nodes for a store operation
5490 * @mas: The maple state
5491 * @entry: The entry that will be stored
5492 * @gfp: The GFP_FLAGS to use for allocations.
5493 *
5494 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5495 */
5496 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5497 {
5498 MA_WR_STATE(wr_mas, mas, entry);
5499 unsigned char node_size;
5500 int request = 1;
5501 int ret;
5502
5503
5504 if (unlikely(!mas->index && mas->last == ULONG_MAX))
5505 goto ask_now;
5506
5507 mas_wr_store_setup(&wr_mas);
5508 wr_mas.content = mas_start(mas);
5509 /* Root expand */
5510 if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5511 goto ask_now;
5512
5513 if (unlikely(!mas_wr_walk(&wr_mas))) {
5514 /* Spanning store, use worst case for now */
5515 request = 1 + mas_mt_height(mas) * 3;
5516 goto ask_now;
5517 }
5518
5519 /* At this point, we are at the leaf node that needs to be altered. */
5520 /* Exact fit, no nodes needed. */
5521 if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5522 return 0;
5523
5524 mas_wr_end_piv(&wr_mas);
5525 node_size = mas_wr_new_end(&wr_mas);
5526
5527 /* Slot store, does not require additional nodes */
5528 if (node_size == mas->end) {
5529 /* reuse node */
5530 if (!mt_in_rcu(mas->tree))
5531 return 0;
5532 /* shifting boundary */
5533 if (wr_mas.offset_end - mas->offset == 1)
5534 return 0;
5535 }
5536
5537 if (node_size >= mt_slots[wr_mas.type]) {
5538 /* Split, worst case for now. */
5539 request = 1 + mas_mt_height(mas) * 2;
5540 goto ask_now;
5541 }
5542
5543 /* New root needs a single node */
5544 if (unlikely(mte_is_root(mas->node)))
5545 goto ask_now;
5546
5547 /* Potential spanning rebalance collapsing a node, use worst-case */
5548 if (node_size - 1 <= mt_min_slots[wr_mas.type])
5549 request = mas_mt_height(mas) * 2 - 1;
5550
5551 /* node store, slot store needs one node */
5552 ask_now:
5553 mas_node_count_gfp(mas, request, gfp);
5554 mas->mas_flags |= MA_STATE_PREALLOC;
5555 if (likely(!mas_is_err(mas)))
5556 return 0;
5557
5558 mas_set_alloc_req(mas, 0);
5559 ret = xa_err(mas->node);
5560 mas_reset(mas);
5561 mas_destroy(mas);
5562 mas_reset(mas);
5563 return ret;
5564 }
5565 EXPORT_SYMBOL_GPL(mas_preallocate);
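
/*
 * Example (illustrative sketch): the preallocation pattern used by callers
 * that hold an external lock and must not fail at store time. All names
 * are hypothetical.
 *
 *	MA_STATE(mas, &tree, index, last);
 *
 *	if (mas_preallocate(&mas, item, GFP_KERNEL))
 *		return -ENOMEM;
 *	... past the point of no return ...
 *	mas_store_prealloc(&mas, item);
 *
 * mas_store_prealloc() calls mas_destroy(), which returns any preallocated
 * nodes that went unused. If the operation is abandoned before the store,
 * call mas_destroy() directly to free them.
 */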
5566
5567 /*
5568 * mas_destroy() - destroy a maple state.
5569 * @mas: The maple state
5570 *
5571 * Upon completion, check the left-most node and rebalance against the node to
5572 * the right if necessary. Frees any allocated nodes associated with this maple
5573 * state.
5574 */
5575 void mas_destroy(struct ma_state *mas)
5576 {
5577 struct maple_alloc *node;
5578 unsigned long total;
5579
5580 /*
5581 * When using mas_for_each() to insert an expected number of elements,
5582 * it is possible that the number inserted is less than the expected
5583 * number. To fix an invalid final node, a check is performed here to
5584 * rebalance the previous node with the final node.
5585 */
5586 if (mas->mas_flags & MA_STATE_REBALANCE) {
5587 unsigned char end;
5588
5589 mas_start(mas);
5590 mtree_range_walk(mas);
5591 end = mas->end + 1;
5592 if (end < mt_min_slot_count(mas->node) - 1)
5593 mas_destroy_rebalance(mas, end);
5594
5595 mas->mas_flags &= ~MA_STATE_REBALANCE;
5596 }
5597 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5598
5599 total = mas_allocated(mas);
5600 while (total) {
5601 node = mas->alloc;
5602 mas->alloc = node->slot[0];
5603 if (node->node_count > 1) {
5604 size_t count = node->node_count - 1;
5605
5606 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5607 total -= count;
5608 }
5609 mt_free_one(ma_mnode_ptr(node));
5610 total--;
5611 }
5612
5613 mas->alloc = NULL;
5614 }
5615 EXPORT_SYMBOL_GPL(mas_destroy);
5616
5617 /*
5618 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5619 * @mas: The maple state
5620 * @nr_entries: The number of expected entries.
5621 *
5622 * This will attempt to pre-allocate enough nodes to store the expected number
5623 * of entries. The allocations will occur using the bulk allocator interface
5624 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5625 * to ensure any unused nodes are freed.
5626 *
5627 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5628 */
5629 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5630 {
5631 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5632 struct maple_enode *enode = mas->node;
5633 int nr_nodes;
5634 int ret;
5635
5636 /*
5637 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5638 * forking a process and duplicating the VMAs from one tree to a new
5639 * tree. When such a situation arises, it is known that the new tree is
5640 * not going to be used until the entire tree is populated. For
5641 * performance reasons, it is best to use a bulk load with RCU disabled.
5642 * This allows for optimistic splitting that favours the left and reuse
5643 * of nodes during the operation.
5644 */
5645
5646 /* Optimize splitting for bulk insert in-order */
5647 mas->mas_flags |= MA_STATE_BULK;
5648
5649 /*
5650 * Avoid overflow, assume a gap between each entry and a trailing null.
5651 * If this is wrong, it just means allocation can happen during
5652 * insertion of entries.
5653 */
5654 nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5655 if (!mt_is_alloc(mas->tree))
5656 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5657
5658 /* Leaves; reduce slots to keep space for expansion */
5659 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5660 /* Internal nodes */
5661 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5662 /* Add working room for split (2 nodes) + new parents */
5663 mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
5664
5665 /* Detect if allocations run out */
5666 mas->mas_flags |= MA_STATE_PREALLOC;
5667
5668 if (!mas_is_err(mas))
5669 return 0;
5670
5671 ret = xa_err(mas->node);
5672 mas->node = enode;
5673 mas_destroy(mas);
5674 return ret;
5675
5676 }
5677 EXPORT_SYMBOL_GPL(mas_expected_entries);
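
/*
 * Example (illustrative sketch): bulk-loading a new, not yet visible tree
 * in order. The count and the start[], end[] and item[] arrays are
 * hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	int i, ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_expected_entries(&mas, count);
 *	if (ret)
 *		goto unlock;
 *	for (i = 0; i < count; i++) {
 *		mas_set_range(&mas, start[i], end[i]);
 *		mas_store(&mas, item[i]);
 *	}
 * unlock:
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 */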
5678
5679 static bool mas_next_setup(struct ma_state *mas, unsigned long max,
5680 void **entry)
5681 {
5682 bool was_none = mas_is_none(mas);
5683
5684 if (unlikely(mas->last >= max)) {
5685 mas->status = ma_overflow;
5686 return true;
5687 }
5688
5689 switch (mas->status) {
5690 case ma_active:
5691 return false;
5692 case ma_none:
5693 fallthrough;
5694 case ma_pause:
5695 mas->status = ma_start;
5696 fallthrough;
5697 case ma_start:
5698 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5699 break;
5700 case ma_overflow:
5701 /* Overflowed before, but the max changed */
5702 mas->status = ma_active;
5703 break;
5704 case ma_underflow:
5705 /* The user expects the mas to be one before where it is */
5706 mas->status = ma_active;
5707 *entry = mas_walk(mas);
5708 if (*entry)
5709 return true;
5710 break;
5711 case ma_root:
5712 break;
5713 case ma_error:
5714 return true;
5715 }
5716
5717 if (likely(mas_is_active(mas))) /* Fast path */
5718 return false;
5719
5720 if (mas_is_ptr(mas)) {
5721 *entry = NULL;
5722 if (was_none && mas->index == 0) {
5723 mas->index = mas->last = 0;
5724 return true;
5725 }
5726 mas->index = 1;
5727 mas->last = ULONG_MAX;
5728 mas->status = ma_none;
5729 return true;
5730 }
5731
5732 if (mas_is_none(mas))
5733 return true;
5734
5735 return false;
5736 }
5737
5738 /**
5739 * mas_next() - Get the next entry.
5740 * @mas: The maple state
5741 * @max: The maximum index to check.
5742 *
5743 * Returns the next entry after @mas->index.
5744 * Must hold rcu_read_lock or the write lock.
5745 * Can return the zero entry.
5746 *
5747 * Return: The next entry or %NULL
5748 */
5749 void *mas_next(struct ma_state *mas, unsigned long max)
5750 {
5751 void *entry = NULL;
5752
5753 if (mas_next_setup(mas, max, &entry))
5754 return entry;
5755
5756 /* Retries on dead nodes handled by mas_next_slot */
5757 return mas_next_slot(mas, max, false);
5758 }
5759 EXPORT_SYMBOL_GPL(mas_next);
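
/*
 * Example (illustrative sketch): walking every entry under the RCU read
 * lock; mas_find() locates the first entry and mas_next() continues from
 * there. "tree" is hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = mas_find(&mas, ULONG_MAX); entry;
 *	     entry = mas_next(&mas, ULONG_MAX))
 *		pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */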
5760
5761 /**
5762 * mas_next_range() - Advance the maple state to the next range
5763 * @mas: The maple state
5764 * @max: The maximum index to check.
5765 *
5766 * Sets @mas->index and @mas->last to the range.
5767 * Must hold rcu_read_lock or the write lock.
5768 * Can return the zero entry.
5769 *
5770 * Return: The next entry or %NULL
5771 */
5772 void *mas_next_range(struct ma_state *mas, unsigned long max)
5773 {
5774 void *entry = NULL;
5775
5776 if (mas_next_setup(mas, max, &entry))
5777 return entry;
5778
5779 /* Retries on dead nodes handled by mas_next_slot */
5780 return mas_next_slot(mas, max, true);
5781 }
5782 EXPORT_SYMBOL_GPL(mas_next_range);
5783
5784 /**
5785 * mt_next() - get the next value in the maple tree
5786 * @mt: The maple tree
5787 * @index: The start index
5788 * @max: The maximum index to check
5789 *
5790 * Takes RCU read lock internally to protect the search, which does not
5791 * protect the returned pointer after dropping RCU read lock.
5792 * See also: Documentation/core-api/maple_tree.rst
5793 *
5794 * Return: The entry higher than @index or %NULL if nothing is found.
5795 */
5796 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5797 {
5798 void *entry = NULL;
5799 MA_STATE(mas, mt, index, index);
5800
5801 rcu_read_lock();
5802 entry = mas_next(&mas, max);
5803 rcu_read_unlock();
5804 return entry;
5805 }
5806 EXPORT_SYMBOL_GPL(mt_next);
5807
5808 static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
5809 {
5810 if (unlikely(mas->index <= min)) {
5811 mas->status = ma_underflow;
5812 return true;
5813 }
5814
5815 switch (mas->status) {
5816 case ma_active:
5817 return false;
5818 case ma_start:
5819 break;
5820 case ma_none:
5821 fallthrough;
5822 case ma_pause:
5823 mas->status = ma_start;
5824 break;
5825 case ma_underflow:
5826 /* underflowed before but the min changed */
5827 mas->status = ma_active;
5828 break;
5829 case ma_overflow:
5830 /* User expects mas to be one after where it is */
5831 mas->status = ma_active;
5832 *entry = mas_walk(mas);
5833 if (*entry)
5834 return true;
5835 break;
5836 case ma_root:
5837 break;
5838 case ma_error:
5839 return true;
5840 }
5841
5842 if (mas_is_start(mas))
5843 mas_walk(mas);
5844
5845 if (unlikely(mas_is_ptr(mas))) {
5846 if (!mas->index) {
5847 mas->status = ma_none;
5848 return true;
5849 }
5850 mas->index = mas->last = 0;
5851 *entry = mas_root(mas);
5852 return true;
5853 }
5854
5855 if (mas_is_none(mas)) {
5856 if (mas->index) {
5857 /* Walked to out-of-range pointer? */
5858 mas->index = mas->last = 0;
5859 mas->status = ma_root;
5860 *entry = mas_root(mas);
5861 return true;
5862 }
5863 return true;
5864 }
5865
5866 return false;
5867 }
5868
5869 /**
5870 * mas_prev() - Get the previous entry
5871 * @mas: The maple state
5872 * @min: The minimum value to check.
5873 *
5874 * Must hold rcu_read_lock or the write lock.
5875 * Will reset mas to ma_start if the status is ma_none. Will stop at
5876 * nodes that are not searchable.
5877 *
5878 * Return: the previous value or %NULL.
5879 */
5880 void *mas_prev(struct ma_state *mas, unsigned long min)
5881 {
5882 void *entry = NULL;
5883
5884 if (mas_prev_setup(mas, min, &entry))
5885 return entry;
5886
5887 return mas_prev_slot(mas, min, false);
5888 }
5889 EXPORT_SYMBOL_GPL(mas_prev);
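
/*
 * Example (illustrative sketch): the reverse walk, starting from the
 * highest index. mas_find_rev() locates the first entry at or below the
 * index, then mas_prev() steps backwards. "tree" is hypothetical.
 *
 *	MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = mas_find_rev(&mas, 0); entry;
 *	     entry = mas_prev(&mas, 0))
 *		pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */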
5890
5891 /**
5892 * mas_prev_range() - Advance to the previous range
5893 * @mas: The maple state
5894 * @min: The minimum value to check.
5895 *
5896 * Sets @mas->index and @mas->last to the range.
5897 * Must hold rcu_read_lock or the write lock.
5898 * Will reset mas to ma_start if the status is ma_none. Will stop at
5899 * nodes that are not searchable.
5900 *
5901 * Return: the previous value or %NULL.
5902 */
5903 void *mas_prev_range(struct ma_state *mas, unsigned long min)
5904 {
5905 void *entry = NULL;
5906
5907 if (mas_prev_setup(mas, min, &entry))
5908 return entry;
5909
5910 return mas_prev_slot(mas, min, true);
5911 }
5912 EXPORT_SYMBOL_GPL(mas_prev_range);
5913
5914 /**
5915 * mt_prev() - get the previous value in the maple tree
5916 * @mt: The maple tree
5917 * @index: The start index
5918 * @min: The minimum index to check
5919 *
5920 * Takes RCU read lock internally to protect the search, which does not
5921 * protect the returned pointer after dropping RCU read lock.
5922 * See also: Documentation/core-api/maple_tree.rst
5923 *
5924 * Return: The entry before @index or %NULL if nothing is found.
5925 */
5926 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5927 {
5928 void *entry = NULL;
5929 MA_STATE(mas, mt, index, index);
5930
5931 rcu_read_lock();
5932 entry = mas_prev(&mas, min);
5933 rcu_read_unlock();
5934 return entry;
5935 }
5936 EXPORT_SYMBOL_GPL(mt_prev);
5937
5938 /**
5939 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5940 * @mas: The maple state to pause
5941 *
5942 * Some users need to pause a walk and drop the lock they're holding in
5943 * order to yield to a higher priority thread or carry out an operation
5944 * on an entry. Those users should call this function before they drop
5945 * the lock. It resets the @mas to be suitable for the next iteration
5946 * of the loop after the user has reacquired the lock. If most entries
5947 * found during a walk require you to call mas_pause(), the mt_for_each()
5948 * iterator may be more appropriate.
5949 *
5950 */
5951 void mas_pause(struct ma_state *mas)
5952 {
5953 mas->status = ma_pause;
5954 mas->node = NULL;
5955 }
5956 EXPORT_SYMBOL_GPL(mas_pause);
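
/*
 * Example (illustrative sketch): yielding in the middle of a long RCU
 * walk. do_something() stands in for the caller's per-entry work; "tree"
 * is hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *		do_something(entry);
 *	}
 *	rcu_read_unlock();
 */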
5957
5958 /**
5959 * mas_find_setup() - Internal function to set up mas_find*().
5960 * @mas: The maple state
5961 * @max: The maximum index
5962 * @entry: Pointer to the entry
5963 *
5964 * Return: True if entry is the answer, false otherwise.
5965 */
5966 static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
5967 {
5968 switch (mas->status) {
5969 case ma_active:
5970 if (mas->last < max)
5971 return false;
5972 return true;
5973 case ma_start:
5974 break;
5975 case ma_pause:
5976 if (unlikely(mas->last >= max))
5977 return true;
5978
5979 mas->index = ++mas->last;
5980 mas->status = ma_start;
5981 break;
5982 case ma_none:
5983 if (unlikely(mas->last >= max))
5984 return true;
5985
5986 mas->index = mas->last;
5987 mas->status = ma_start;
5988 break;
5989 case ma_underflow:
5990 /* mas is pointing at the entry just before where it was unable to go lower */
5991 if (unlikely(mas->index >= max)) {
5992 mas->status = ma_overflow;
5993 return true;
5994 }
5995
5996 mas->status = ma_active;
5997 *entry = mas_walk(mas);
5998 if (*entry)
5999 return true;
6000 break;
6001 case ma_overflow:
6002 if (unlikely(mas->last >= max))
6003 return true;
6004
6005 mas->status = ma_active;
6006 *entry = mas_walk(mas);
6007 if (*entry)
6008 return true;
6009 break;
6010 case ma_root:
6011 break;
6012 case ma_error:
6013 return true;
6014 }
6015
6016 if (mas_is_start(mas)) {
6017 /* First run or continue */
6018 if (mas->index > max)
6019 return true;
6020
6021 *entry = mas_walk(mas);
6022 if (*entry)
6023 return true;
6024
6025 }
6026
6027 if (unlikely(mas_is_ptr(mas)))
6028 goto ptr_out_of_range;
6029
6030 if (unlikely(mas_is_none(mas)))
6031 return true;
6032
6033 if (mas->index == max)
6034 return true;
6035
6036 return false;
6037
6038 ptr_out_of_range:
6039 mas->status = ma_none;
6040 mas->index = 1;
6041 mas->last = ULONG_MAX;
6042 return true;
6043 }
6044
6045 /**
6046 * mas_find() - On the first call, find the entry at or after mas->index up to
6047 * %max. Otherwise, find the entry after mas->index.
6048 * @mas: The maple state
6049 * @max: The maximum value to check.
6050 *
6051 * Must hold rcu_read_lock or the write lock.
6052 * If an entry exists, last and index are updated accordingly.
6053 * May set @mas->status to ma_overflow.
6054 *
6055 * Return: The entry or %NULL.
6056 */
6057 void *mas_find(struct ma_state *mas, unsigned long max)
6058 {
6059 void *entry = NULL;
6060
6061 if (mas_find_setup(mas, max, &entry))
6062 return entry;
6063
6064 /* Retries on dead nodes handled by mas_next_slot */
6065 entry = mas_next_slot(mas, max, false);
6066 /* Ignore overflow */
6067 mas->status = ma_active;
6068 return entry;
6069 }
6070 EXPORT_SYMBOL_GPL(mas_find);
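
/*
 * Example (illustrative sketch): mas_find() is the primitive behind the
 * mas_for_each() iterator, so these two spellings of the same walk are
 * equivalent. Names are hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		do_something(entry);
 *
 *	mas_for_each(&mas, entry, ULONG_MAX)
 *		do_something(entry);
 */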
6071
6072 /**
6073 * mas_find_range() - On the first call, find the entry at or after
6074 * mas->index up to %max. Otherwise, advance to the next slot after mas->index.
6075 * @mas: The maple state
6076 * @max: The maximum value to check.
6077 *
6078 * Must hold rcu_read_lock or the write lock.
6079 * If an entry exists, last and index are updated accordingly.
6080 * May set @mas->status to ma_overflow.
6081 *
6082 * Return: The entry or %NULL.
6083 */
6084 void *mas_find_range(struct ma_state *mas, unsigned long max)
6085 {
6086 void *entry = NULL;
6087
6088 if (mas_find_setup(mas, max, &entry))
6089 return entry;
6090
6091 /* Retries on dead nodes handled by mas_next_slot */
6092 return mas_next_slot(mas, max, true);
6093 }
6094 EXPORT_SYMBOL_GPL(mas_find_range);
6095
6096 /**
6097 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6098 * @mas: The maple state
6099 * @min: The minimum index
6100 * @entry: Pointer to the entry
6101 *
6102 * Return: True if entry is the answer, false otherwise.
6103 */
6104 static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6105 void **entry)
6106 {
6107
6108 switch (mas->status) {
6109 case ma_active:
6110 goto active;
6111 case ma_start:
6112 break;
6113 case ma_pause:
6114 if (unlikely(mas->index <= min)) {
6115 mas->status = ma_underflow;
6116 return true;
6117 }
6118 mas->last = --mas->index;
6119 mas->status = ma_start;
6120 break;
6121 case ma_none:
6122 if (mas->index <= min)
6123 goto none;
6124
6125 mas->last = mas->index;
6126 mas->status = ma_start;
6127 break;
6128 case ma_overflow: /* user expects the mas to be one after where it is */
6129 if (unlikely(mas->index <= min)) {
6130 mas->status = ma_underflow;
6131 return true;
6132 }
6133
6134 mas->status = ma_active;
6135 break;
6136 case ma_underflow: /* user expects the mas to be one before where it is */
6137 if (unlikely(mas->index <= min))
6138 return true;
6139
6140 mas->status = ma_active;
6141 break;
6142 case ma_root:
6143 break;
6144 case ma_error:
6145 return true;
6146 }
6147
6148 if (mas_is_start(mas)) {
6149 /* First run or continue */
6150 if (mas->index < min)
6151 return true;
6152
6153 *entry = mas_walk(mas);
6154 if (*entry)
6155 return true;
6156 }
6157
6158 if (unlikely(mas_is_ptr(mas)))
6159 goto none;
6160
6161 if (unlikely(mas_is_none(mas))) {
6162 /*
6163 * Walked to the location, and there was nothing so the previous
6164 * location is 0.
6165 */
6166 mas->last = mas->index = 0;
6167 mas->status = ma_root;
6168 *entry = mas_root(mas);
6169 return true;
6170 }
6171
6172 active:
6173 if (mas->index < min)
6174 return true;
6175
6176 return false;
6177
6178 none:
6179 mas->status = ma_none;
6180 return true;
6181 }
6182
6183 /**
6184 * mas_find_rev() - On the first call, find the first non-null entry at or below
6185 * mas->index down to %min. Otherwise find the first non-null entry below
6186 * mas->index down to %min.
6187 * @mas: The maple state
6188 * @min: The minimum value to check.
6189 *
6190 * Must hold rcu_read_lock or the write lock.
6191 * If an entry exists, last and index are updated accordingly.
6192 * May set @mas->status to ma_underflow.
6193 *
6194 * Return: The entry or %NULL.
6195 */
6196 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6197 {
6198 void *entry = NULL;
6199
6200 if (mas_find_rev_setup(mas, min, &entry))
6201 return entry;
6202
6203 /* Retries on dead nodes handled by mas_prev_slot */
6204 return mas_prev_slot(mas, min, false);
6205
6206 }
6207 EXPORT_SYMBOL_GPL(mas_find_rev);
6208
6209 /**
6210 * mas_find_range_rev() - On the first call, find the first non-null entry at
6211 * or below mas->index down to %min. Otherwise advance to the previous slot
6212 * before mas->index down to %min.
6213 * @mas: The maple state
6214 * @min: The minimum value to check.
6215 *
6216 * Must hold rcu_read_lock or the write lock.
6217 * If an entry exists, last and index are updated accordingly.
6218 * May set @mas->status to ma_underflow.
6219 *
6220 * Return: The entry or %NULL.
6221 */
6222 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6223 {
6224 void *entry = NULL;
6225
6226 if (mas_find_rev_setup(mas, min, &entry))
6227 return entry;
6228
6229 /* Retries on dead nodes handled by mas_prev_slot */
6230 return mas_prev_slot(mas, min, true);
6231 }
6232 EXPORT_SYMBOL_GPL(mas_find_range_rev);
6233
6234 /**
6235 * mas_erase() - Find the range in which index resides and erase the entire
6236 * range.
6237 * @mas: The maple state
6238 *
6239 * Must hold the write lock.
6240 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6241 * erases that range.
6242 *
6243 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6244 */
6245 void *mas_erase(struct ma_state *mas)
6246 {
6247 void *entry;
6248 MA_WR_STATE(wr_mas, mas, NULL);
6249
6250 if (!mas_is_active(mas) || !mas_is_start(mas))
6251 mas->status = ma_start;
6252
6253 /* Retry unnecessary when holding the write lock. */
6254 entry = mas_state_walk(mas);
6255 if (!entry)
6256 return NULL;
6257
6258 write_retry:
6259 /* Must reset to ensure spanning writes of last slot are detected */
6260 mas_reset(mas);
6261 mas_wr_store_setup(&wr_mas);
6262 mas_wr_store_entry(&wr_mas);
6263 if (mas_nomem(mas, GFP_KERNEL))
6264 goto write_retry;
6265
6266 return entry;
6267 }
6268 EXPORT_SYMBOL_GPL(mas_erase);
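
/*
 * Example (illustrative sketch): erasing whatever range covers a given
 * index while holding the tree lock. Names are hypothetical.
 *
 *	MA_STATE(mas, &tree, index, index);
 *	void *old;
 *
 *	mas_lock(&mas);
 *	old = mas_erase(&mas);
 *	mas_unlock(&mas);
 *
 * On return, mas.index and mas.last describe the range that was erased
 * and "old" holds the entry it contained, or NULL if there was none.
 */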
6269
6270 /**
6271 * mas_nomem() - Check if there was an error allocating and do the allocation
6272 * if necessary. If there are allocations, then free them.
6273 * @mas: The maple state
6274 * @gfp: The GFP_FLAGS to use for allocations
6275 * Return: true on allocation, false otherwise.
6276 */
6277 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6278 __must_hold(mas->tree->ma_lock)
6279 {
6280 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6281 mas_destroy(mas);
6282 return false;
6283 }
6284
6285 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6286 mtree_unlock(mas->tree);
6287 mas_alloc_nodes(mas, gfp);
6288 mtree_lock(mas->tree);
6289 } else {
6290 mas_alloc_nodes(mas, gfp);
6291 }
6292
6293 if (!mas_allocated(mas))
6294 return false;
6295
6296 mas->status = ma_start;
6297 return true;
6298 }
6299
6300 void __init maple_tree_init(void)
6301 {
6302 maple_node_cache = kmem_cache_create("maple_node",
6303 sizeof(struct maple_node), sizeof(struct maple_node),
6304 SLAB_PANIC, NULL);
6305 }
6306
6307 /**
6308 * mtree_load() - Load a value stored in a maple tree
6309 * @mt: The maple tree
6310 * @index: The index to load
6311 *
6312 * Return: the entry or %NULL
6313 */
6314 void *mtree_load(struct maple_tree *mt, unsigned long index)
6315 {
6316 MA_STATE(mas, mt, index, index);
6317 void *entry;
6318
6319 trace_ma_read(__func__, &mas);
6320 rcu_read_lock();
6321 retry:
6322 entry = mas_start(&mas);
6323 if (unlikely(mas_is_none(&mas)))
6324 goto unlock;
6325
6326 if (unlikely(mas_is_ptr(&mas))) {
6327 if (index)
6328 entry = NULL;
6329
6330 goto unlock;
6331 }
6332
6333 entry = mtree_lookup_walk(&mas);
6334 if (!entry && unlikely(mas_is_start(&mas)))
6335 goto retry;
6336 unlock:
6337 rcu_read_unlock();
6338 if (xa_is_zero(entry))
6339 return NULL;
6340
6341 return entry;
6342 }
6343 EXPORT_SYMBOL(mtree_load);
6344
6345 /**
6346 * mtree_store_range() - Store an entry at a given range.
6347 * @mt: The maple tree
6348 * @index: The start of the range
6349 * @last: The end of the range
6350 * @entry: The entry to store
6351 * @gfp: The GFP_FLAGS to use for allocations
6352 *
6353 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6354 * be allocated.
6355 */
6356 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6357 unsigned long last, void *entry, gfp_t gfp)
6358 {
6359 MA_STATE(mas, mt, index, last);
6360 MA_WR_STATE(wr_mas, &mas, entry);
6361
6362 trace_ma_write(__func__, &mas, 0, entry);
6363 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6364 return -EINVAL;
6365
6366 if (index > last)
6367 return -EINVAL;
6368
6369 mtree_lock(mt);
6370 retry:
6371 mas_wr_store_entry(&wr_mas);
6372 if (mas_nomem(&mas, gfp))
6373 goto retry;
6374
6375 mtree_unlock(mt);
6376 if (mas_is_err(&mas))
6377 return xa_err(mas.node);
6378
6379 return 0;
6380 }
6381 EXPORT_SYMBOL(mtree_store_range);
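
/*
 * Example (illustrative sketch): a round trip through the simple API -
 * store a range, load from inside it, then clear it by storing NULL.
 * "tree" and "item" are hypothetical.
 *
 *	if (mtree_store_range(&tree, 100, 199, item, GFP_KERNEL))
 *		return -ENOMEM;
 *	WARN_ON(mtree_load(&tree, 150) != item);
 *	mtree_store_range(&tree, 100, 199, NULL, GFP_KERNEL);
 */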
6382
6383 /**
6384 * mtree_store() - Store an entry at a given index.
6385 * @mt: The maple tree
6386 * @index: The index to store the value
6387 * @entry: The entry to store
6388 * @gfp: The GFP_FLAGS to use for allocations
6389 *
6390 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6391 * be allocated.
6392 */
6393 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6394 gfp_t gfp)
6395 {
6396 return mtree_store_range(mt, index, index, entry, gfp);
6397 }
6398 EXPORT_SYMBOL(mtree_store);
6399
6400 /**
6401 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6402 * @mt: The maple tree
6403 * @first: The start of the range
6404 * @last: The end of the range
6405 * @entry: The entry to store
6406 * @gfp: The GFP_FLAGS to use for allocations.
6407 *
6408 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6409 * request, -ENOMEM if memory could not be allocated.
6410 */
6411 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6412 unsigned long last, void *entry, gfp_t gfp)
6413 {
6414 MA_STATE(ms, mt, first, last);
6415
6416 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6417 return -EINVAL;
6418
6419 if (first > last)
6420 return -EINVAL;
6421
6422 mtree_lock(mt);
6423 retry:
6424 mas_insert(&ms, entry);
6425 if (mas_nomem(&ms, gfp))
6426 goto retry;
6427
6428 mtree_unlock(mt);
6429 if (mas_is_err(&ms))
6430 return xa_err(ms.node);
6431
6432 return 0;
6433 }
6434 EXPORT_SYMBOL(mtree_insert_range);
6435
6436 /**
6437 * mtree_insert() - Insert an entry at a given index if there is no value.
6438 * @mt: The maple tree
6439 * @index: The index to store the value
6440 * @entry: The entry to store
6441 * @gfp: The GFP_FLAGS to use for allocations.
6442 *
6443 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6444 * request, -ENOMEM if memory could not be allocated.
6445 */
6446 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6447 gfp_t gfp)
6448 {
6449 return mtree_insert_range(mt, index, index, entry, gfp);
6450 }
6451 EXPORT_SYMBOL(mtree_insert);
6452
6453 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6454 void *entry, unsigned long size, unsigned long min,
6455 unsigned long max, gfp_t gfp)
6456 {
6457 int ret = 0;
6458
6459 MA_STATE(mas, mt, 0, 0);
6460 if (!mt_is_alloc(mt))
6461 return -EINVAL;
6462
6463 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6464 return -EINVAL;
6465
6466 mtree_lock(mt);
6467 retry:
6468 ret = mas_empty_area(&mas, min, max, size);
6469 if (ret)
6470 goto unlock;
6471
6472 mas_insert(&mas, entry);
6473 /*
6474 * mas_nomem() may release the lock, causing the allocated area
6475 * to be unavailable, so try to allocate a free area again.
6476 */
6477 if (mas_nomem(&mas, gfp))
6478 goto retry;
6479
6480 if (mas_is_err(&mas))
6481 ret = xa_err(mas.node);
6482 else
6483 *startp = mas.index;
6484
6485 unlock:
6486 mtree_unlock(mt);
6487 return ret;
6488 }
6489 EXPORT_SYMBOL(mtree_alloc_range);
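
/*
 * Example (illustrative sketch): finding a free gap of 16 indices within
 * [0, 1023] and storing an entry there. The tree must have been created
 * with MT_FLAGS_ALLOC_RANGE. Names are hypothetical.
 *
 *	unsigned long start;
 *	int ret;
 *
 *	ret = mtree_alloc_range(&tree, &start, item, 16, 0, 1023, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	pr_info("allocated [%lu, %lu]\n", start, start + 15);
 */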
6490
6491 /**
6492 * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
6493 * @mt: The maple tree.
6494 * @startp: Pointer to ID.
6495 * @range_lo: Lower bound of range to search.
6496 * @range_hi: Upper bound of range to search.
6497 * @entry: The entry to store.
6498 * @next: Pointer to next ID to allocate.
6499 * @gfp: The GFP_FLAGS to use for allocations.
6500 *
6501 * Finds an empty entry in @mt after @next, stores the new index into
6502 * the @startp pointer, stores the entry at that index, then updates @next.
6503 *
6504 * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
6505 *
6506 * Context: Any context. Takes and releases the mt.lock. May sleep if
6507 * the @gfp flags permit.
6508 *
6509 * Return: 0 if the allocation succeeded without wrapping, 1 if the
6510 * allocation succeeded after wrapping, -ENOMEM if memory could not be
6511 * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
6512 * free entries.
6513 */
6514 int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
6515 void *entry, unsigned long range_lo, unsigned long range_hi,
6516 unsigned long *next, gfp_t gfp)
6517 {
6518 int ret;
6519
6520 MA_STATE(mas, mt, 0, 0);
6521
6522 if (!mt_is_alloc(mt))
6523 return -EINVAL;
6524 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6525 return -EINVAL;
6526 mtree_lock(mt);
6527 ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
6528 next, gfp);
6529 mtree_unlock(mt);
6530 return ret;
6531 }
6532 EXPORT_SYMBOL(mtree_alloc_cyclic);
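
/*
 * Example (illustrative sketch): cyclic ID allocation in the style of
 * idr_alloc_cyclic(). "next_id" must persist between calls; all names
 * are hypothetical.
 *
 *	static unsigned long next_id;
 *	unsigned long id;
 *	int ret;
 *
 *	ret = mtree_alloc_cyclic(&tree, &id, item, 0, 1023, &next_id,
 *				 GFP_KERNEL);
 *	if (ret < 0)
 *		return ret;
 */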
6533
6534 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6535 void *entry, unsigned long size, unsigned long min,
6536 unsigned long max, gfp_t gfp)
6537 {
6538 int ret = 0;
6539
6540 MA_STATE(mas, mt, 0, 0);
6541 if (!mt_is_alloc(mt))
6542 return -EINVAL;
6543
6544 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6545 return -EINVAL;
6546
6547 mtree_lock(mt);
6548 retry:
6549 ret = mas_empty_area_rev(&mas, min, max, size);
6550 if (ret)
6551 goto unlock;
6552
6553 mas_insert(&mas, entry);
6554 /*
6555 * mas_nomem() may release the lock, causing the allocated area
6556 * to be unavailable, so try to allocate a free area again.
6557 */
6558 if (mas_nomem(&mas, gfp))
6559 goto retry;
6560
6561 if (mas_is_err(&mas))
6562 ret = xa_err(mas.node);
6563 else
6564 *startp = mas.index;
6565
6566 unlock:
6567 mtree_unlock(mt);
6568 return ret;
6569 }
6570 EXPORT_SYMBOL(mtree_alloc_rrange);
6571
6572 /**
6573 * mtree_erase() - Find an index and erase the entire range.
6574 * @mt: The maple tree
6575 * @index: The index to erase
6576 *
6577 * Erasing is the same as a walk to an entry then a store of a NULL to that
6578 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6579 *
6580 * Return: The entry stored at the @index or %NULL
6581 */
6582 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6583 {
6584 void *entry = NULL;
6585
6586 MA_STATE(mas, mt, index, index);
6587 trace_ma_op(__func__, &mas);
6588
6589 mtree_lock(mt);
6590 entry = mas_erase(&mas);
6591 mtree_unlock(mt);
6592
6593 return entry;
6594 }
6595 EXPORT_SYMBOL(mtree_erase);
6596
6597 /*
6598 * mas_dup_free() - Free an incomplete duplication of a tree.
6599 * @mas: The maple state of an incomplete tree.
6600 *
6601 * The parameter @mas->node passed in indicates that the allocation failed on
6602 * this node. This function frees all nodes starting from @mas->node in the
6603 * reverse order of mas_dup_build(). There is no need to hold the source tree
6604 * lock at this time.
6605 */
6606 static void mas_dup_free(struct ma_state *mas)
6607 {
6608 struct maple_node *node;
6609 enum maple_type type;
6610 void __rcu **slots;
6611 unsigned char count, i;
6612
6613 /* Maybe the first node allocation failed. */
6614 if (mas_is_none(mas))
6615 return;
6616
6617 while (!mte_is_root(mas->node)) {
6618 mas_ascend(mas);
6619 if (mas->offset) {
6620 mas->offset--;
6621 do {
6622 mas_descend(mas);
6623 mas->offset = mas_data_end(mas);
6624 } while (!mte_is_leaf(mas->node));
6625
6626 mas_ascend(mas);
6627 }
6628
6629 node = mte_to_node(mas->node);
6630 type = mte_node_type(mas->node);
6631 slots = ma_slots(node, type);
6632 count = mas_data_end(mas) + 1;
6633 for (i = 0; i < count; i++)
6634 ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
6635 mt_free_bulk(count, slots);
6636 }
6637
6638 node = mte_to_node(mas->node);
6639 mt_free_one(node);
6640 }
6641
6642 /*
6643 * mas_copy_node() - Copy a maple node and replace the parent.
6644 * @mas: The maple state of source tree.
6645 * @new_mas: The maple state of new tree.
6646 * @parent: The parent of the new node.
6647 *
6648 * Copy @mas->node to @new_mas->node, set @parent to be the parent of
6649 * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
6650 */
6651 static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
6652 struct maple_pnode *parent)
6653 {
6654 struct maple_node *node = mte_to_node(mas->node);
6655 struct maple_node *new_node = mte_to_node(new_mas->node);
6656 unsigned long val;
6657
6658 /* Copy the node completely. */
6659 memcpy(new_node, node, sizeof(struct maple_node));
6660 /* Update the parent node pointer. */
6661 val = (unsigned long)node->parent & MAPLE_NODE_MASK;
6662 new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
6663 }
6664
6665 /*
6666 * mas_dup_alloc() - Allocate child nodes for a maple node.
6667 * @mas: The maple state of source tree.
6668 * @new_mas: The maple state of new tree.
6669 * @gfp: The GFP_FLAGS to use for allocations.
6670 *
6671 * This function allocates child nodes for @new_mas->node during the duplication
6672 * process. If memory allocation fails, @mas is set to -ENOMEM.
6673 */
6674 static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
6675 gfp_t gfp)
6676 {
6677 struct maple_node *node = mte_to_node(mas->node);
6678 struct maple_node *new_node = mte_to_node(new_mas->node);
6679 enum maple_type type;
6680 unsigned char request, count, i;
6681 void __rcu **slots;
6682 void __rcu **new_slots;
6683 unsigned long val;
6684
6685 /* Allocate memory for child nodes. */
6686 type = mte_node_type(mas->node);
6687 new_slots = ma_slots(new_node, type);
6688 request = mas_data_end(mas) + 1;
6689 count = mt_alloc_bulk(gfp, request, (void **)new_slots);
6690 if (unlikely(count < request)) {
6691 memset(new_slots, 0, request * sizeof(void *));
6692 mas_set_err(mas, -ENOMEM);
6693 return;
6694 }
6695
6696 /* Restore node type information in slots. */
6697 slots = ma_slots(node, type);
6698 for (i = 0; i < count; i++) {
6699 val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
6700 val &= MAPLE_NODE_MASK;
6701 ((unsigned long *)new_slots)[i] |= val;
6702 }
6703 }
6704
6705 /*
6706 * mas_dup_build() - Build a new maple tree from a source tree
6707 * @mas: The maple state of source tree, need to be in MAS_START state.
6708 * @new_mas: The maple state of new tree, need to be in MAS_START state.
6709 * @gfp: The GFP_FLAGS to use for allocations.
6710 *
6711 * This function builds a new tree in DFS preorder. If the memory allocation
6712 * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
6713 * last node. mas_dup_free() will free the incomplete duplication of a tree.
6714 *
6715 * Note that the attributes of the two trees need to be exactly the same, and the
6716 * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
6717 */
6718 static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
6719 gfp_t gfp)
6720 {
6721 struct maple_node *node;
6722 struct maple_pnode *parent = NULL;
6723 struct maple_enode *root;
6724 enum maple_type type;
6725
6726 if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
6727 unlikely(!mtree_empty(new_mas->tree))) {
6728 mas_set_err(mas, -EINVAL);
6729 return;
6730 }
6731
6732 root = mas_start(mas);
6733 if (mas_is_ptr(mas) || mas_is_none(mas))
6734 goto set_new_tree;
6735
6736 node = mt_alloc_one(gfp);
6737 if (!node) {
6738 new_mas->status = ma_none;
6739 mas_set_err(mas, -ENOMEM);
6740 return;
6741 }
6742
6743 type = mte_node_type(mas->node);
6744 root = mt_mk_node(node, type);
6745 new_mas->node = root;
6746 new_mas->min = 0;
6747 new_mas->max = ULONG_MAX;
6748 root = mte_mk_root(root);
6749 while (1) {
6750 mas_copy_node(mas, new_mas, parent);
6751 if (!mte_is_leaf(mas->node)) {
6752 /* Only allocate child nodes for non-leaf nodes. */
6753 mas_dup_alloc(mas, new_mas, gfp);
6754 if (unlikely(mas_is_err(mas)))
6755 return;
6756 } else {
6757 /*
6758 * This is the last leaf node and duplication is
6759 * completed.
6760 */
6761 if (mas->max == ULONG_MAX)
6762 goto done;
6763
6764 /* This is not the last leaf node and needs to go up. */
6765 do {
6766 mas_ascend(mas);
6767 mas_ascend(new_mas);
6768 } while (mas->offset == mas_data_end(mas));
6769
6770 /* Move to the next subtree. */
6771 mas->offset++;
6772 new_mas->offset++;
6773 }
6774
6775 mas_descend(mas);
6776 parent = ma_parent_ptr(mte_to_node(new_mas->node));
6777 mas_descend(new_mas);
6778 mas->offset = 0;
6779 new_mas->offset = 0;
6780 }
6781 done:
6782 /* Specially handle the parent of the root node. */
6783 mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
6784 set_new_tree:
6785 /* Make them the same height */
6786 new_mas->tree->ma_flags = mas->tree->ma_flags;
6787 rcu_assign_pointer(new_mas->tree->ma_root, root);
6788 }
6789
6790 /**
6791 * __mt_dup() - Duplicate an entire maple tree
6792 * @mt: The source maple tree
6793 * @new: The new maple tree
6794 * @gfp: The GFP_FLAGS to use for allocations
6795 *
6796 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6797 * traversal. It uses memcpy() to copy nodes in the source tree and allocate
6798 * new child nodes in non-leaf nodes. The new node is exactly the same as the
6799 * source node except for all the addresses stored in it. It will be faster than
6800 * traversing all elements in the source tree and inserting them one by one into
6801 * the new tree.
6802 * The user needs to ensure that the attributes of the source tree and the new
6803 * tree are the same, and the new tree needs to be an empty tree, otherwise
6804 * -EINVAL will be returned.
6805 * Note that the user needs to manually lock the source tree and the new tree.
6806 *
6807 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6808 * the attributes of the two trees are different or the new tree is not an empty
6809 * tree.
6810 */
6811 int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6812 {
6813 int ret = 0;
6814 MA_STATE(mas, mt, 0, 0);
6815 MA_STATE(new_mas, new, 0, 0);
6816
6817 mas_dup_build(&mas, &new_mas, gfp);
6818 if (unlikely(mas_is_err(&mas))) {
6819 ret = xa_err(mas.node);
6820 if (ret == -ENOMEM)
6821 mas_dup_free(&new_mas);
6822 }
6823
6824 return ret;
6825 }
6826 EXPORT_SYMBOL(__mt_dup);
6827
6828 /**
6829 * mtree_dup() - Duplicate an entire maple tree
6830 * @mt: The source maple tree
6831 * @new: The new maple tree
6832 * @gfp: The GFP_FLAGS to use for allocations
6833 *
6834 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6835 * traversal. It uses memcpy() to copy nodes in the source tree and allocate
6836 * new child nodes in non-leaf nodes. The new node is exactly the same as the
6837 * source node except for all the addresses stored in it. It will be faster than
6838 * traversing all elements in the source tree and inserting them one by one into
6839 * the new tree.
6840 * The user needs to ensure that the attributes of the source tree and the new
6841 * tree are the same, and the new tree needs to be an empty tree, otherwise
6842 * -EINVAL will be returned.
6843 *
6844 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6845 * the attributes of the two trees are different or the new tree is not an empty
6846 * tree.
6847 */
6848 int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6849 {
6850 int ret = 0;
6851 MA_STATE(mas, mt, 0, 0);
6852 MA_STATE(new_mas, new, 0, 0);
6853
6854 mas_lock(&new_mas);
6855 mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
6856 mas_dup_build(&mas, &new_mas, gfp);
6857 mas_unlock(&mas);
6858 if (unlikely(mas_is_err(&mas))) {
6859 ret = xa_err(mas.node);
6860 if (ret == -ENOMEM)
6861 mas_dup_free(&new_mas);
6862 }
6863
6864 mas_unlock(&new_mas);
6865 return ret;
6866 }
6867 EXPORT_SYMBOL(mtree_dup);
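
/*
 * Example (illustrative sketch): duplicating a tree into a freshly
 * initialized, empty tree with matching flags. mtree_dup() takes both
 * tree locks itself; use __mt_dup() when the caller already holds them.
 * "old_tree" is hypothetical.
 *
 *	struct maple_tree new_tree;
 *	int ret;
 *
 *	mt_init_flags(&new_tree, mt_attr(&old_tree));
 *	ret = mtree_dup(&old_tree, &new_tree, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */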
6868
6869 /**
6870 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6871 * @mt: The maple tree
6872 *
6873 * Note: Does not handle locking.
6874 */
6875 void __mt_destroy(struct maple_tree *mt)
6876 {
6877 void *root = mt_root_locked(mt);
6878
6879 rcu_assign_pointer(mt->ma_root, NULL);
6880 if (xa_is_node(root))
6881 mte_destroy_walk(root, mt);
6882
6883 mt->ma_flags = mt_attr(mt);
6884 }
6885 EXPORT_SYMBOL_GPL(__mt_destroy);
6886
6887 /**
6888 * mtree_destroy() - Destroy a maple tree
6889 * @mt: The maple tree
6890 *
6891 * Frees all resources used by the tree. Handles locking.
6892 */
6893 void mtree_destroy(struct maple_tree *mt)
6894 {
6895 mtree_lock(mt);
6896 __mt_destroy(mt);
6897 mtree_unlock(mt);
6898 }
6899 EXPORT_SYMBOL(mtree_destroy);
6900
6901 /**
6902 * mt_find() - Search from the start up until an entry is found.
6903 * @mt: The maple tree
6904 * @index: Pointer which contains the start location of the search
6905 * @max: The maximum value of the search range
6906 *
6907 * Takes RCU read lock internally to protect the search, which does not
6908 * protect the returned pointer after dropping RCU read lock.
6909 * See also: Documentation/core-api/maple_tree.rst
6910 *
6911 * If an entry is found, @index is updated to point to the next possible
6912 * entry, independent of whether the found entry occupies a single index
6913 * or a range of indices.
6914 *
6915 * Return: The entry at or after the @index or %NULL
6916 */
6917 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6918 {
6919 MA_STATE(mas, mt, *index, *index);
6920 void *entry;
6921 #ifdef CONFIG_DEBUG_MAPLE_TREE
6922 unsigned long copy = *index;
6923 #endif
6924
6925 trace_ma_read(__func__, &mas);
6926
6927 if ((*index) > max)
6928 return NULL;
6929
6930 rcu_read_lock();
6931 retry:
6932 entry = mas_state_walk(&mas);
6933 if (mas_is_start(&mas))
6934 goto retry;
6935
6936 if (unlikely(xa_is_zero(entry)))
6937 entry = NULL;
6938
6939 if (entry)
6940 goto unlock;
6941
6942 while (mas_is_active(&mas) && (mas.last < max)) {
6943 entry = mas_next_entry(&mas, max);
6944 if (likely(entry && !xa_is_zero(entry)))
6945 break;
6946 }
6947
6948 if (unlikely(xa_is_zero(entry)))
6949 entry = NULL;
6950 unlock:
6951 rcu_read_unlock();
6952 if (likely(entry)) {
6953 *index = mas.last + 1;
6954 #ifdef CONFIG_DEBUG_MAPLE_TREE
6955 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6956 pr_err("index not increased! %lx <= %lx\n",
6957 *index, copy);
6958 #endif
6959 }
6960
6961 return entry;
6962 }
6963 EXPORT_SYMBOL(mt_find);
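
/*
 * Example (illustrative sketch): mt_find() advances *index past each hit,
 * which is what the mt_for_each() helper builds on. The helper continues
 * with mt_find_after() so that a wrap of @index to 0 ends the loop.
 * "tree" and do_something() are hypothetical.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&tree, entry, index, ULONG_MAX)
 *		do_something(entry);
 */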
6964
6965 /**
6966 * mt_find_after() - Search from the start up until an entry is found.
6967 * @mt: The maple tree
6968 * @index: Pointer which contains the start location of the search
6969 * @max: The maximum value to check
6970 *
6971 * Same as mt_find() except that it checks @index for 0 before
6972 * searching. If @index == 0, the search is aborted. This covers a wrap
6973 * around of @index to 0 in an iterator loop.
6974 *
6975 * Return: The entry at or after the @index or %NULL
6976 */
6977 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6978 unsigned long max)
6979 {
6980 if (!(*index))
6981 return NULL;
6982
6983 return mt_find(mt, index, max);
6984 }
6985 EXPORT_SYMBOL(mt_find_after);
6986
6987 #ifdef CONFIG_DEBUG_MAPLE_TREE
6988 atomic_t maple_tree_tests_run;
6989 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6990 atomic_t maple_tree_tests_passed;
6991 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6992
6993 #ifndef __KERNEL__
6994 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6995 void mt_set_non_kernel(unsigned int val)
6996 {
6997 kmem_cache_set_non_kernel(maple_node_cache, val);
6998 }
6999
7000 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
7001 unsigned long mt_get_alloc_size(void)
7002 {
7003 return kmem_cache_get_alloc(maple_node_cache);
7004 }
7005
7006 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
7007 void mt_zero_nr_tallocated(void)
7008 {
7009 kmem_cache_zero_nr_tallocated(maple_node_cache);
7010 }
7011
7012 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
7013 unsigned int mt_nr_tallocated(void)
7014 {
7015 return kmem_cache_nr_tallocated(maple_node_cache);
7016 }
7017
7018 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
7019 unsigned int mt_nr_allocated(void)
7020 {
7021 return kmem_cache_nr_allocated(maple_node_cache);
7022 }
7023
7024 void mt_cache_shrink(void)
7025 {
7026 }
7027 #else
7028 /*
7029 * mt_cache_shrink() - For testing, don't use this.
7030 *
7031 * Certain testcases can trigger an OOM when combined with other memory
7032 * debugging configuration options. This function is used to reduce the
7033 * possibility of an out-of-memory event due to kmem_cache objects remaining
7034 * around for longer than usual.
7035 */
7036 void mt_cache_shrink(void)
7037 {
7038 kmem_cache_shrink(maple_node_cache);
7039
7040 }
7041 EXPORT_SYMBOL_GPL(mt_cache_shrink);
7042
7043 #endif /* not defined __KERNEL__ */
7044 /*
7045 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
7046 * @mas: The maple state
7047 * @offset: The offset into the slot array to fetch.
7048 *
7049 * Return: The entry stored at @offset.
7050 */
7051 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
7052 unsigned char offset)
7053 {
7054 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
7055 offset);
7056 }
7057
7058 /* Depth first search, post-order */
7059 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
7060 {
7061
7062 struct maple_enode *p, *mn = mas->node;
7063 unsigned long p_min, p_max;
7064
7065 mas_next_node(mas, mas_mn(mas), max);
7066 if (!mas_is_overflow(mas))
7067 return;
7068
7069 if (mte_is_root(mn))
7070 return;
7071
7072 mas->node = mn;
7073 mas_ascend(mas);
7074 do {
7075 p = mas->node;
7076 p_min = mas->min;
7077 p_max = mas->max;
7078 mas_prev_node(mas, 0);
7079 } while (!mas_is_underflow(mas));
7080
7081 mas->node = p;
7082 mas->max = p_max;
7083 mas->min = p_min;
7084 }
7085
7086 /* Tree validations */
7087 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7088 unsigned long min, unsigned long max, unsigned int depth,
7089 enum mt_dump_format format);
7090 static void mt_dump_range(unsigned long min, unsigned long max,
7091 unsigned int depth, enum mt_dump_format format)
7092 {
7093 static const char spaces[] = "                                                                ";
7094
7095 switch (format) {
7096 case mt_dump_hex:
7097 if (min == max)
7098 pr_info("%.*s%lx: ", depth * 2, spaces, min);
7099 else
7100 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
7101 break;
7102 case mt_dump_dec:
7103 if (min == max)
7104 pr_info("%.*s%lu: ", depth * 2, spaces, min);
7105 else
7106 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
7107 }
7108 }
7109
7110 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
7111 unsigned int depth, enum mt_dump_format format)
7112 {
7113 mt_dump_range(min, max, depth, format);
7114
7115 if (xa_is_value(entry))
7116 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
7117 xa_to_value(entry), entry);
7118 else if (xa_is_zero(entry))
7119 pr_cont("zero (%ld)\n", xa_to_internal(entry));
7120 else if (mt_is_reserved(entry))
7121 pr_cont("UNKNOWN ENTRY (%p)\n", entry);
7122 else
7123 pr_cont("%p\n", entry);
7124 }
7125
7126 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
7127 unsigned long min, unsigned long max, unsigned int depth,
7128 enum mt_dump_format format)
7129 {
7130 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
7131 bool leaf = mte_is_leaf(entry);
7132 unsigned long first = min;
7133 int i;
7134
7135 pr_cont(" contents: ");
7136 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
7137 switch (format) {
7138 case mt_dump_hex:
7139 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7140 break;
7141 case mt_dump_dec:
7142 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7143 }
7144 }
7145 pr_cont("%p\n", node->slot[i]);
7146 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
7147 unsigned long last = max;
7148
7149 if (i < (MAPLE_RANGE64_SLOTS - 1))
7150 last = node->pivot[i];
7151 else if (!node->slot[i] && max != mt_node_max(entry))
7152 break;
7153 if (last == 0 && i > 0)
7154 break;
7155 if (leaf)
7156 mt_dump_entry(mt_slot(mt, node->slot, i),
7157 first, last, depth + 1, format);
7158 else if (node->slot[i])
7159 mt_dump_node(mt, mt_slot(mt, node->slot, i),
7160 first, last, depth + 1, format);
7161
7162 if (last == max)
7163 break;
7164 if (last > max) {
7165 switch (format) {
7166 case mt_dump_hex:
7167 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
7168 node, last, max, i);
7169 break;
7170 case mt_dump_dec:
7171 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7172 node, last, max, i);
7173 }
7174 }
7175 first = last + 1;
7176 }
7177 }
7178
7179 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
7180 unsigned long min, unsigned long max, unsigned int depth,
7181 enum mt_dump_format format)
7182 {
7183 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
7184 bool leaf = mte_is_leaf(entry);
7185 unsigned long first = min;
7186 int i;
7187
7188 pr_cont(" contents: ");
7189 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7190 switch (format) {
7191 case mt_dump_hex:
7192 pr_cont("%lx ", node->gap[i]);
7193 break;
7194 case mt_dump_dec:
7195 pr_cont("%lu ", node->gap[i]);
7196 }
7197 }
7198 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
7199 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
7200 switch (format) {
7201 case mt_dump_hex:
7202 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7203 break;
7204 case mt_dump_dec:
7205 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7206 }
7207 }
7208 pr_cont("%p\n", node->slot[i]);
7209 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7210 unsigned long last = max;
7211
7212 if (i < (MAPLE_ARANGE64_SLOTS - 1))
7213 last = node->pivot[i];
7214 else if (!node->slot[i])
7215 break;
7216 if (last == 0 && i > 0)
7217 break;
7218 if (leaf)
7219 mt_dump_entry(mt_slot(mt, node->slot, i),
7220 first, last, depth + 1, format);
7221 else if (node->slot[i])
7222 mt_dump_node(mt, mt_slot(mt, node->slot, i),
7223 first, last, depth + 1, format);
7224
7225 if (last == max)
7226 break;
7227 if (last > max) {
7228 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7229 node, last, max, i);
7230 break;
7231 }
7232 first = last + 1;
7233 }
7234 }
7235
7236 static void mt_dump_node(const struct maple_tree *mt, void *entry,
7237 unsigned long min, unsigned long max, unsigned int depth,
7238 enum mt_dump_format format)
7239 {
7240 struct maple_node *node = mte_to_node(entry);
7241 unsigned int type = mte_node_type(entry);
7242 unsigned int i;
7243
7244 mt_dump_range(min, max, depth, format);
7245
7246 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
7247 node ? node->parent : NULL);
7248 switch (type) {
7249 case maple_dense:
7250 pr_cont("\n");
7251 for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
7252 if (min + i > max)
7253 pr_cont("OUT OF RANGE: ");
7254 mt_dump_entry(mt_slot(mt, node->slot, i),
7255 min + i, min + i, depth, format);
7256 }
7257 break;
7258 case maple_leaf_64:
7259 case maple_range_64:
7260 mt_dump_range64(mt, entry, min, max, depth, format);
7261 break;
7262 case maple_arange_64:
7263 mt_dump_arange64(mt, entry, min, max, depth, format);
7264 break;
7265
7266 default:
7267 pr_cont(" UNKNOWN TYPE\n");
7268 }
7269 }
7270
7271 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
7272 {
7273 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
7274
7275 pr_info("maple_tree(%p) flags %X, height %u root %p\n",
7276 mt, mt->ma_flags, mt_height(mt), entry);
7277 if (!xa_is_node(entry))
7278 mt_dump_entry(entry, 0, 0, 0, format);
7279 else if (entry)
7280 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
7281 }
7282 EXPORT_SYMBOL_GPL(mt_dump);
7283
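/*
 * Example use of mt_dump() (a minimal sketch, assuming
 * CONFIG_DEBUG_MAPLE_TREE is enabled and the tree is stable or locked by
 * the caller; the tree and values below are hypothetical):
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 10, 20, xa_mk_value(1), GFP_KERNEL);
 *	mt_dump(&tree, mt_dump_dec);
 *
 * This prints the root pointer followed by one line per node/entry,
 * indented by depth, with ranges in the requested format.
 */
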
/*
 * Calculate the maximum gap in a node and check that it matches the gap
 * reported in the parent (unless root).
 */
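/*
 * A worked example of the invariant (hypothetical values): if a child
 * covering [0, 100] has empty slots spanning [10, 19] and [40, 79], its
 * largest gap is 40.  In an allocation tree (maple_arange_64), the
 * parent's gap[] entry for that child must then be 40, and the node's
 * metadata gap offset must point at a slot holding the largest gap.
 */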
static void mas_validate_gaps(struct ma_state *mas)
{
	struct maple_enode *mte = mas->node;
	struct maple_node *p_mn, *node = mte_to_node(mte);
	enum maple_type mt = mte_node_type(mas->node);
	unsigned long gap = 0, max_gap = 0;
	unsigned long p_end, p_start = mas->min;
	unsigned char p_slot, offset;
	unsigned long *gaps = NULL;
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned int i;

	if (ma_is_dense(mt)) {
		for (i = 0; i < mt_slot_count(mte); i++) {
			if (mas_get_slot(mas, i)) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
				continue;
			}
			gap++;
		}
		goto counted;
	}

	gaps = ma_gaps(node, mt);
	for (i = 0; i < mt_slot_count(mte); i++) {
		p_end = mas_safe_pivot(mas, pivots, i, mt);

		if (!gaps) {
			if (!mas_get_slot(mas, i))
				gap = p_end - p_start + 1;
		} else {
			void *entry = mas_get_slot(mas, i);

			gap = gaps[i];
			MT_BUG_ON(mas->tree, !entry);

			if (gap > p_end - p_start + 1) {
				pr_err("%p[%u] %lu > %lu - %lu + 1 (%lu)\n",
				       mas_mn(mas), i, gap, p_end, p_start,
				       p_end - p_start + 1);
				MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
			}
		}

		if (gap > max_gap)
			max_gap = gap;

		p_start = p_end + 1;
		if (p_end >= mas->max)
			break;
	}

counted:
	if (mt == maple_arange_64) {
		MT_BUG_ON(mas->tree, !gaps);
		offset = ma_meta_gap(node);
		if (offset > i) {
			pr_err("gap offset %p[%u] is invalid\n", node, offset);
			MT_BUG_ON(mas->tree, 1);
		}

		if (gaps[offset] != max_gap) {
			pr_err("gap %p[%u] is not the largest gap %lu\n",
			       node, offset, max_gap);
			MT_BUG_ON(mas->tree, 1);
		}

		for (i++; i < mt_slot_count(mte); i++) {
			if (gaps[i] != 0) {
				pr_err("gap %p[%u] beyond node limit != 0\n",
				       node, i);
				MT_BUG_ON(mas->tree, 1);
			}
		}
	}

	if (mte_is_root(mte))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_mn = mte_parent(mte);
	MT_BUG_ON(mas->tree, max_gap > mas->max);
	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
		mt_dump(mas->tree, mt_dump_hex);
		MT_BUG_ON(mas->tree, 1);
	}
}

static void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type;
	unsigned char p_slot;
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_type = mas_parent_type(mas, mas->node);
	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */

	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent %p[%u] does not have %p\n",
					parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
			       mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}

static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	unsigned char i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);

		if (!child) {
			pr_err("Non-leaf node lacks child at %p[%u]\n",
			       mas_mn(mas), i);
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
			       mas_mn(mas), i, mte_to_node(child),
			       mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child %p has parent %p not %p\n",
			       mte_to_node(child), mte_parent(child),
			       mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}

		if (i < mt_pivots[type] && pivots[i] == mas->max)
			break;
	}
}

/*
 * Validate that all pivots are within mas->min and mas->max, check that the
 * metadata ends where the maximum ends, and ensure there are no slots or
 * pivots set outside of the end of the data.
 */
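/*
 * For example (hypothetical values), a node covering [100, 200] with
 * pivots {120, 150, 200} passes; a pivot of 250 would exceed mas->max,
 * a zero pivot after offset 0 would indicate a missing node limit, and
 * any entry or pivot found past the metadata end is reported as stray.
 */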
static void mas_validate_limits(struct ma_state *mas)
{
	int i;
	unsigned long prev_piv = 0;
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	for (i = 0; i < mt_slots[type]; i++) {
		unsigned long piv;

		piv = mas_safe_pivot(mas, pivots, i, type);

		if (!piv && (i != 0)) {
			pr_err("Missing node limit pivot at %p[%u]\n",
			       mas_mn(mas), i);
			MAS_WARN_ON(mas, 1);
		}

		if (prev_piv > piv) {
			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
				mas_mn(mas), i, piv, prev_piv);
			MAS_WARN_ON(mas, piv < prev_piv);
		}

		if (piv < mas->min) {
			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
				piv, mas->min);
			MAS_WARN_ON(mas, piv < mas->min);
		}
		if (piv > mas->max) {
			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
				piv, mas->max);
			MAS_WARN_ON(mas, piv > mas->max);
		}
		prev_piv = piv;
		if (piv == mas->max)
			break;
	}

	if (mas_data_end(mas) != i) {
		pr_err("node%p: data_end %u != the last slot offset %u\n",
		       mas_mn(mas), mas_data_end(mas), i);
		MT_BUG_ON(mas->tree, 1);
	}

	for (i += 1; i < mt_slots[type]; i++) {
		void *entry = mas_slot(mas, slots, i);

		if (entry && (i != mt_slots[type] - 1)) {
			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
			       i, entry);
			MT_BUG_ON(mas->tree, entry != NULL);
		}

		if (i < mt_pivots[type]) {
			unsigned long piv = pivots[i];

			if (!piv)
				continue;

			pr_err("%p[%u] should not have piv %lu\n",
			       mas_mn(mas), i, piv);
			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
		}
	}
}

static void mt_validate_nulls(struct maple_tree *mt)
{
	void *entry, *last = (void *)1;
	unsigned char offset = 0;
	void __rcu **slots;
	MA_STATE(mas, mt, 0, 0);

	mas_start(&mas);
	if (mas_is_none(&mas) || mas_is_ptr(&mas))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
	do {
		entry = mas_slot(&mas, slots, offset);
		if (!last && !entry) {
			pr_err("Sequential nulls end at %p[%u]\n",
				mas_mn(&mas), offset);
		}
		MT_BUG_ON(mt, !last && !entry);
		last = entry;
		if (offset == mas_data_end(&mas)) {
			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
			if (mas_is_overflow(&mas))
				return;
			offset = 0;
			slots = ma_slots(mte_to_node(mas.node),
					 mte_node_type(mas.node));
		} else {
			offset++;
		}

	} while (!mas_is_overflow(&mas));
}

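/*
 * Example of the rule mt_validate_nulls() enforces (hypothetical leaf
 * contents): a leaf storing { A, NULL, B } is valid, but
 * { A, NULL, NULL, B } is not, since adjacent NULL ranges are expected
 * to be coalesced into a single slot when written.
 */
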
/*
 * Validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max)
 * 2. The gap is correctly set in the parents
 * 3. The parent/child slot linkage is consistent
 * 4. No sequential NULL entries are stored in the leaves
 */
void mt_validate(struct maple_tree *mt)
	__must_hold(mt->ma_lock)
{
	unsigned char end;

	MA_STATE(mas, mt, 0, 0);
	mas_start(&mas);
	if (!mas_is_active(&mas))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	while (!mas_is_overflow(&mas)) {
		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
		end = mas_data_end(&mas);
		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
				(mas.max != ULONG_MAX))) {
			pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
		}

		mas_validate_parent_slot(&mas);
		mas_validate_limits(&mas);
		mas_validate_child_slot(&mas);
		if (mt_is_alloc(mt))
			mas_validate_gaps(&mas);
		mas_dfs_postorder(&mas, ULONG_MAX);
	}
	mt_validate_nulls(mt);
}
EXPORT_SYMBOL_GPL(mt_validate);

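/*
 * A minimal usage sketch (hypothetical call site): run the validator
 * from a debug path while holding the tree lock, as the annotation
 * above requires:
 *
 *	mtree_lock(&tree);
 *	mt_validate(&tree);
 *	mtree_unlock(&tree);
 *
 * Any violated invariant is reported via pr_err() and trips
 * MT_BUG_ON()/MAS_WARN_ON().
 */
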
void mas_dump(const struct ma_state *mas)
{
	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
	switch (mas->status) {
	case ma_active:
		pr_err("(ma_active)");
		break;
	case ma_none:
		pr_err("(ma_none)");
		break;
	case ma_root:
		pr_err("(ma_root)");
		break;
	case ma_start:
		pr_err("(ma_start) ");
		break;
	case ma_pause:
		pr_err("(ma_pause) ");
		break;
	case ma_overflow:
		pr_err("(ma_overflow) ");
		break;
	case ma_underflow:
		pr_err("(ma_underflow) ");
		break;
	case ma_error:
		pr_err("(ma_error) ");
		break;
	}

	pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
	       mas->index, mas->last);
	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
	if (mas->index > mas->last)
		pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);

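/*
 * A minimal sketch of dumping a maple state at a hypothetical call site,
 * e.g. when a lookup returns something unexpected:
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	rcu_read_lock();
 *	entry = mas_find(&mas, ULONG_MAX);
 *	if (!entry)
 *		mas_dump(&mas);
 *	rcu_read_unlock();
 */
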
void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
	       wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
	       wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);

#endif /* CONFIG_DEBUG_MAPLE_TREE */