1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Maple Tree implementation 4 * Copyright (c) 2018-2022 Oracle Corporation 5 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com> 6 * Matthew Wilcox <willy@infradead.org> 7 * Copyright (c) 2023 ByteDance 8 * Author: Peng Zhang <zhangpeng.00@bytedance.com> 9 */ 10 11 /* 12 * DOC: Interesting implementation details of the Maple Tree 13 * 14 * Each node type has a number of slots for entries and a number of slots for 15 * pivots. In the case of dense nodes, the pivots are implied by the position 16 * and are simply the slot index + the minimum of the node. 17 * 18 * In regular B-Tree terms, pivots are called keys. The term pivot is used to 19 * indicate that the tree is specifying ranges. Pivots may appear in the 20 * subtree with an entry attached to the value whereas keys are unique to a 21 * specific position of a B-tree. Pivot values are inclusive of the slot with 22 * the same index. 23 * 24 * 25 * The following illustrates the layout of a range64 node's slots and pivots. 26 * 27 * 28 * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 | 29 * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ 30 * │ │ │ │ │ │ │ │ └─ Implied maximum 31 * │ │ │ │ │ │ │ └─ Pivot 14 32 * │ │ │ │ │ │ └─ Pivot 13 33 * │ │ │ │ │ └─ Pivot 12 34 * │ │ │ │ └─ Pivot 11 35 * │ │ │ └─ Pivot 2 36 * │ │ └─ Pivot 1 37 * │ └─ Pivot 0 38 * └─ Implied minimum 39 * 40 * Slot contents: 41 * Internal (non-leaf) nodes contain pointers to other nodes. 42 * Leaf nodes contain entries. 43 * 44 * The location of interest is often referred to as an offset. All offsets have 45 * a slot, but the last offset has an implied pivot from the node above (or 46 * UINT_MAX for the root node). 47 * 48 * Ranges complicate certain write activities. When modifying any of 49 * the B-tree variants, it is known that one entry will either be added or 50 * deleted. When modifying the Maple Tree, one store operation may overwrite 51 * the entire data set, or one half of the tree, or the middle half of the tree. 52 * 53 */ 54 55 56 #include <linux/maple_tree.h> 57 #include <linux/xarray.h> 58 #include <linux/types.h> 59 #include <linux/export.h> 60 #include <linux/slab.h> 61 #include <linux/limits.h> 62 #include <asm/barrier.h> 63 64 #define CREATE_TRACE_POINTS 65 #include <trace/events/maple_tree.h> 66 67 /* 68 * Kernel pointer hashing renders much of the maple tree dump useless as tagged 69 * pointers get hashed to arbitrary values. 70 * 71 * If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is 72 * permissible to bypass this. Otherwise remain cautious and retain the hashing. 73 * 74 * Userland doesn't know about %px so also use %p there.
75 */ 76 #if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE) 77 #define PTR_FMT "%px" 78 #else 79 #define PTR_FMT "%p" 80 #endif 81 82 #define MA_ROOT_PARENT 1 83 84 /* 85 * Maple state flags 86 * * MA_STATE_BULK - Bulk insert mode 87 * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert 88 * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation 89 */ 90 #define MA_STATE_BULK 1 91 #define MA_STATE_REBALANCE 2 92 #define MA_STATE_PREALLOC 4 93 94 #define ma_parent_ptr(x) ((struct maple_pnode *)(x)) 95 #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT) 96 #define ma_mnode_ptr(x) ((struct maple_node *)(x)) 97 #define ma_enode_ptr(x) ((struct maple_enode *)(x)) 98 static struct kmem_cache *maple_node_cache; 99 100 #ifdef CONFIG_DEBUG_MAPLE_TREE 101 static const unsigned long mt_max[] = { 102 [maple_dense] = MAPLE_NODE_SLOTS, 103 [maple_leaf_64] = ULONG_MAX, 104 [maple_range_64] = ULONG_MAX, 105 [maple_arange_64] = ULONG_MAX, 106 }; 107 #define mt_node_max(x) mt_max[mte_node_type(x)] 108 #endif 109 110 static const unsigned char mt_slots[] = { 111 [maple_dense] = MAPLE_NODE_SLOTS, 112 [maple_leaf_64] = MAPLE_RANGE64_SLOTS, 113 [maple_range_64] = MAPLE_RANGE64_SLOTS, 114 [maple_arange_64] = MAPLE_ARANGE64_SLOTS, 115 }; 116 #define mt_slot_count(x) mt_slots[mte_node_type(x)] 117 118 static const unsigned char mt_pivots[] = { 119 [maple_dense] = 0, 120 [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1, 121 [maple_range_64] = MAPLE_RANGE64_SLOTS - 1, 122 [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1, 123 }; 124 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)] 125 126 static const unsigned char mt_min_slots[] = { 127 [maple_dense] = MAPLE_NODE_SLOTS / 2, 128 [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2, 129 [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2, 130 [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1, 131 }; 132 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)] 133 134 #define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2) 135 #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1) 136 137 struct maple_big_node { 138 unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1]; 139 union { 140 struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS]; 141 struct { 142 unsigned long padding[MAPLE_BIG_NODE_GAPS]; 143 unsigned long gap[MAPLE_BIG_NODE_GAPS]; 144 }; 145 }; 146 unsigned char b_end; 147 enum maple_type type; 148 }; 149 150 /* 151 * The maple_subtree_state is used to build a tree to replace a segment of an 152 * existing tree in a more atomic way. Any walkers of the older tree will hit a 153 * dead node and restart on updates. 
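 *
 * Example (illustrative, names assumed): a spanning rebalance sets up the
 * three new states plus the original boundary states before building the
 * replacement subtree:
 *
 *	struct maple_subtree_state mast = {
 *		.orig_l = &orig_l_mas, .orig_r = &orig_r_mas,
 *		.l = &l_mas, .m = &m_mas, .r = &r_mas,
 *		.bn = &b_node,
 *	};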
154 */ 155 struct maple_subtree_state { 156 struct ma_state *orig_l; /* Original left side of subtree */ 157 struct ma_state *orig_r; /* Original right side of subtree */ 158 struct ma_state *l; /* New left side of subtree */ 159 struct ma_state *m; /* New middle of subtree (rare) */ 160 struct ma_state *r; /* New right side of subtree */ 161 struct ma_topiary *free; /* nodes to be freed */ 162 struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */ 163 struct maple_big_node *bn; 164 }; 165 166 #ifdef CONFIG_KASAN_STACK 167 /* Prevent mas_wr_bnode() from exceeding the stack frame limit */ 168 #define noinline_for_kasan noinline_for_stack 169 #else 170 #define noinline_for_kasan inline 171 #endif 172 173 /* Functions */ 174 static inline struct maple_node *mt_alloc_one(gfp_t gfp) 175 { 176 return kmem_cache_alloc(maple_node_cache, gfp); 177 } 178 179 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes) 180 { 181 return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes); 182 } 183 184 static inline void mt_free_one(struct maple_node *node) 185 { 186 kmem_cache_free(maple_node_cache, node); 187 } 188 189 static inline void mt_free_bulk(size_t size, void __rcu **nodes) 190 { 191 kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes); 192 } 193 194 static void mt_free_rcu(struct rcu_head *head) 195 { 196 struct maple_node *node = container_of(head, struct maple_node, rcu); 197 198 kmem_cache_free(maple_node_cache, node); 199 } 200 201 /* 202 * ma_free_rcu() - Use rcu callback to free a maple node 203 * @node: The node to free 204 * 205 * The maple tree uses the parent pointer to indicate this node is no longer in 206 * use and will be freed. 207 */ 208 static void ma_free_rcu(struct maple_node *node) 209 { 210 WARN_ON(node->parent != ma_parent_ptr(node)); 211 call_rcu(&node->rcu, mt_free_rcu); 212 } 213 214 static void mas_set_height(struct ma_state *mas) 215 { 216 unsigned int new_flags = mas->tree->ma_flags; 217 218 new_flags &= ~MT_FLAGS_HEIGHT_MASK; 219 MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX); 220 new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET; 221 mas->tree->ma_flags = new_flags; 222 } 223 224 static unsigned int mas_mt_height(struct ma_state *mas) 225 { 226 return mt_height(mas->tree); 227 } 228 229 static inline unsigned int mt_attr(struct maple_tree *mt) 230 { 231 return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK; 232 } 233 234 static __always_inline enum maple_type mte_node_type( 235 const struct maple_enode *entry) 236 { 237 return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) & 238 MAPLE_NODE_TYPE_MASK; 239 } 240 241 static __always_inline bool ma_is_dense(const enum maple_type type) 242 { 243 return type < maple_leaf_64; 244 } 245 246 static __always_inline bool ma_is_leaf(const enum maple_type type) 247 { 248 return type < maple_range_64; 249 } 250 251 static __always_inline bool mte_is_leaf(const struct maple_enode *entry) 252 { 253 return ma_is_leaf(mte_node_type(entry)); 254 } 255 256 /* 257 * We also reserve values with the bottom two bits set to '10' which are 258 * below 4096 259 */ 260 static __always_inline bool mt_is_reserved(const void *entry) 261 { 262 return ((unsigned long)entry < MAPLE_RESERVED_RANGE) && 263 xa_is_internal(entry); 264 } 265 266 static __always_inline void mas_set_err(struct ma_state *mas, long err) 267 { 268 mas->node = MA_ERROR(err); 269 mas->status = ma_error; 270 } 271 272 static __always_inline bool mas_is_ptr(const struct ma_state *mas) 273 { 274 return mas->status == ma_root; 275 } 276 277 
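/*
 * Example (illustrative sketch, not part of the implementation): the status
 * predicates here and below are what users of a maple state check to decide
 * how to resume an operation. A hypothetical caller might do:
 *
 *	if (mas_is_start(&mas))
 *		entry = mas_walk(&mas);            // fresh state: walk from root
 *	else if (mas_is_paused(&mas))
 *		entry = mas_next(&mas, ULONG_MAX); // resume after mas_pause()
 */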
static __always_inline bool mas_is_start(const struct ma_state *mas) 278 { 279 return mas->status == ma_start; 280 } 281 282 static __always_inline bool mas_is_none(const struct ma_state *mas) 283 { 284 return mas->status == ma_none; 285 } 286 287 static __always_inline bool mas_is_paused(const struct ma_state *mas) 288 { 289 return mas->status == ma_pause; 290 } 291 292 static __always_inline bool mas_is_overflow(struct ma_state *mas) 293 { 294 return mas->status == ma_overflow; 295 } 296 297 static inline bool mas_is_underflow(struct ma_state *mas) 298 { 299 return mas->status == ma_underflow; 300 } 301 302 static __always_inline struct maple_node *mte_to_node( 303 const struct maple_enode *entry) 304 { 305 return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK); 306 } 307 308 /* 309 * mte_to_mat() - Convert a maple encoded node to a maple topiary node. 310 * @entry: The maple encoded node 311 * 312 * Return: a maple topiary pointer 313 */ 314 static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry) 315 { 316 return (struct maple_topiary *) 317 ((unsigned long)entry & ~MAPLE_NODE_MASK); 318 } 319 320 /* 321 * mas_mn() - Get the maple state node. 322 * @mas: The maple state 323 * 324 * Return: the maple node (not encoded - bare pointer). 325 */ 326 static inline struct maple_node *mas_mn(const struct ma_state *mas) 327 { 328 return mte_to_node(mas->node); 329 } 330 331 /* 332 * mte_set_node_dead() - Set a maple encoded node as dead. 333 * @mn: The maple encoded node. 334 */ 335 static inline void mte_set_node_dead(struct maple_enode *mn) 336 { 337 mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn)); 338 smp_wmb(); /* Needed for RCU */ 339 } 340 341 /* Bit 1 indicates the root is a node */ 342 #define MAPLE_ROOT_NODE 0x02 343 /* maple_type stored bit 3-6 */ 344 #define MAPLE_ENODE_TYPE_SHIFT 0x03 345 /* Bit 2 means a NULL somewhere below */ 346 #define MAPLE_ENODE_NULL 0x04 347 348 static inline struct maple_enode *mt_mk_node(const struct maple_node *node, 349 enum maple_type type) 350 { 351 return (void *)((unsigned long)node | 352 (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL); 353 } 354 355 static inline void *mte_mk_root(const struct maple_enode *node) 356 { 357 return (void *)((unsigned long)node | MAPLE_ROOT_NODE); 358 } 359 360 static inline void *mte_safe_root(const struct maple_enode *node) 361 { 362 return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE); 363 } 364 365 static inline void __maybe_unused *mte_set_full(const struct maple_enode *node) 366 { 367 return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL); 368 } 369 370 static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node) 371 { 372 return (void *)((unsigned long)node | MAPLE_ENODE_NULL); 373 } 374 375 static inline bool __maybe_unused mte_has_null(const struct maple_enode *node) 376 { 377 return (unsigned long)node & MAPLE_ENODE_NULL; 378 } 379 380 static __always_inline bool ma_is_root(struct maple_node *node) 381 { 382 return ((unsigned long)node->parent & MA_ROOT_PARENT); 383 } 384 385 static __always_inline bool mte_is_root(const struct maple_enode *node) 386 { 387 return ma_is_root(mte_to_node(node)); 388 } 389 390 static inline bool mas_is_root_limits(const struct ma_state *mas) 391 { 392 return !mas->min && mas->max == ULONG_MAX; 393 } 394 395 static __always_inline bool mt_is_alloc(struct maple_tree *mt) 396 { 397 return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE); 398 } 399 400 /* 401 * The Parent Pointer 402 * Excluding root, the parent 
pointer is 256B aligned like all other tree nodes. 403 * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16 404 * bit values need an extra bit to store the offset. This extra bit comes from 405 * a reuse of the last bit in the node type. This is possible by using bit 1 to 406 * indicate if bit 2 is part of the type or the slot. 407 * 408 * Node types: 409 * 0x??1 = Root 410 * 0x?00 = 16 bit nodes 411 * 0x010 = 32 bit nodes 412 * 0x110 = 64 bit nodes 413 * 414 * Slot size and alignment 415 * 0b??1 : Root 416 * 0b?00 : 16 bit values, type in 0-1, slot in 2-7 417 * 0b010 : 32 bit values, type in 0-2, slot in 3-7 418 * 0b110 : 64 bit values, type in 0-2, slot in 3-7 419 */ 420 421 #define MAPLE_PARENT_ROOT 0x01 422 423 #define MAPLE_PARENT_SLOT_SHIFT 0x03 424 #define MAPLE_PARENT_SLOT_MASK 0xF8 425 426 #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02 427 #define MAPLE_PARENT_16B_SLOT_MASK 0xFC 428 429 #define MAPLE_PARENT_RANGE64 0x06 430 #define MAPLE_PARENT_RANGE32 0x04 431 #define MAPLE_PARENT_NOT_RANGE16 0x02 432 433 /* 434 * mte_parent_shift() - Get the parent shift for the slot storage. 435 * @parent: The parent pointer cast as an unsigned long 436 * Return: The shift into that pointer to the start of the slot 437 */ 438 static inline unsigned long mte_parent_shift(unsigned long parent) 439 { 440 /* Note bit 1 == 0 means 16B */ 441 if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) 442 return MAPLE_PARENT_SLOT_SHIFT; 443 444 return MAPLE_PARENT_16B_SLOT_SHIFT; 445 } 446 447 /* 448 * mte_parent_slot_mask() - Get the slot mask for the parent. 449 * @parent: The parent pointer cast as an unsigned long. 450 * Return: The slot mask for that parent. 451 */ 452 static inline unsigned long mte_parent_slot_mask(unsigned long parent) 453 { 454 /* Note bit 1 == 0 means 16B */ 455 if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) 456 return MAPLE_PARENT_SLOT_MASK; 457 458 return MAPLE_PARENT_16B_SLOT_MASK; 459 } 460 461 /* 462 * mas_parent_type() - Return the maple_type of the parent from the stored 463 * parent type. 464 * @mas: The maple state 465 * @enode: The maple_enode to extract the parent's enum 466 * Return: The node->parent maple_type 467 */ 468 static inline 469 enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode) 470 { 471 unsigned long p_type; 472 473 p_type = (unsigned long)mte_to_node(enode)->parent; 474 if (WARN_ON(p_type & MAPLE_PARENT_ROOT)) 475 return 0; 476 477 p_type &= MAPLE_NODE_MASK; 478 p_type &= ~mte_parent_slot_mask(p_type); 479 switch (p_type) { 480 case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */ 481 if (mt_is_alloc(mas->tree)) 482 return maple_arange_64; 483 return maple_range_64; 484 } 485 486 return 0; 487 } 488 489 /* 490 * mas_set_parent() - Set the parent node and encode the slot 491 * @mas: The maple state 492 * @enode: The encoded maple node. 493 * @parent: The encoded maple node that is the parent of @enode. 494 * @slot: The slot that @enode resides in @parent. 495 * 496 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the 497 * parent type.
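 *
 * Example (illustrative, values assumed): for a maple_range_64 parent p at a
 * 256B-aligned address and slot 4, the value stored below is roughly
 *
 *	val = ((unsigned long)p & ~MAPLE_NODE_MASK) |
 *	      (4 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64;
 *
 * mte_parent() masks the metadata bits back off to recover p, and
 * mte_parent_slot() shifts the slot number back out.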
498 */ 499 static inline 500 void mas_set_parent(struct ma_state *mas, struct maple_enode *enode, 501 const struct maple_enode *parent, unsigned char slot) 502 { 503 unsigned long val = (unsigned long)parent; 504 unsigned long shift; 505 unsigned long type; 506 enum maple_type p_type = mte_node_type(parent); 507 508 MAS_BUG_ON(mas, p_type == maple_dense); 509 MAS_BUG_ON(mas, p_type == maple_leaf_64); 510 511 switch (p_type) { 512 case maple_range_64: 513 case maple_arange_64: 514 shift = MAPLE_PARENT_SLOT_SHIFT; 515 type = MAPLE_PARENT_RANGE64; 516 break; 517 default: 518 case maple_dense: 519 case maple_leaf_64: 520 shift = type = 0; 521 break; 522 } 523 524 val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */ 525 val |= (slot << shift) | type; 526 mte_to_node(enode)->parent = ma_parent_ptr(val); 527 } 528 529 /* 530 * mte_parent_slot() - get the parent slot of @enode. 531 * @enode: The encoded maple node. 532 * 533 * Return: The slot in the parent node where @enode resides. 534 */ 535 static __always_inline 536 unsigned int mte_parent_slot(const struct maple_enode *enode) 537 { 538 unsigned long val = (unsigned long)mte_to_node(enode)->parent; 539 540 if (unlikely(val & MA_ROOT_PARENT)) 541 return 0; 542 543 /* 544 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost 545 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT 546 */ 547 return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val); 548 } 549 550 /* 551 * mte_parent() - Get the parent of @node. 552 * @enode: The encoded maple node. 553 * 554 * Return: The parent maple node. 555 */ 556 static __always_inline 557 struct maple_node *mte_parent(const struct maple_enode *enode) 558 { 559 return (void *)((unsigned long) 560 (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK); 561 } 562 563 /* 564 * ma_dead_node() - check if the @node is dead. 565 * @node: The maple node 566 * 567 * Return: true if dead, false otherwise. 568 */ 569 static __always_inline bool ma_dead_node(const struct maple_node *node) 570 { 571 struct maple_node *parent; 572 573 /* Do not reorder reads from the node prior to the parent check */ 574 smp_rmb(); 575 parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK); 576 return (parent == node); 577 } 578 579 /* 580 * mte_dead_node() - check if the @enode is dead. 581 * @enode: The encoded maple node 582 * 583 * Return: true if dead, false otherwise. 584 */ 585 static __always_inline bool mte_dead_node(const struct maple_enode *enode) 586 { 587 struct maple_node *parent, *node; 588 589 node = mte_to_node(enode); 590 /* Do not reorder reads from the node prior to the parent check */ 591 smp_rmb(); 592 parent = mte_parent(enode); 593 return (parent == node); 594 } 595 596 /* 597 * mas_allocated() - Get the number of nodes allocated in a maple state. 598 * @mas: The maple state 599 * 600 * The ma_state alloc member is overloaded to hold a pointer to the first 601 * allocated node or to the number of requested nodes to allocate. If bit 0 is 602 * set, then the alloc contains the number of requested nodes. If there is an 603 * allocated node, then the total allocated nodes is in that node. 604 * 605 * Return: The total number of nodes allocated 606 */ 607 static inline unsigned long mas_allocated(const struct ma_state *mas) 608 { 609 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) 610 return 0; 611 612 return mas->alloc->total; 613 } 614 615 /* 616 * mas_set_alloc_req() - Set the requested number of allocations.
617 * @mas: the maple state 618 * @count: the number of allocations. 619 * 620 * The requested number of allocations is either in the first allocated node, 621 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is 622 * no allocated node. Set the request either in the node or do the necessary 623 * encoding to store in @mas->alloc directly. 624 */ 625 static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count) 626 { 627 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) { 628 if (!count) 629 mas->alloc = NULL; 630 else 631 mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U); 632 return; 633 } 634 635 mas->alloc->request_count = count; 636 } 637 638 /* 639 * mas_alloc_req() - get the requested number of allocations. 640 * @mas: The maple state 641 * 642 * The alloc count is either stored directly in @mas, or in 643 * @mas->alloc->request_count if there is at least one node allocated. Decode 644 * the request count if it's stored directly in @mas->alloc. 645 * 646 * Return: The allocation request count. 647 */ 648 static inline unsigned int mas_alloc_req(const struct ma_state *mas) 649 { 650 if ((unsigned long)mas->alloc & 0x1) 651 return (unsigned long)(mas->alloc) >> 1; 652 else if (mas->alloc) 653 return mas->alloc->request_count; 654 return 0; 655 } 656 657 /* 658 * ma_pivots() - Get a pointer to the maple node pivots. 659 * @node: the maple node 660 * @type: the node type 661 * 662 * In the event of a dead node, this array may be %NULL 663 * 664 * Return: A pointer to the maple node pivots 665 */ 666 static inline unsigned long *ma_pivots(struct maple_node *node, 667 enum maple_type type) 668 { 669 switch (type) { 670 case maple_arange_64: 671 return node->ma64.pivot; 672 case maple_range_64: 673 case maple_leaf_64: 674 return node->mr64.pivot; 675 case maple_dense: 676 return NULL; 677 } 678 return NULL; 679 } 680 681 /* 682 * ma_gaps() - Get a pointer to the maple node gaps. 683 * @node: the maple node 684 * @type: the node type 685 * 686 * Return: A pointer to the maple node gaps 687 */ 688 static inline unsigned long *ma_gaps(struct maple_node *node, 689 enum maple_type type) 690 { 691 switch (type) { 692 case maple_arange_64: 693 return node->ma64.gap; 694 case maple_range_64: 695 case maple_leaf_64: 696 case maple_dense: 697 return NULL; 698 } 699 return NULL; 700 } 701 702 /* 703 * mas_safe_pivot() - get the pivot at @piv or mas->max. 704 * @mas: The maple state 705 * @pivots: The pointer to the maple node pivots 706 * @piv: The pivot to fetch 707 * @type: The maple node type 708 * 709 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max 710 * otherwise. 711 */ 712 static __always_inline unsigned long 713 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots, 714 unsigned char piv, enum maple_type type) 715 { 716 if (piv >= mt_pivots[type]) 717 return mas->max; 718 719 return pivots[piv]; 720 } 721 722 /* 723 * mas_safe_min() - Return the minimum for a given offset. 724 * @mas: The maple state 725 * @pivots: The pointer to the maple node pivots 726 * @offset: The offset into the pivot array 727 * 728 * Return: The minimum range value that is contained in @offset. 729 */ 730 static inline unsigned long 731 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset) 732 { 733 if (likely(offset)) 734 return pivots[offset - 1] + 1; 735 736 return mas->min; 737 } 738 739 /* 740 * mte_set_pivot() - Set a pivot to a value in an encoded maple node. 
741 * @mn: The encoded maple node 742 * @piv: The pivot offset 743 * @val: The value of the pivot 744 */ 745 static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, 746 unsigned long val) 747 { 748 struct maple_node *node = mte_to_node(mn); 749 enum maple_type type = mte_node_type(mn); 750 751 BUG_ON(piv >= mt_pivots[type]); 752 switch (type) { 753 case maple_range_64: 754 case maple_leaf_64: 755 node->mr64.pivot[piv] = val; 756 break; 757 case maple_arange_64: 758 node->ma64.pivot[piv] = val; 759 break; 760 case maple_dense: 761 break; 762 } 763 764 } 765 766 /* 767 * ma_slots() - Get a pointer to the maple node slots. 768 * @mn: The maple node 769 * @mt: The maple node type 770 * 771 * Return: A pointer to the maple node slots 772 */ 773 static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) 774 { 775 switch (mt) { 776 case maple_arange_64: 777 return mn->ma64.slot; 778 case maple_range_64: 779 case maple_leaf_64: 780 return mn->mr64.slot; 781 case maple_dense: 782 return mn->slot; 783 } 784 785 return NULL; 786 } 787 788 static inline bool mt_write_locked(const struct maple_tree *mt) 789 { 790 return mt_external_lock(mt) ? mt_write_lock_is_held(mt) : 791 lockdep_is_held(&mt->ma_lock); 792 } 793 794 static __always_inline bool mt_locked(const struct maple_tree *mt) 795 { 796 return mt_external_lock(mt) ? mt_lock_is_held(mt) : 797 lockdep_is_held(&mt->ma_lock); 798 } 799 800 static __always_inline void *mt_slot(const struct maple_tree *mt, 801 void __rcu **slots, unsigned char offset) 802 { 803 return rcu_dereference_check(slots[offset], mt_locked(mt)); 804 } 805 806 static __always_inline void *mt_slot_locked(struct maple_tree *mt, 807 void __rcu **slots, unsigned char offset) 808 { 809 return rcu_dereference_protected(slots[offset], mt_write_locked(mt)); 810 } 811 /* 812 * mas_slot_locked() - Get the slot value when holding the maple tree lock. 813 * @mas: The maple state 814 * @slots: The pointer to the slots 815 * @offset: The offset into the slots array to fetch 816 * 817 * Return: The entry stored in @slots at the @offset. 818 */ 819 static __always_inline void *mas_slot_locked(struct ma_state *mas, 820 void __rcu **slots, unsigned char offset) 821 { 822 return mt_slot_locked(mas->tree, slots, offset); 823 } 824 825 /* 826 * mas_slot() - Get the slot value when not holding the maple tree lock. 827 * @mas: The maple state 828 * @slots: The pointer to the slots 829 * @offset: The offset into the slots array to fetch 830 * 831 * Return: The entry stored in @slots at the @offset 832 */ 833 static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots, 834 unsigned char offset) 835 { 836 return mt_slot(mas->tree, slots, offset); 837 } 838 839 /* 840 * mas_root() - Get the maple tree root. 841 * @mas: The maple state. 842 * 843 * Return: The pointer to the root of the tree 844 */ 845 static __always_inline void *mas_root(struct ma_state *mas) 846 { 847 return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree)); 848 } 849 850 static inline void *mt_root_locked(struct maple_tree *mt) 851 { 852 return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt)); 853 } 854 855 /* 856 * mas_root_locked() - Get the maple tree root when holding the maple tree lock. 857 * @mas: The maple state. 
858 * 859 * Return: The pointer to the root of the tree 860 */ 861 static inline void *mas_root_locked(struct ma_state *mas) 862 { 863 return mt_root_locked(mas->tree); 864 } 865 866 static inline struct maple_metadata *ma_meta(struct maple_node *mn, 867 enum maple_type mt) 868 { 869 switch (mt) { 870 case maple_arange_64: 871 return &mn->ma64.meta; 872 default: 873 return &mn->mr64.meta; 874 } 875 } 876 877 /* 878 * ma_set_meta() - Set the metadata information of a node. 879 * @mn: The maple node 880 * @mt: The maple node type 881 * @offset: The offset of the highest sub-gap in this node. 882 * @end: The end of the data in this node. 883 */ 884 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt, 885 unsigned char offset, unsigned char end) 886 { 887 struct maple_metadata *meta = ma_meta(mn, mt); 888 889 meta->gap = offset; 890 meta->end = end; 891 } 892 893 /* 894 * mt_clear_meta() - clear the metadata information of a node, if it exists 895 * @mt: The maple tree 896 * @mn: The maple node 897 * @type: The maple node type 898 */ 899 static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn, 900 enum maple_type type) 901 { 902 struct maple_metadata *meta; 903 unsigned long *pivots; 904 void __rcu **slots; 905 void *next; 906 907 switch (type) { 908 case maple_range_64: 909 pivots = mn->mr64.pivot; 910 if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) { 911 slots = mn->mr64.slot; 912 next = mt_slot_locked(mt, slots, 913 MAPLE_RANGE64_SLOTS - 1); 914 if (unlikely((mte_to_node(next) && 915 mte_node_type(next)))) 916 return; /* no metadata, could be node */ 917 } 918 fallthrough; 919 case maple_arange_64: 920 meta = ma_meta(mn, type); 921 break; 922 default: 923 return; 924 } 925 926 meta->gap = 0; 927 meta->end = 0; 928 } 929 930 /* 931 * ma_meta_end() - Get the data end of a node from the metadata 932 * @mn: The maple node 933 * @mt: The maple node type 934 */ 935 static inline unsigned char ma_meta_end(struct maple_node *mn, 936 enum maple_type mt) 937 { 938 struct maple_metadata *meta = ma_meta(mn, mt); 939 940 return meta->end; 941 } 942 943 /* 944 * ma_meta_gap() - Get the largest gap location of a node from the metadata 945 * @mn: The maple node 946 */ 947 static inline unsigned char ma_meta_gap(struct maple_node *mn) 948 { 949 return mn->ma64.meta.gap; 950 } 951 952 /* 953 * ma_set_meta_gap() - Set the largest gap location in a node's metadata 954 * @mn: The maple node 955 * @mt: The maple node type 956 * @offset: The location of the largest gap. 957 */ 958 static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt, 959 unsigned char offset) 960 { 962 struct maple_metadata *meta = ma_meta(mn, mt); 963 964 meta->gap = offset; 965 } 966 967 /* 968 * mat_add() - Add a @dead_enode to the ma_topiary list of dead nodes. 969 * @mat: the ma_topiary, a linked list of dead nodes. 970 * @dead_enode: the node to be marked as dead and added to the tail of the list 971 * 972 * Add the @dead_enode to the linked list in @mat.
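 *
 * Example (illustrative): dead nodes are queued on a topiary list while a
 * subtree is rewritten and are released in bulk once the replacement is
 * published:
 *
 *	MA_TOPIARY(mat, mas->tree);
 *	mat_add(&mat, old_enode);
 *	...
 *	mas_mat_destroy(mas, &mat);	// defined below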
973 */ 974 static inline void mat_add(struct ma_topiary *mat, 975 struct maple_enode *dead_enode) 976 { 977 mte_set_node_dead(dead_enode); 978 mte_to_mat(dead_enode)->next = NULL; 979 if (!mat->tail) { 980 mat->tail = mat->head = dead_enode; 981 return; 982 } 983 984 mte_to_mat(mat->tail)->next = dead_enode; 985 mat->tail = dead_enode; 986 } 987 988 static void mt_free_walk(struct rcu_head *head); 989 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, 990 bool free); 991 /* 992 * mas_mat_destroy() - Free all nodes and subtrees in a dead list. 993 * @mas: the maple state 994 * @mat: the ma_topiary linked list of dead nodes to free. 995 * 996 * Destroy walk a dead list. 997 */ 998 static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat) 999 { 1000 struct maple_enode *next; 1001 struct maple_node *node; 1002 bool in_rcu = mt_in_rcu(mas->tree); 1003 1004 while (mat->head) { 1005 next = mte_to_mat(mat->head)->next; 1006 node = mte_to_node(mat->head); 1007 mt_destroy_walk(mat->head, mas->tree, !in_rcu); 1008 if (in_rcu) 1009 call_rcu(&node->rcu, mt_free_walk); 1010 mat->head = next; 1011 } 1012 } 1013 /* 1014 * mas_descend() - Descend into the slot stored in the ma_state. 1015 * @mas: the maple state. 1016 * 1017 * Note: Not RCU safe, only use in write side or debug code. 1018 */ 1019 static inline void mas_descend(struct ma_state *mas) 1020 { 1021 enum maple_type type; 1022 unsigned long *pivots; 1023 struct maple_node *node; 1024 void __rcu **slots; 1025 1026 node = mas_mn(mas); 1027 type = mte_node_type(mas->node); 1028 pivots = ma_pivots(node, type); 1029 slots = ma_slots(node, type); 1030 1031 if (mas->offset) 1032 mas->min = pivots[mas->offset - 1] + 1; 1033 mas->max = mas_safe_pivot(mas, pivots, mas->offset, type); 1034 mas->node = mas_slot(mas, slots, mas->offset); 1035 } 1036 1037 /* 1038 * mte_set_gap() - Set a maple node gap. 1039 * @mn: The encoded maple node 1040 * @gap: The offset of the gap to set 1041 * @val: The gap value 1042 */ 1043 static inline void mte_set_gap(const struct maple_enode *mn, 1044 unsigned char gap, unsigned long val) 1045 { 1046 switch (mte_node_type(mn)) { 1047 default: 1048 break; 1049 case maple_arange_64: 1050 mte_to_node(mn)->ma64.gap[gap] = val; 1051 break; 1052 } 1053 } 1054 1055 /* 1056 * mas_ascend() - Walk up a level of the tree. 1057 * @mas: The maple state 1058 * 1059 * Sets the @mas->max and @mas->min to the correct values when walking up. This 1060 * may cause several levels of walking up to find the correct min and max. 1061 * May find a dead node which will cause a premature return. 1062 * Return: 1 on dead node, 0 otherwise 1063 */ 1064 static int mas_ascend(struct ma_state *mas) 1065 { 1066 struct maple_enode *p_enode; /* parent enode. */ 1067 struct maple_enode *a_enode; /* ancestor enode. */ 1068 struct maple_node *a_node; /* ancestor node. */ 1069 struct maple_node *p_node; /* parent node. 
*/ 1070 unsigned char a_slot; 1071 enum maple_type a_type; 1072 unsigned long min, max; 1073 unsigned long *pivots; 1074 bool set_max = false, set_min = false; 1075 1076 a_node = mas_mn(mas); 1077 if (ma_is_root(a_node)) { 1078 mas->offset = 0; 1079 return 0; 1080 } 1081 1082 p_node = mte_parent(mas->node); 1083 if (unlikely(a_node == p_node)) 1084 return 1; 1085 1086 a_type = mas_parent_type(mas, mas->node); 1087 mas->offset = mte_parent_slot(mas->node); 1088 a_enode = mt_mk_node(p_node, a_type); 1089 1090 /* Check to make sure all parent information is still accurate */ 1091 if (p_node != mte_parent(mas->node)) 1092 return 1; 1093 1094 mas->node = a_enode; 1095 1096 if (mte_is_root(a_enode)) { 1097 mas->max = ULONG_MAX; 1098 mas->min = 0; 1099 return 0; 1100 } 1101 1102 min = 0; 1103 max = ULONG_MAX; 1104 if (!mas->offset) { 1105 min = mas->min; 1106 set_min = true; 1107 } 1108 1109 if (mas->max == ULONG_MAX) 1110 set_max = true; 1111 1112 do { 1113 p_enode = a_enode; 1114 a_type = mas_parent_type(mas, p_enode); 1115 a_node = mte_parent(p_enode); 1116 a_slot = mte_parent_slot(p_enode); 1117 a_enode = mt_mk_node(a_node, a_type); 1118 pivots = ma_pivots(a_node, a_type); 1119 1120 if (unlikely(ma_dead_node(a_node))) 1121 return 1; 1122 1123 if (!set_min && a_slot) { 1124 set_min = true; 1125 min = pivots[a_slot - 1] + 1; 1126 } 1127 1128 if (!set_max && a_slot < mt_pivots[a_type]) { 1129 set_max = true; 1130 max = pivots[a_slot]; 1131 } 1132 1133 if (unlikely(ma_dead_node(a_node))) 1134 return 1; 1135 1136 if (unlikely(ma_is_root(a_node))) 1137 break; 1138 1139 } while (!set_min || !set_max); 1140 1141 mas->max = max; 1142 mas->min = min; 1143 return 0; 1144 } 1145 1146 /* 1147 * mas_pop_node() - Get a previously allocated maple node from the maple state. 1148 * @mas: The maple state 1149 * 1150 * Return: A pointer to a maple node. 1151 */ 1152 static inline struct maple_node *mas_pop_node(struct ma_state *mas) 1153 { 1154 struct maple_alloc *ret, *node = mas->alloc; 1155 unsigned long total = mas_allocated(mas); 1156 unsigned int req = mas_alloc_req(mas); 1157 1158 /* nothing or a request pending. */ 1159 if (WARN_ON(!total)) 1160 return NULL; 1161 1162 if (total == 1) { 1163 /* single allocation in this ma_state */ 1164 mas->alloc = NULL; 1165 ret = node; 1166 goto single_node; 1167 } 1168 1169 if (node->node_count == 1) { 1170 /* Single allocation in this node. */ 1171 mas->alloc = node->slot[0]; 1172 mas->alloc->total = node->total - 1; 1173 ret = node; 1174 goto new_head; 1175 } 1176 node->total--; 1177 ret = node->slot[--node->node_count]; 1178 node->slot[node->node_count] = NULL; 1179 1180 single_node: 1181 new_head: 1182 if (req) { 1183 req++; 1184 mas_set_alloc_req(mas, req); 1185 } 1186 1187 memset(ret, 0, sizeof(*ret)); 1188 return (struct maple_node *)ret; 1189 } 1190 1191 /* 1192 * mas_push_node() - Push a node back on the maple state allocation. 1193 * @mas: The maple state 1194 * @used: The used maple node 1195 * 1196 * Stores the maple node back into @mas->alloc for reuse. Updates allocated and 1197 * requested node count as necessary. 
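 *
 * Example (illustrative): a write path that popped a node it did not end up
 * using hands it back for reuse instead of freeing it:
 *
 *	struct maple_node *mn = mas_pop_node(mas);
 *	...
 *	mas_push_node(mas, mn);		// unused; keep it on the stack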
1198 */ 1199 static inline void mas_push_node(struct ma_state *mas, struct maple_node *used) 1200 { 1201 struct maple_alloc *reuse = (struct maple_alloc *)used; 1202 struct maple_alloc *head = mas->alloc; 1203 unsigned long count; 1204 unsigned int requested = mas_alloc_req(mas); 1205 1206 count = mas_allocated(mas); 1207 1208 reuse->request_count = 0; 1209 reuse->node_count = 0; 1210 if (count) { 1211 if (head->node_count < MAPLE_ALLOC_SLOTS) { 1212 head->slot[head->node_count++] = reuse; 1213 head->total++; 1214 goto done; 1215 } 1216 reuse->slot[0] = head; 1217 reuse->node_count = 1; 1218 } 1219 1220 reuse->total = count + 1; 1221 mas->alloc = reuse; 1222 done: 1223 if (requested > 1) 1224 mas_set_alloc_req(mas, requested - 1); 1225 } 1226 1227 /* 1228 * mas_alloc_nodes() - Allocate nodes into a maple state 1229 * @mas: The maple state 1230 * @gfp: The GFP Flags 1231 */ 1232 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) 1233 { 1234 struct maple_alloc *node; 1235 unsigned long allocated = mas_allocated(mas); 1236 unsigned int requested = mas_alloc_req(mas); 1237 unsigned int count; 1238 void **slots = NULL; 1239 unsigned int max_req = 0; 1240 1241 if (!requested) 1242 return; 1243 1244 mas_set_alloc_req(mas, 0); 1245 if (mas->mas_flags & MA_STATE_PREALLOC) { 1246 if (allocated) 1247 return; 1249 WARN_ON(!allocated); 1250 } 1251 1252 if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) { 1253 node = (struct maple_alloc *)mt_alloc_one(gfp); 1254 if (!node) 1255 goto nomem_one; 1256 1257 if (allocated) { 1258 node->slot[0] = mas->alloc; 1259 node->node_count = 1; 1260 } else { 1261 node->node_count = 0; 1262 } 1263 1264 mas->alloc = node; 1265 node->total = ++allocated; 1266 node->request_count = 0; 1267 requested--; 1268 } 1269 1270 node = mas->alloc; 1271 while (requested) { 1272 max_req = MAPLE_ALLOC_SLOTS - node->node_count; 1273 slots = (void **)&node->slot[node->node_count]; 1274 max_req = min(requested, max_req); 1275 count = mt_alloc_bulk(gfp, max_req, slots); 1276 if (!count) 1277 goto nomem_bulk; 1278 1279 if (node->node_count == 0) { 1280 node->slot[0]->node_count = 0; 1281 node->slot[0]->request_count = 0; 1282 } 1283 1284 node->node_count += count; 1285 allocated += count; 1286 /* find a non-full node */ 1287 do { 1288 node = node->slot[0]; 1289 } while (unlikely(node->node_count == MAPLE_ALLOC_SLOTS)); 1290 requested -= count; 1291 } 1292 mas->alloc->total = allocated; 1293 return; 1294 1295 nomem_bulk: 1296 /* Clean up potential freed allocations on bulk failure */ 1297 memset(slots, 0, max_req * sizeof(unsigned long)); 1298 mas->alloc->total = allocated; 1299 nomem_one: 1300 mas_set_alloc_req(mas, requested); 1301 mas_set_err(mas, -ENOMEM); 1302 } 1303 1304 /* 1305 * mas_free() - Free an encoded maple node 1306 * @mas: The maple state 1307 * @used: The encoded maple node to free. 1308 * 1309 * Uses rcu free if necessary, pushes @used back on the maple state allocations 1310 * otherwise. 1311 */ 1312 static inline void mas_free(struct ma_state *mas, struct maple_enode *used) 1313 { 1314 struct maple_node *tmp = mte_to_node(used); 1315 1316 if (mt_in_rcu(mas->tree)) 1317 ma_free_rcu(tmp); 1318 else 1319 mas_push_node(mas, tmp); 1320 } 1321 1322 /* 1323 * mas_node_count_gfp() - Check if enough nodes are allocated and request more 1324 * if there are not enough nodes.
1325 * @mas: The maple state 1326 * @count: The number of nodes needed 1327 * @gfp: the gfp flags 1328 */ 1329 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp) 1330 { 1331 unsigned long allocated = mas_allocated(mas); 1332 1333 if (allocated < count) { 1334 mas_set_alloc_req(mas, count - allocated); 1335 mas_alloc_nodes(mas, gfp); 1336 } 1337 } 1338 1339 /* 1340 * mas_node_count() - Check if enough nodes are allocated and request more if 1341 * there are not enough nodes. 1342 * @mas: The maple state 1343 * @count: The number of nodes needed 1344 * 1345 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags. 1346 */ 1347 static void mas_node_count(struct ma_state *mas, int count) 1348 { 1349 return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN); 1350 } 1351 1352 /* 1353 * mas_start() - Sets up maple state for operations. 1354 * @mas: The maple state. 1355 * 1356 * If mas->status == ma_start, then set the min, max and depth to 1357 * defaults. 1358 * 1359 * Return: 1360 * - If mas->node is an error or not ma_start, return NULL. 1361 * - If it's an empty tree: NULL & mas->status == ma_none 1362 * - If it's a single entry: The entry & mas->status == ma_root 1363 * - If it's a tree: NULL & mas->status == ma_active 1364 */ 1365 static inline struct maple_enode *mas_start(struct ma_state *mas) 1366 { 1367 if (likely(mas_is_start(mas))) { 1368 struct maple_enode *root; 1369 1370 mas->min = 0; 1371 mas->max = ULONG_MAX; 1372 1373 retry: 1374 mas->depth = 0; 1375 root = mas_root(mas); 1376 /* Tree with nodes */ 1377 if (likely(xa_is_node(root))) { 1378 mas->depth = 1; 1379 mas->status = ma_active; 1380 mas->node = mte_safe_root(root); 1381 mas->offset = 0; 1382 if (mte_dead_node(mas->node)) 1383 goto retry; 1384 1385 return NULL; 1386 } 1387 1388 mas->node = NULL; 1389 /* empty tree */ 1390 if (unlikely(!root)) { 1391 mas->status = ma_none; 1392 mas->offset = MAPLE_NODE_SLOTS; 1393 return NULL; 1394 } 1395 1396 /* Single entry tree */ 1397 mas->status = ma_root; 1398 mas->offset = MAPLE_NODE_SLOTS; 1399 1400 /* Single entry tree. */ 1401 if (mas->index > 0) 1402 return NULL; 1403 1404 return root; 1405 } 1406 1407 return NULL; 1408 } 1409 1410 /* 1411 * ma_data_end() - Find the end of the data in a node. 1412 * @node: The maple node 1413 * @type: The maple node type 1414 * @pivots: The array of pivots in the node 1415 * @max: The maximum value in the node 1416 * 1417 * Uses metadata to find the end of the data when possible. 1418 * Return: The zero indexed last slot with data (may be null). 1419 */ 1420 static __always_inline unsigned char ma_data_end(struct maple_node *node, 1421 enum maple_type type, unsigned long *pivots, unsigned long max) 1422 { 1423 unsigned char offset; 1424 1425 if (!pivots) 1426 return 0; 1427 1428 if (type == maple_arange_64) 1429 return ma_meta_end(node, type); 1430 1431 offset = mt_pivots[type] - 1; 1432 if (likely(!pivots[offset])) 1433 return ma_meta_end(node, type); 1434 1435 if (likely(pivots[offset] == max)) 1436 return offset; 1437 1438 return mt_pivots[type]; 1439 } 1440 1441 /* 1442 * mas_data_end() - Find the end of the data (slot). 1443 * @mas: the maple state 1444 * 1445 * This method is optimized to check the metadata of a node if the node type 1446 * supports data end metadata. 1447 * 1448 * Return: The zero indexed last slot with data (may be null).
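 *
 * Example (illustrative, values assumed): a maple_range_64 node holding only
 * three entries leaves pivots[14] == 0, so the end offset (2) comes from
 * ma_meta_end(); a full node instead has pivots[14] == mas->max (end is 14)
 * or a non-zero final pivot (end is 15).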
1449 */ 1450 static inline unsigned char mas_data_end(struct ma_state *mas) 1451 { 1452 enum maple_type type; 1453 struct maple_node *node; 1454 unsigned char offset; 1455 unsigned long *pivots; 1456 1457 type = mte_node_type(mas->node); 1458 node = mas_mn(mas); 1459 if (type == maple_arange_64) 1460 return ma_meta_end(node, type); 1461 1462 pivots = ma_pivots(node, type); 1463 if (unlikely(ma_dead_node(node))) 1464 return 0; 1465 1466 offset = mt_pivots[type] - 1; 1467 if (likely(!pivots[offset])) 1468 return ma_meta_end(node, type); 1469 1470 if (likely(pivots[offset] == mas->max)) 1471 return offset; 1472 1473 return mt_pivots[type]; 1474 } 1475 1476 /* 1477 * mas_leaf_max_gap() - Returns the largest gap in a leaf node 1478 * @mas: the maple state 1479 * 1480 * Return: The maximum gap in the leaf. 1481 */ 1482 static unsigned long mas_leaf_max_gap(struct ma_state *mas) 1483 { 1484 enum maple_type mt; 1485 unsigned long pstart, gap, max_gap; 1486 struct maple_node *mn; 1487 unsigned long *pivots; 1488 void __rcu **slots; 1489 unsigned char i; 1490 unsigned char max_piv; 1491 1492 mt = mte_node_type(mas->node); 1493 mn = mas_mn(mas); 1494 slots = ma_slots(mn, mt); 1495 max_gap = 0; 1496 if (unlikely(ma_is_dense(mt))) { 1497 gap = 0; 1498 for (i = 0; i < mt_slots[mt]; i++) { 1499 if (slots[i]) { 1500 if (gap > max_gap) 1501 max_gap = gap; 1502 gap = 0; 1503 } else { 1504 gap++; 1505 } 1506 } 1507 if (gap > max_gap) 1508 max_gap = gap; 1509 return max_gap; 1510 } 1511 1512 /* 1513 * Checking the first implied pivot optimizes the loop below, and slot 1 1514 * may be skipped if there is a gap in slot 0. 1515 */ 1516 pivots = ma_pivots(mn, mt); 1517 if (likely(!slots[0])) { 1518 max_gap = pivots[0] - mas->min + 1; 1519 i = 2; 1520 } else { 1521 i = 1; 1522 } 1523 1524 /* reduce max_piv as the special case is checked before the loop */ 1525 max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1; 1526 /* 1527 * Check end implied pivot which can only be a gap on the right most 1528 * node. 1529 */ 1530 if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) { 1531 gap = ULONG_MAX - pivots[max_piv]; 1532 if (gap > max_gap) 1533 max_gap = gap; 1534 1535 if (max_gap > pivots[max_piv] - mas->min) 1536 return max_gap; 1537 } 1538 1539 for (; i <= max_piv; i++) { 1540 /* data == no gap. */ 1541 if (likely(slots[i])) 1542 continue; 1543 1544 pstart = pivots[i - 1]; 1545 gap = pivots[i] - pstart; 1546 if (gap > max_gap) 1547 max_gap = gap; 1548 1549 /* There cannot be two gaps in a row. */ 1550 i++; 1551 } 1552 return max_gap; 1553 } 1554 1555 /* 1556 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf) 1557 * @node: The maple node 1558 * @gaps: The pointer to the gaps 1559 * @mt: The maple node type 1560 * @off: Pointer to store the offset location of the gap. 1561 * 1562 * Uses the metadata data end to scan backwards across set gaps. 1563 * 1564 * Return: The maximum gap value 1565 */ 1566 static inline unsigned long 1567 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt, 1568 unsigned char *off) 1569 { 1570 unsigned char offset, i; 1571 unsigned long max_gap = 0; 1572 1573 i = offset = ma_meta_end(node, mt); 1574 do { 1575 if (gaps[i] > max_gap) { 1576 max_gap = gaps[i]; 1577 offset = i; 1578 } 1579 } while (i--); 1580 1581 *off = offset; 1582 return max_gap; 1583 } 1584 1585 /* 1586 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. 1587 * @mas: The maple state. 1588 * 1589 * Return: The gap value.
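 *
 * Example (illustrative, values assumed): for an arange_64 node whose gap
 * array starts {0, 30, 0, 7, ...}, ma_meta_gap() holds offset 1, so this
 * returns gaps[1] == 30 without rescanning the gap array.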
1590 */ 1591 static inline unsigned long mas_max_gap(struct ma_state *mas) 1592 { 1593 unsigned long *gaps; 1594 unsigned char offset; 1595 enum maple_type mt; 1596 struct maple_node *node; 1597 1598 mt = mte_node_type(mas->node); 1599 if (ma_is_leaf(mt)) 1600 return mas_leaf_max_gap(mas); 1601 1602 node = mas_mn(mas); 1603 MAS_BUG_ON(mas, mt != maple_arange_64); 1604 offset = ma_meta_gap(node); 1605 gaps = ma_gaps(node, mt); 1606 return gaps[offset]; 1607 } 1608 1609 /* 1610 * mas_parent_gap() - Set the parent gap and any gaps above, as needed 1611 * @mas: The maple state 1612 * @offset: The gap offset in the parent to set 1613 * @new: The new gap value. 1614 * 1615 * Set the parent gap then continue to set the gap upwards, using the metadata 1616 * of the parent to see if it is necessary to check the node above. 1617 */ 1618 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, 1619 unsigned long new) 1620 { 1621 unsigned long meta_gap = 0; 1622 struct maple_node *pnode; 1623 struct maple_enode *penode; 1624 unsigned long *pgaps; 1625 unsigned char meta_offset; 1626 enum maple_type pmt; 1627 1628 pnode = mte_parent(mas->node); 1629 pmt = mas_parent_type(mas, mas->node); 1630 penode = mt_mk_node(pnode, pmt); 1631 pgaps = ma_gaps(pnode, pmt); 1632 1633 ascend: 1634 MAS_BUG_ON(mas, pmt != maple_arange_64); 1635 meta_offset = ma_meta_gap(pnode); 1636 meta_gap = pgaps[meta_offset]; 1637 1638 pgaps[offset] = new; 1639 1640 if (meta_gap == new) 1641 return; 1642 1643 if (offset != meta_offset) { 1644 if (meta_gap > new) 1645 return; 1646 1647 ma_set_meta_gap(pnode, pmt, offset); 1648 } else if (new < meta_gap) { 1649 new = ma_max_gap(pnode, pgaps, pmt, &meta_offset); 1650 ma_set_meta_gap(pnode, pmt, meta_offset); 1651 } 1652 1653 if (ma_is_root(pnode)) 1654 return; 1655 1656 /* Go to the parent node. */ 1657 pnode = mte_parent(penode); 1658 pmt = mas_parent_type(mas, penode); 1659 pgaps = ma_gaps(pnode, pmt); 1660 offset = mte_parent_slot(penode); 1661 penode = mt_mk_node(pnode, pmt); 1662 goto ascend; 1663 } 1664 1665 /* 1666 * mas_update_gap() - Update a node's gaps and propagate up if necessary. 1667 * @mas: the maple state. 1668 */ 1669 static inline void mas_update_gap(struct ma_state *mas) 1670 { 1671 unsigned char pslot; 1672 unsigned long p_gap; 1673 unsigned long max_gap; 1674 1675 if (!mt_is_alloc(mas->tree)) 1676 return; 1677 1678 if (mte_is_root(mas->node)) 1679 return; 1680 1681 max_gap = mas_max_gap(mas); 1682 1683 pslot = mte_parent_slot(mas->node); 1684 p_gap = ma_gaps(mte_parent(mas->node), 1685 mas_parent_type(mas, mas->node))[pslot]; 1686 1687 if (p_gap != max_gap) 1688 mas_parent_gap(mas, pslot, max_gap); 1689 } 1690 1691 /* 1692 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to 1693 * @parent with the slot encoded. 1694 * @mas: the maple state (for the tree) 1695 * @parent: the maple encoded node containing the children.
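 *
 * Example (illustrative): after a node is copied for replacement, its
 * children still point at the old node, so the copy adopts them:
 *
 *	mas->node = mt_mk_node(new_node, type);	// new_node: the copy (assumed)
 *	mas_adopt_children(mas, mas->node);	// children now point at the copy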
1696 */ 1697 static inline void mas_adopt_children(struct ma_state *mas, 1698 struct maple_enode *parent) 1699 { 1700 enum maple_type type = mte_node_type(parent); 1701 struct maple_node *node = mte_to_node(parent); 1702 void __rcu **slots = ma_slots(node, type); 1703 unsigned long *pivots = ma_pivots(node, type); 1704 struct maple_enode *child; 1705 unsigned char offset; 1706 1707 offset = ma_data_end(node, type, pivots, mas->max); 1708 do { 1709 child = mas_slot_locked(mas, slots, offset); 1710 mas_set_parent(mas, child, parent, offset); 1711 } while (offset--); 1712 } 1713 1714 /* 1715 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old 1716 * node as dead. 1717 * @mas: the maple state with the new node 1718 * @old_enode: The old maple encoded node to replace. 1719 */ 1720 static inline void mas_put_in_tree(struct ma_state *mas, 1721 struct maple_enode *old_enode) 1722 __must_hold(mas->tree->ma_lock) 1723 { 1724 unsigned char offset; 1725 void __rcu **slots; 1726 1727 if (mte_is_root(mas->node)) { 1728 mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas)); 1729 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 1730 mas_set_height(mas); 1731 } else { 1733 offset = mte_parent_slot(mas->node); 1734 slots = ma_slots(mte_parent(mas->node), 1735 mas_parent_type(mas, mas->node)); 1736 rcu_assign_pointer(slots[offset], mas->node); 1737 } 1738 1739 mte_set_node_dead(old_enode); 1740 } 1741 1742 /* 1743 * mas_replace_node() - Replace a node by putting it in the tree, marking it 1744 * dead, and freeing it. 1745 * Uses the parent encoding to locate the maple node in the tree. 1746 * @mas: the ma_state with @mas->node pointing to the new node. 1747 * @old_enode: The old maple encoded node. 1748 */ 1749 static inline void mas_replace_node(struct ma_state *mas, 1750 struct maple_enode *old_enode) 1751 __must_hold(mas->tree->ma_lock) 1752 { 1753 mas_put_in_tree(mas, old_enode); 1754 mas_free(mas, old_enode); 1755 } 1756 1757 /* 1758 * mas_find_child() - Find a child whose parent is @mas->node. 1759 * @mas: the maple state with the parent. 1760 * @child: the maple state to store the child. 1761 */ 1762 static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child) 1763 __must_hold(mas->tree->ma_lock) 1764 { 1765 enum maple_type mt; 1766 unsigned char offset; 1767 unsigned char end; 1768 unsigned long *pivots; 1769 struct maple_enode *entry; 1770 struct maple_node *node; 1771 void __rcu **slots; 1772 1773 mt = mte_node_type(mas->node); 1774 node = mas_mn(mas); 1775 slots = ma_slots(node, mt); 1776 pivots = ma_pivots(node, mt); 1777 end = ma_data_end(node, mt, pivots, mas->max); 1778 for (offset = mas->offset; offset <= end; offset++) { 1779 entry = mas_slot_locked(mas, slots, offset); 1780 if (mte_parent(entry) == node) { 1781 *child = *mas; 1782 mas->offset = offset + 1; 1783 child->offset = offset; 1784 mas_descend(child); 1785 child->offset = 0; 1786 return true; 1787 } 1788 } 1789 return false; 1790 } 1791 1792 /* 1793 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the 1794 * old data or set b_node->b_end.
1795 * @b_node: the maple_big_node 1796 * @shift: the shift count 1797 */ 1798 static inline void mab_shift_right(struct maple_big_node *b_node, 1799 unsigned char shift) 1800 { 1801 unsigned long size = b_node->b_end * sizeof(unsigned long); 1802 1803 memmove(b_node->pivot + shift, b_node->pivot, size); 1804 memmove(b_node->slot + shift, b_node->slot, size); 1805 if (b_node->type == maple_arange_64) 1806 memmove(b_node->gap + shift, b_node->gap, size); 1807 } 1808 1809 /* 1810 * mab_middle_node() - Check if a middle node is needed (unlikely) 1811 * @b_node: the maple_big_node that contains the data. 1812 * @split: the potential split location 1813 * @slot_count: the size that can be stored in a single node being considered. 1814 * 1815 * Return: true if a middle node is required. 1816 */ 1817 static inline bool mab_middle_node(struct maple_big_node *b_node, int split, 1818 unsigned char slot_count) 1819 { 1820 unsigned char size = b_node->b_end; 1821 1822 if (size >= 2 * slot_count) 1823 return true; 1824 1825 if (!b_node->slot[split] && (size >= 2 * slot_count - 1)) 1826 return true; 1827 1828 return false; 1829 } 1830 1831 /* 1832 * mab_no_null_split() - ensure the split doesn't fall on a NULL 1833 * @b_node: the maple_big_node with the data 1834 * @split: the suggested split location 1835 * @slot_count: the number of slots in the node being considered. 1836 * 1837 * Return: the split location. 1838 */ 1839 static inline int mab_no_null_split(struct maple_big_node *b_node, 1840 unsigned char split, unsigned char slot_count) 1841 { 1842 if (!b_node->slot[split]) { 1843 /* 1844 * If the split is less than the max slot && the right side will 1845 * still be sufficient, then increment the split on NULL. 1846 */ 1847 if ((split < slot_count - 1) && 1848 (b_node->b_end - split) > (mt_min_slots[b_node->type])) 1849 split++; 1850 else 1851 split--; 1852 } 1853 return split; 1854 } 1855 1856 /* 1857 * mab_calc_split() - Calculate the split location and if there needs to be two 1858 * splits. 1859 * @mas: The maple state 1860 * @bn: The maple_big_node with the data 1861 * @mid_split: The second split, if required. 0 otherwise. 1862 * 1863 * Return: The first split location. The middle split is set in @mid_split. 1864 */ 1865 static inline int mab_calc_split(struct ma_state *mas, 1866 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) 1867 { 1868 unsigned char b_end = bn->b_end; 1869 int split = b_end / 2; /* Assume equal split. */ 1870 unsigned char slot_min, slot_count = mt_slots[bn->type]; 1871 1872 /* 1873 * To support gap tracking, all NULL entries are kept together and a node cannot 1874 * end on a NULL entry, with the exception of the left-most leaf. The 1875 * limitation means that the split of a node must be checked for this condition 1876 * and be able to put more data in one direction or the other. 1877 */ 1878 if (unlikely((mas->mas_flags & MA_STATE_BULK))) { 1879 *mid_split = 0; 1880 split = b_end - mt_min_slots[bn->type]; 1881 1882 if (!ma_is_leaf(bn->type)) 1883 return split; 1884 1885 mas->mas_flags |= MA_STATE_REBALANCE; 1886 if (!bn->slot[split]) 1887 split--; 1888 return split; 1889 } 1890 1891 /* 1892 * Although extremely rare, it is possible to enter what is known as the 3-way 1893 * split scenario. The 3-way split comes about by means of a store of a range 1894 * that overwrites the end and beginning of two full nodes. The result is a set 1895 * of entries that cannot be stored in 2 nodes. 
Sometimes, these two nodes can 1896 * also be located in different parent nodes which are also full. This can 1897 * carry upwards all the way to the root in the worst case. 1898 */ 1899 if (unlikely(mab_middle_node(bn, split, slot_count))) { 1900 split = b_end / 3; 1901 *mid_split = split * 2; 1902 } else { 1903 slot_min = mt_min_slots[bn->type]; 1904 1905 *mid_split = 0; 1906 /* 1907 * Avoid having a range less than the slot count unless it 1908 * causes one node to be deficient. 1909 * NOTE: mt_min_slots is 1 based, b_end and split are zero. 1910 */ 1911 while ((split < slot_count - 1) && 1912 ((bn->pivot[split] - min) < slot_count - 1) && 1913 (b_end - split > slot_min)) 1914 split++; 1915 } 1916 1917 /* Avoid ending a node on a NULL entry */ 1918 split = mab_no_null_split(bn, split, slot_count); 1919 1920 if (unlikely(*mid_split)) 1921 *mid_split = mab_no_null_split(bn, *mid_split, slot_count); 1922 1923 return split; 1924 } 1925 1926 /* 1927 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node 1928 * and set @b_node->b_end to the next free slot. 1929 * @mas: The maple state 1930 * @mas_start: The starting slot to copy 1931 * @mas_end: The end slot to copy (inclusively) 1932 * @b_node: The maple_big_node to place the data 1933 * @mab_start: The starting location in maple_big_node to store the data. 1934 */ 1935 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start, 1936 unsigned char mas_end, struct maple_big_node *b_node, 1937 unsigned char mab_start) 1938 { 1939 enum maple_type mt; 1940 struct maple_node *node; 1941 void __rcu **slots; 1942 unsigned long *pivots, *gaps; 1943 int i = mas_start, j = mab_start; 1944 unsigned char piv_end; 1945 1946 node = mas_mn(mas); 1947 mt = mte_node_type(mas->node); 1948 pivots = ma_pivots(node, mt); 1949 if (!i) { 1950 b_node->pivot[j] = pivots[i++]; 1951 if (unlikely(i > mas_end)) 1952 goto complete; 1953 j++; 1954 } 1955 1956 piv_end = min(mas_end, mt_pivots[mt]); 1957 for (; i < piv_end; i++, j++) { 1958 b_node->pivot[j] = pivots[i]; 1959 if (unlikely(!b_node->pivot[j])) 1960 goto complete; 1961 1962 if (unlikely(mas->max == b_node->pivot[j])) 1963 goto complete; 1964 } 1965 1966 b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt); 1967 1968 complete: 1969 b_node->b_end = ++j; 1970 j -= mab_start; 1971 slots = ma_slots(node, mt); 1972 memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j); 1973 if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) { 1974 gaps = ma_gaps(node, mt); 1975 memcpy(b_node->gap + mab_start, gaps + mas_start, 1976 sizeof(unsigned long) * j); 1977 } 1978 } 1979 1980 /* 1981 * mas_leaf_set_meta() - Set the metadata of a leaf if possible. 1982 * @node: The maple node 1983 * @mt: The maple type 1984 * @end: The node end 1985 */ 1986 static inline void mas_leaf_set_meta(struct maple_node *node, 1987 enum maple_type mt, unsigned char end) 1988 { 1989 if (end < mt_slots[mt] - 1) 1990 ma_set_meta(node, mt, 0, end); 1991 } 1992 1993 /* 1994 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node. 1995 * @b_node: the maple_big_node that has the data 1996 * @mab_start: the start location in @b_node. 1997 * @mab_end: The end location in @b_node (inclusively) 1998 * @mas: The maple state with the maple encoded node. 
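 *
 * Example (illustrative): a split gathers a node into a big node with
 * mas_mab_cp(), then writes each half back out to a new node:
 *
 *	mas_mab_cp(mas, 0, end, b_node, 0);		// node -> big node
 *	...
 *	mab_mas_cp(b_node, 0, split, &l_mas, true);	// left half
 *	mab_mas_cp(b_node, split + 1, b_node->b_end, &r_mas, true);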
1999 */ 2000 static inline void mab_mas_cp(struct maple_big_node *b_node, 2001 unsigned char mab_start, unsigned char mab_end, 2002 struct ma_state *mas, bool new_max) 2003 { 2004 int i, j = 0; 2005 enum maple_type mt = mte_node_type(mas->node); 2006 struct maple_node *node = mte_to_node(mas->node); 2007 void __rcu **slots = ma_slots(node, mt); 2008 unsigned long *pivots = ma_pivots(node, mt); 2009 unsigned long *gaps = NULL; 2010 unsigned char end; 2011 2012 if (mab_end - mab_start > mt_pivots[mt]) 2013 mab_end--; 2014 2015 if (!pivots[mt_pivots[mt] - 1]) 2016 slots[mt_pivots[mt]] = NULL; 2017 2018 i = mab_start; 2019 do { 2020 pivots[j++] = b_node->pivot[i++]; 2021 } while (i <= mab_end && likely(b_node->pivot[i])); 2022 2023 memcpy(slots, b_node->slot + mab_start, 2024 sizeof(void *) * (i - mab_start)); 2025 2026 if (new_max) 2027 mas->max = b_node->pivot[i - 1]; 2028 2029 end = j - 1; 2030 if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { 2031 unsigned long max_gap = 0; 2032 unsigned char offset = 0; 2033 2034 gaps = ma_gaps(node, mt); 2035 do { 2036 gaps[--j] = b_node->gap[--i]; 2037 if (gaps[j] > max_gap) { 2038 offset = j; 2039 max_gap = gaps[j]; 2040 } 2041 } while (j); 2042 2043 ma_set_meta(node, mt, offset, end); 2044 } else { 2045 mas_leaf_set_meta(node, mt, end); 2046 } 2047 } 2048 2049 /* 2050 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. 2051 * @mas: The maple state 2052 * @end: The maple node end 2053 * @mt: The maple node type 2054 */ 2055 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, 2056 enum maple_type mt) 2057 { 2058 if (!(mas->mas_flags & MA_STATE_BULK)) 2059 return; 2060 2061 if (mte_is_root(mas->node)) 2062 return; 2063 2064 if (end > mt_min_slots[mt]) { 2065 mas->mas_flags &= ~MA_STATE_REBALANCE; 2066 return; 2067 } 2068 } 2069 2070 /* 2071 * mas_store_b_node() - Store an @entry into the b_node while also copying the 2072 * data from a maple encoded node. 2073 * @wr_mas: the maple write state 2074 * @b_node: the maple_big_node to fill with data 2075 * @offset_end: the offset to end copying 2076 * 2077 * Return: The actual end of the data stored in @b_node 2078 */ 2079 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, 2080 struct maple_big_node *b_node, unsigned char offset_end) 2081 { 2082 unsigned char slot; 2083 unsigned char b_end; 2084 /* Possible underflow of piv will wrap back to 0 before use. */ 2085 unsigned long piv; 2086 struct ma_state *mas = wr_mas->mas; 2087 2088 b_node->type = wr_mas->type; 2089 b_end = 0; 2090 slot = mas->offset; 2091 if (slot) { 2092 /* Copy start data up to insert. */ 2093 mas_mab_cp(mas, 0, slot - 1, b_node, 0); 2094 b_end = b_node->b_end; 2095 piv = b_node->pivot[b_end - 1]; 2096 } else 2097 piv = mas->min - 1; 2098 2099 if (piv + 1 < mas->index) { 2100 /* Handle range starting after old range */ 2101 b_node->slot[b_end] = wr_mas->content; 2102 if (!wr_mas->content) 2103 b_node->gap[b_end] = mas->index - 1 - piv; 2104 b_node->pivot[b_end++] = mas->index - 1; 2105 } 2106 2107 /* Store the new entry. */ 2108 mas->offset = b_end; 2109 b_node->slot[b_end] = wr_mas->entry; 2110 b_node->pivot[b_end] = mas->last; 2111 2112 /* Appended. 
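The new entry ends at or beyond this node's maximum, so there is nothing more to copy.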
*/ 2113 if (mas->last >= mas->max) 2114 goto b_end; 2115 2116 /* Handle new range ending before old range ends */ 2117 piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); 2118 if (piv > mas->last) { 2119 if (piv == ULONG_MAX) 2120 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); 2121 2122 if (offset_end != slot) 2123 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, 2124 offset_end); 2125 2126 b_node->slot[++b_end] = wr_mas->content; 2127 if (!wr_mas->content) 2128 b_node->gap[b_end] = piv - mas->last + 1; 2129 b_node->pivot[b_end] = piv; 2130 } 2131 2132 slot = offset_end + 1; 2133 if (slot > mas->end) 2134 goto b_end; 2135 2136 /* Copy end data to the end of the node. */ 2137 mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end); 2138 b_node->b_end--; 2139 return; 2140 2141 b_end: 2142 b_node->b_end = b_end; 2143 } 2144 2145 /* 2146 * mas_prev_sibling() - Find the previous node with the same parent. 2147 * @mas: the maple state 2148 * 2149 * Return: True if there is a previous sibling, false otherwise. 2150 */ 2151 static inline bool mas_prev_sibling(struct ma_state *mas) 2152 { 2153 unsigned int p_slot = mte_parent_slot(mas->node); 2154 2155 /* For root node, p_slot is set to 0 by mte_parent_slot(). */ 2156 if (!p_slot) 2157 return false; 2158 2159 mas_ascend(mas); 2160 mas->offset = p_slot - 1; 2161 mas_descend(mas); 2162 return true; 2163 } 2164 2165 /* 2166 * mas_next_sibling() - Find the next node with the same parent. 2167 * @mas: the maple state 2168 * 2169 * Return: true if there is a next sibling, false otherwise. 2170 */ 2171 static inline bool mas_next_sibling(struct ma_state *mas) 2172 { 2173 MA_STATE(parent, mas->tree, mas->index, mas->last); 2174 2175 if (mte_is_root(mas->node)) 2176 return false; 2177 2178 parent = *mas; 2179 mas_ascend(&parent); 2180 parent.offset = mte_parent_slot(mas->node) + 1; 2181 if (parent.offset > mas_data_end(&parent)) 2182 return false; 2183 2184 *mas = parent; 2185 mas_descend(mas); 2186 return true; 2187 } 2188 2189 /* 2190 * mas_node_or_none() - Set the enode and state. 2191 * @mas: the maple state 2192 * @enode: The encoded maple node. 2193 * 2194 * Set the node to the enode and the status. 2195 */ 2196 static inline void mas_node_or_none(struct ma_state *mas, 2197 struct maple_enode *enode) 2198 { 2199 if (enode) { 2200 mas->node = enode; 2201 mas->status = ma_active; 2202 } else { 2203 mas->node = NULL; 2204 mas->status = ma_none; 2205 } 2206 } 2207 2208 /* 2209 * mas_wr_node_walk() - Find the correct offset for the index in the @mas. 2210 * If @mas->index cannot be found within the containing 2211 * node, we traverse to the last entry in the node. 2212 * @wr_mas: The maple write state 2213 * 2214 * Uses mas_slot_locked() and does not need to worry about dead nodes. 2215 */ 2216 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) 2217 { 2218 struct ma_state *mas = wr_mas->mas; 2219 unsigned char count, offset; 2220 2221 if (unlikely(ma_is_dense(wr_mas->type))) { 2222 wr_mas->r_max = wr_mas->r_min = mas->index; 2223 mas->offset = mas->index = mas->min; 2224 return; 2225 } 2226 2227 wr_mas->node = mas_mn(wr_mas->mas); 2228 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); 2229 count = mas->end = ma_data_end(wr_mas->node, wr_mas->type, 2230 wr_mas->pivots, mas->max); 2231 offset = mas->offset; 2232 2233 while (offset < count && mas->index > wr_mas->pivots[offset]) 2234 offset++; 2235 2236 wr_mas->r_max = offset < count ? 
wr_mas->pivots[offset] : mas->max; 2237 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset); 2238 wr_mas->offset_end = mas->offset = offset; 2239 } 2240 2241 /* 2242 * mast_rebalance_next() - Rebalance against the next node 2243 * @mast: The maple subtree state 2244 */ 2245 static inline void mast_rebalance_next(struct maple_subtree_state *mast) 2246 { 2247 unsigned char b_end = mast->bn->b_end; 2248 2249 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node), 2250 mast->bn, b_end); 2251 mast->orig_r->last = mast->orig_r->max; 2252 } 2253 2254 /* 2255 * mast_rebalance_prev() - Rebalance against the previous node 2256 * @mast: The maple subtree state 2257 */ 2258 static inline void mast_rebalance_prev(struct maple_subtree_state *mast) 2259 { 2260 unsigned char end = mas_data_end(mast->orig_l) + 1; 2261 unsigned char b_end = mast->bn->b_end; 2262 2263 mab_shift_right(mast->bn, end); 2264 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0); 2265 mast->l->min = mast->orig_l->min; 2266 mast->orig_l->index = mast->orig_l->min; 2267 mast->bn->b_end = end + b_end; 2268 mast->l->offset += end; 2269 } 2270 2271 /* 2272 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring 2273 * the node to the right. Checking the nodes to the right then the left at each 2274 * level upwards until root is reached. 2275 * Data is copied into the @mast->bn. 2276 * @mast: The maple_subtree_state. 2277 */ 2278 static inline 2279 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2280 { 2281 struct ma_state r_tmp = *mast->orig_r; 2282 struct ma_state l_tmp = *mast->orig_l; 2283 unsigned char depth = 0; 2284 2285 do { 2286 mas_ascend(mast->orig_r); 2287 mas_ascend(mast->orig_l); 2288 depth++; 2289 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2290 mast->orig_r->offset++; 2291 do { 2292 mas_descend(mast->orig_r); 2293 mast->orig_r->offset = 0; 2294 } while (--depth); 2295 2296 mast_rebalance_next(mast); 2297 *mast->orig_l = l_tmp; 2298 return true; 2299 } else if (mast->orig_l->offset != 0) { 2300 mast->orig_l->offset--; 2301 do { 2302 mas_descend(mast->orig_l); 2303 mast->orig_l->offset = 2304 mas_data_end(mast->orig_l); 2305 } while (--depth); 2306 2307 mast_rebalance_prev(mast); 2308 *mast->orig_r = r_tmp; 2309 return true; 2310 } 2311 } while (!mte_is_root(mast->orig_r->node)); 2312 2313 *mast->orig_r = r_tmp; 2314 *mast->orig_l = l_tmp; 2315 return false; 2316 } 2317 2318 /* 2319 * mast_ascend() - Ascend the original left and right maple states. 2320 * @mast: the maple subtree state. 2321 * 2322 * Ascend the original left and right sides. Set the offsets to point to the 2323 * data already in the new tree (@mast->l and @mast->r). 
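 *
 * Illustrative caller pattern (a sketch of the loop body in
 * mas_spanning_rebalance(), not a standalone example):
 *
 *	mast_ascend(mast);
 *	mast_combine_cp_left(mast);
 *	mab_set_b_end(mast->bn, &l_mas, left);
 *	mab_set_b_end(mast->bn, &m_mas, middle);
 *	mab_set_b_end(mast->bn, &r_mas, right);
 *	mast_combine_cp_right(mast);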
2324 */
2325 static inline void mast_ascend(struct maple_subtree_state *mast)
2326 {
2327 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2328 mas_ascend(mast->orig_l);
2329 mas_ascend(mast->orig_r);
2330
2331 mast->orig_r->offset = 0;
2332 mast->orig_r->index = mast->r->max;
2333 /* last should be larger than or equal to index */
2334 if (mast->orig_r->last < mast->orig_r->index)
2335 mast->orig_r->last = mast->orig_r->index;
2336
2337 wr_mas.type = mte_node_type(mast->orig_r->node);
2338 mas_wr_node_walk(&wr_mas);
2339 /* Set up the left side of things */
2340 mast->orig_l->offset = 0;
2341 mast->orig_l->index = mast->l->min;
2342 wr_mas.mas = mast->orig_l;
2343 wr_mas.type = mte_node_type(mast->orig_l->node);
2344 mas_wr_node_walk(&wr_mas);
2345
2346 mast->bn->type = wr_mas.type;
2347 }
2348
2349 /*
2350 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2351 * @mas: the maple state with the allocations.
2352 * @b_node: the maple_big_node with the type encoding.
2353 *
2354 * Use the node type from the maple_big_node to allocate a new node from the
2355 * ma_state. This function exists mainly for code readability.
2356 *
2357 * Return: A new maple encoded node
2358 */
2359 static inline struct maple_enode
2360 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2361 {
2362 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2363 }
2364
2365 /*
2366 * mas_mab_to_node() - Set up right and middle nodes
2367 *
2368 * @mas: the maple state that contains the allocations.
2369 * @b_node: the node which contains the data.
2370 * @left: The pointer which will have the left node
2371 * @right: The pointer which may have the right node
2372 * @middle: the pointer which may have the middle node (rare)
2373 * @mid_split: the split location for the middle node
 * @min: the minimum index, passed through to mab_calc_split()
2374 *
2375 * Return: the split of left.
2376 */
2377 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2378 struct maple_big_node *b_node, struct maple_enode **left,
2379 struct maple_enode **right, struct maple_enode **middle,
2380 unsigned char *mid_split, unsigned long min)
2381 {
2382 unsigned char split = 0;
2383 unsigned char slot_count = mt_slots[b_node->type];
2384
2385 *left = mas_new_ma_node(mas, b_node);
2386 *right = NULL;
2387 *middle = NULL;
2388 *mid_split = 0;
2389
2390 if (b_node->b_end < slot_count) {
2391 split = b_node->b_end;
2392 } else {
2393 split = mab_calc_split(mas, b_node, mid_split, min);
2394 *right = mas_new_ma_node(mas, b_node);
2395 }
2396
2397 if (*mid_split)
2398 *middle = mas_new_ma_node(mas, b_node);
2399
2400 return split;
2401
2402 }
2403
2404 /*
2405 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2406 * pointer.
2407 * @b_node: the big node to add the entry
2408 * @mas: the maple state to get the pivot (mas->max)
2409 * @entry: the entry to add, if NULL nothing happens.
2410 */
2411 static inline void mab_set_b_end(struct maple_big_node *b_node,
2412 struct ma_state *mas,
2413 void *entry)
2414 {
2415 if (!entry)
2416 return;
2417
2418 b_node->slot[b_node->b_end] = entry;
2419 if (mt_is_alloc(mas->tree))
2420 b_node->gap[b_node->b_end] = mas_max_gap(mas);
2421 b_node->pivot[b_node->b_end++] = mas->max;
2422 }
2423
2424 /*
2425 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2426 * of @mas->node to either @left or @right, depending on @slot and @split
2427 *
2428 * @mas: the maple state with the node that needs a parent
2429 * @left: possible parent 1
2430 * @right: possible parent 2
2431 * @slot: the slot @mas->node was placed in
2432 * @split: the split location between @left and @right
2433 */
2434 static inline void mas_set_split_parent(struct ma_state *mas,
2435 struct maple_enode *left,
2436 struct maple_enode *right,
2437 unsigned char *slot, unsigned char split)
2438 {
2439 if (mas_is_none(mas))
2440 return;
2441
2442 if ((*slot) <= split)
2443 mas_set_parent(mas, mas->node, left, *slot);
2444 else if (right)
2445 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2446
2447 (*slot)++;
2448 }
2449
2450 /*
2451 * mte_mid_split_check() - Check if the next node passes the mid-split
2452 * @l: Pointer to the left encoded maple node.
2453 * @r: Pointer to the right encoded maple node; advanced to @right once
2454 * @slot passes @mid_split.
 * @right: The encoded maple node to the right of the mid-split.
2455 * @slot: The offset
2456 * @split: The split location.
2457 * @mid_split: The middle split.
2458 */
2459 static inline void mte_mid_split_check(struct maple_enode **l,
2460 struct maple_enode **r,
2461 struct maple_enode *right,
2462 unsigned char slot,
2463 unsigned char *split,
2464 unsigned char mid_split)
2465 {
2466 if (*r == right)
2467 return;
2468
2469 if (slot < mid_split)
2470 return;
2471
2472 *l = *r;
2473 *r = right;
2474 *split = mid_split;
2475 }
2476
2477 /*
2478 * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
2479 * is taken from @mast->l.
2480 * @mast: the maple subtree state
2481 * @left: the left node
 * @middle: the middle node (may be NULL)
2482 * @right: the right node
2483 * @split: the split location.
 * @mid_split: the split location between @middle and @right, if any.
2484 */
2485 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2486 struct maple_enode *left,
2487 struct maple_enode *middle,
2488 struct maple_enode *right,
2489 unsigned char split,
2490 unsigned char mid_split)
2491 {
2492 unsigned char slot;
2493 struct maple_enode *l = left;
2494 struct maple_enode *r = right;
2495
2496 if (mas_is_none(mast->l))
2497 return;
2498
2499 if (middle)
2500 r = middle;
2501
2502 slot = mast->l->offset;
2503
2504 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2505 mas_set_split_parent(mast->l, l, r, &slot, split);
2506
2507 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2508 mas_set_split_parent(mast->m, l, r, &slot, split);
2509
2510 mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2511 mas_set_split_parent(mast->r, l, r, &slot, split);
2512 }
2513
2514 /*
2515 * mas_topiary_node() - Dispose of a single node
2516 * @mas: The maple state for pushing nodes
 * @tmp_mas: The maple state containing the node to dispose of
2517 * @in_rcu: If the tree is in rcu mode
2518 *
2519 * The node will either be RCU freed or pushed back on the maple state.
2520 */
2521 static inline void mas_topiary_node(struct ma_state *mas,
2522 struct ma_state *tmp_mas, bool in_rcu)
2523 {
2524 struct maple_node *tmp;
2525 struct maple_enode *enode;
2526
2527 if (mas_is_none(tmp_mas))
2528 return;
2529
2530 enode = tmp_mas->node;
2531 tmp = mte_to_node(enode);
2532 mte_set_node_dead(enode);
2533 if (in_rcu)
2534 ma_free_rcu(tmp);
2535 else
2536 mas_push_node(mas, tmp);
2537 }
2538
2539 /*
2540 * mas_topiary_replace() - Replace the data with new data, then repair the
2541 * parent links within the new tree. Iterate over the dead sub-tree and collect
2542 * the dead subtrees and topiary the nodes that are no longer of use.
2543 * 2544 * The new tree will have up to three children with the correct parent. Keep 2545 * track of the new entries as they need to be followed to find the next level 2546 * of new entries. 2547 * 2548 * The old tree will have up to three children with the old parent. Keep track 2549 * of the old entries as they may have more nodes below replaced. Nodes within 2550 * [index, last] are dead subtrees, others need to be freed and followed. 2551 * 2552 * @mas: The maple state pointing at the new data 2553 * @old_enode: The maple encoded node being replaced 2554 * 2555 */ 2556 static inline void mas_topiary_replace(struct ma_state *mas, 2557 struct maple_enode *old_enode) 2558 { 2559 struct ma_state tmp[3], tmp_next[3]; 2560 MA_TOPIARY(subtrees, mas->tree); 2561 bool in_rcu; 2562 int i, n; 2563 2564 /* Place data in tree & then mark node as old */ 2565 mas_put_in_tree(mas, old_enode); 2566 2567 /* Update the parent pointers in the tree */ 2568 tmp[0] = *mas; 2569 tmp[0].offset = 0; 2570 tmp[1].status = ma_none; 2571 tmp[2].status = ma_none; 2572 while (!mte_is_leaf(tmp[0].node)) { 2573 n = 0; 2574 for (i = 0; i < 3; i++) { 2575 if (mas_is_none(&tmp[i])) 2576 continue; 2577 2578 while (n < 3) { 2579 if (!mas_find_child(&tmp[i], &tmp_next[n])) 2580 break; 2581 n++; 2582 } 2583 2584 mas_adopt_children(&tmp[i], tmp[i].node); 2585 } 2586 2587 if (MAS_WARN_ON(mas, n == 0)) 2588 break; 2589 2590 while (n < 3) 2591 tmp_next[n++].status = ma_none; 2592 2593 for (i = 0; i < 3; i++) 2594 tmp[i] = tmp_next[i]; 2595 } 2596 2597 /* Collect the old nodes that need to be discarded */ 2598 if (mte_is_leaf(old_enode)) 2599 return mas_free(mas, old_enode); 2600 2601 tmp[0] = *mas; 2602 tmp[0].offset = 0; 2603 tmp[0].node = old_enode; 2604 tmp[1].status = ma_none; 2605 tmp[2].status = ma_none; 2606 in_rcu = mt_in_rcu(mas->tree); 2607 do { 2608 n = 0; 2609 for (i = 0; i < 3; i++) { 2610 if (mas_is_none(&tmp[i])) 2611 continue; 2612 2613 while (n < 3) { 2614 if (!mas_find_child(&tmp[i], &tmp_next[n])) 2615 break; 2616 2617 if ((tmp_next[n].min >= tmp_next->index) && 2618 (tmp_next[n].max <= tmp_next->last)) { 2619 mat_add(&subtrees, tmp_next[n].node); 2620 tmp_next[n].status = ma_none; 2621 } else { 2622 n++; 2623 } 2624 } 2625 } 2626 2627 if (MAS_WARN_ON(mas, n == 0)) 2628 break; 2629 2630 while (n < 3) 2631 tmp_next[n++].status = ma_none; 2632 2633 for (i = 0; i < 3; i++) { 2634 mas_topiary_node(mas, &tmp[i], in_rcu); 2635 tmp[i] = tmp_next[i]; 2636 } 2637 } while (!mte_is_leaf(tmp[0].node)); 2638 2639 for (i = 0; i < 3; i++) 2640 mas_topiary_node(mas, &tmp[i], in_rcu); 2641 2642 mas_mat_destroy(mas, &subtrees); 2643 } 2644 2645 /* 2646 * mas_wmb_replace() - Write memory barrier and replace 2647 * @mas: The maple state 2648 * @old_enode: The old maple encoded node that is being replaced. 2649 * 2650 * Updates gap as necessary. 2651 */ 2652 static inline void mas_wmb_replace(struct ma_state *mas, 2653 struct maple_enode *old_enode) 2654 { 2655 /* Insert the new data in the tree */ 2656 mas_topiary_replace(mas, old_enode); 2657 2658 if (mte_is_leaf(mas->node)) 2659 return; 2660 2661 mas_update_gap(mas); 2662 } 2663 2664 /* 2665 * mast_cp_to_nodes() - Copy data out to nodes. 2666 * @mast: The maple subtree state 2667 * @left: The left encoded maple node 2668 * @middle: The middle encoded maple node 2669 * @right: The right encoded maple node 2670 * @split: The location to split between left and (middle ? middle : right) 2671 * @mid_split: The location to split between middle and right. 
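 *
 * Illustrative sketch of the usual call sequence (see the loop in
 * mas_spanning_rebalance() below); @split and @mid_split come from
 * mas_mab_to_node(), which in turn uses mab_calc_split():
 *
 *	split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
 *				&mid_split, mast->orig_l->min);
 *	mast_set_split_parents(mast, left, middle, right, split, mid_split);
 *	mast_cp_to_nodes(mast, left, middle, right, split, mid_split);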
2672 */ 2673 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2674 struct maple_enode *left, struct maple_enode *middle, 2675 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2676 { 2677 bool new_lmax = true; 2678 2679 mas_node_or_none(mast->l, left); 2680 mas_node_or_none(mast->m, middle); 2681 mas_node_or_none(mast->r, right); 2682 2683 mast->l->min = mast->orig_l->min; 2684 if (split == mast->bn->b_end) { 2685 mast->l->max = mast->orig_r->max; 2686 new_lmax = false; 2687 } 2688 2689 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2690 2691 if (middle) { 2692 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2693 mast->m->min = mast->bn->pivot[split] + 1; 2694 split = mid_split; 2695 } 2696 2697 mast->r->max = mast->orig_r->max; 2698 if (right) { 2699 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2700 mast->r->min = mast->bn->pivot[split] + 1; 2701 } 2702 } 2703 2704 /* 2705 * mast_combine_cp_left - Copy in the original left side of the tree into the 2706 * combined data set in the maple subtree state big node. 2707 * @mast: The maple subtree state 2708 */ 2709 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2710 { 2711 unsigned char l_slot = mast->orig_l->offset; 2712 2713 if (!l_slot) 2714 return; 2715 2716 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2717 } 2718 2719 /* 2720 * mast_combine_cp_right: Copy in the original right side of the tree into the 2721 * combined data set in the maple subtree state big node. 2722 * @mast: The maple subtree state 2723 */ 2724 static inline void mast_combine_cp_right(struct maple_subtree_state *mast) 2725 { 2726 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) 2727 return; 2728 2729 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, 2730 mt_slot_count(mast->orig_r->node), mast->bn, 2731 mast->bn->b_end); 2732 mast->orig_r->last = mast->orig_r->max; 2733 } 2734 2735 /* 2736 * mast_sufficient: Check if the maple subtree state has enough data in the big 2737 * node to create at least one sufficient node 2738 * @mast: the maple subtree state 2739 */ 2740 static inline bool mast_sufficient(struct maple_subtree_state *mast) 2741 { 2742 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) 2743 return true; 2744 2745 return false; 2746 } 2747 2748 /* 2749 * mast_overflow: Check if there is too much data in the subtree state for a 2750 * single node. 
2751 * @mast: The maple subtree state
2752 */
2753 static inline bool mast_overflow(struct maple_subtree_state *mast)
2754 {
2755 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2756 return true;
2757
2758 return false;
2759 }
2760
2761 static inline void *mtree_range_walk(struct ma_state *mas)
2762 {
2763 unsigned long *pivots;
2764 unsigned char offset;
2765 struct maple_node *node;
2766 struct maple_enode *next, *last;
2767 enum maple_type type;
2768 void __rcu **slots;
2769 unsigned char end;
2770 unsigned long max, min;
2771 unsigned long prev_max, prev_min;
2772
2773 next = mas->node;
2774 min = mas->min;
2775 max = mas->max;
2776 do {
2777 last = next;
2778 node = mte_to_node(next);
2779 type = mte_node_type(next);
2780 pivots = ma_pivots(node, type);
2781 end = ma_data_end(node, type, pivots, max);
2782 prev_min = min;
2783 prev_max = max;
2784 if (pivots[0] >= mas->index) {
2785 offset = 0;
2786 max = pivots[0];
2787 goto next;
2788 }
2789
2790 offset = 1;
2791 while (offset < end) {
2792 if (pivots[offset] >= mas->index) {
2793 max = pivots[offset];
2794 break;
2795 }
2796 offset++;
2797 }
2798
2799 min = pivots[offset - 1] + 1;
2800 next:
2801 slots = ma_slots(node, type);
2802 next = mt_slot(mas->tree, slots, offset);
2803 if (unlikely(ma_dead_node(node)))
2804 goto dead_node;
2805 } while (!ma_is_leaf(type));
2806
2807 mas->end = end;
2808 mas->offset = offset;
2809 mas->index = min;
2810 mas->last = max;
2811 mas->min = prev_min;
2812 mas->max = prev_max;
2813 mas->node = last;
2814 return (void *)next;
2815
2816 dead_node:
2817 mas_reset(mas);
2818 return NULL;
2819 }
2820
2821 /*
2822 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2823 * @mas: The starting maple state
2824 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2825 * @count: The estimated count of iterations needed.
2826 *
2827 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2828 * is hit. First @b_node is split into two entries which are inserted into the
2829 * next iteration of the loop. @b_node is returned populated with the final
2830 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
2831 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2832 * to account for what has been copied into the new sub-tree. The update of
2833 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2834 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
2835 * the new sub-tree in case the sub-tree becomes the full tree.
2836 */
2837 static void mas_spanning_rebalance(struct ma_state *mas,
2838 struct maple_subtree_state *mast, unsigned char count)
2839 {
2840 unsigned char split, mid_split;
2841 unsigned char slot = 0;
2842 struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2843 struct maple_enode *old_enode;
2844
2845 MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2846 MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2847 MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2848
2849 /*
2850 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2851 * Rebalancing is done by use of the ``struct maple_topiary``.
2852 */
2853 mast->l = &l_mas;
2854 mast->m = &m_mas;
2855 mast->r = &r_mas;
2856 l_mas.status = r_mas.status = m_mas.status = ma_none;
2857
2858 /* Check if this is not root and has sufficient data.
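If it is insufficient and does not span the whole range, entries are first borrowed from a neighbour via mast_spanning_rebalance() so the replacement sub-tree is built from enough data.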
*/ 2859 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && 2860 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) 2861 mast_spanning_rebalance(mast); 2862 2863 l_mas.depth = 0; 2864 2865 /* 2866 * Each level of the tree is examined and balanced, pushing data to the left or 2867 * right, or rebalancing against left or right nodes is employed to avoid 2868 * rippling up the tree to limit the amount of churn. Once a new sub-section of 2869 * the tree is created, there may be a mix of new and old nodes. The old nodes 2870 * will have the incorrect parent pointers and currently be in two trees: the 2871 * original tree and the partially new tree. To remedy the parent pointers in 2872 * the old tree, the new data is swapped into the active tree and a walk down 2873 * the tree is performed and the parent pointers are updated. 2874 * See mas_topiary_replace() for more information. 2875 */ 2876 while (count--) { 2877 mast->bn->b_end--; 2878 mast->bn->type = mte_node_type(mast->orig_l->node); 2879 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, 2880 &mid_split, mast->orig_l->min); 2881 mast_set_split_parents(mast, left, middle, right, split, 2882 mid_split); 2883 mast_cp_to_nodes(mast, left, middle, right, split, mid_split); 2884 2885 /* 2886 * Copy data from next level in the tree to mast->bn from next 2887 * iteration 2888 */ 2889 memset(mast->bn, 0, sizeof(struct maple_big_node)); 2890 mast->bn->type = mte_node_type(left); 2891 l_mas.depth++; 2892 2893 /* Root already stored in l->node. */ 2894 if (mas_is_root_limits(mast->l)) 2895 goto new_root; 2896 2897 mast_ascend(mast); 2898 mast_combine_cp_left(mast); 2899 l_mas.offset = mast->bn->b_end; 2900 mab_set_b_end(mast->bn, &l_mas, left); 2901 mab_set_b_end(mast->bn, &m_mas, middle); 2902 mab_set_b_end(mast->bn, &r_mas, right); 2903 2904 /* Copy anything necessary out of the right node. */ 2905 mast_combine_cp_right(mast); 2906 mast->orig_l->last = mast->orig_l->max; 2907 2908 if (mast_sufficient(mast)) 2909 continue; 2910 2911 if (mast_overflow(mast)) 2912 continue; 2913 2914 /* May be a new root stored in mast->bn */ 2915 if (mas_is_root_limits(mast->orig_l)) 2916 break; 2917 2918 mast_spanning_rebalance(mast); 2919 2920 /* rebalancing from other nodes may require another loop. */ 2921 if (!count) 2922 count++; 2923 } 2924 2925 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 2926 mte_node_type(mast->orig_l->node)); 2927 l_mas.depth++; 2928 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 2929 mas_set_parent(mas, left, l_mas.node, slot); 2930 if (middle) 2931 mas_set_parent(mas, middle, l_mas.node, ++slot); 2932 2933 if (right) 2934 mas_set_parent(mas, right, l_mas.node, ++slot); 2935 2936 if (mas_is_root_limits(mast->l)) { 2937 new_root: 2938 mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas)); 2939 while (!mte_is_root(mast->orig_l->node)) 2940 mast_ascend(mast); 2941 } else { 2942 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 2943 } 2944 2945 old_enode = mast->orig_l->node; 2946 mas->depth = l_mas.depth; 2947 mas->node = l_mas.node; 2948 mas->min = l_mas.min; 2949 mas->max = l_mas.max; 2950 mas->offset = l_mas.offset; 2951 mas_wmb_replace(mas, old_enode); 2952 mtree_range_walk(mas); 2953 return; 2954 } 2955 2956 /* 2957 * mas_rebalance() - Rebalance a given node. 2958 * @mas: The maple state 2959 * @b_node: The big maple node. 2960 * 2961 * Rebalance two nodes into a single node or two new nodes that are sufficient. 
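 * The direction is chosen as in the function body below: combine with the
 * next sibling when one exists, otherwise shift right and combine with the
 * previous sibling (illustrative outline only):
 *
 *	if (mas_next_sibling(&r_mas))
 *		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
 *	else
 *		mas_prev_sibling(&l_mas);
 *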
2962 * Continue upwards until tree is sufficient. 2963 */ 2964 static inline void mas_rebalance(struct ma_state *mas, 2965 struct maple_big_node *b_node) 2966 { 2967 char empty_count = mas_mt_height(mas); 2968 struct maple_subtree_state mast; 2969 unsigned char shift, b_end = ++b_node->b_end; 2970 2971 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 2972 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 2973 2974 trace_ma_op(__func__, mas); 2975 2976 /* 2977 * Rebalancing occurs if a node is insufficient. Data is rebalanced 2978 * against the node to the right if it exists, otherwise the node to the 2979 * left of this node is rebalanced against this node. If rebalancing 2980 * causes just one node to be produced instead of two, then the parent 2981 * is also examined and rebalanced if it is insufficient. Every level 2982 * tries to combine the data in the same way. If one node contains the 2983 * entire range of the tree, then that node is used as a new root node. 2984 */ 2985 2986 mast.orig_l = &l_mas; 2987 mast.orig_r = &r_mas; 2988 mast.bn = b_node; 2989 mast.bn->type = mte_node_type(mas->node); 2990 2991 l_mas = r_mas = *mas; 2992 2993 if (mas_next_sibling(&r_mas)) { 2994 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 2995 r_mas.last = r_mas.index = r_mas.max; 2996 } else { 2997 mas_prev_sibling(&l_mas); 2998 shift = mas_data_end(&l_mas) + 1; 2999 mab_shift_right(b_node, shift); 3000 mas->offset += shift; 3001 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3002 b_node->b_end = shift + b_end; 3003 l_mas.index = l_mas.last = l_mas.min; 3004 } 3005 3006 return mas_spanning_rebalance(mas, &mast, empty_count); 3007 } 3008 3009 /* 3010 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3011 * state. 3012 * @mas: The maple state 3013 * @end: The end of the left-most node. 3014 * 3015 * During a mass-insert event (such as forking), it may be necessary to 3016 * rebalance the left-most node when it is not sufficient. 3017 */ 3018 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) 3019 { 3020 enum maple_type mt = mte_node_type(mas->node); 3021 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; 3022 struct maple_enode *eparent, *old_eparent; 3023 unsigned char offset, tmp, split = mt_slots[mt] / 2; 3024 void __rcu **l_slots, **slots; 3025 unsigned long *l_pivs, *pivs, gap; 3026 bool in_rcu = mt_in_rcu(mas->tree); 3027 3028 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3029 3030 l_mas = *mas; 3031 mas_prev_sibling(&l_mas); 3032 3033 /* set up node. 
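A new node is only allocated in RCU mode, where readers may still be traversing the old node; otherwise the on-stack 'reuse' node serves as scratch space and is copied over the old node contents below.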
*/ 3034 if (in_rcu) { 3035 newnode = mas_pop_node(mas); 3036 } else { 3037 newnode = &reuse; 3038 } 3039 3040 node = mas_mn(mas); 3041 newnode->parent = node->parent; 3042 slots = ma_slots(newnode, mt); 3043 pivs = ma_pivots(newnode, mt); 3044 left = mas_mn(&l_mas); 3045 l_slots = ma_slots(left, mt); 3046 l_pivs = ma_pivots(left, mt); 3047 if (!l_slots[split]) 3048 split++; 3049 tmp = mas_data_end(&l_mas) - split; 3050 3051 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); 3052 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); 3053 pivs[tmp] = l_mas.max; 3054 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); 3055 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); 3056 3057 l_mas.max = l_pivs[split]; 3058 mas->min = l_mas.max + 1; 3059 old_eparent = mt_mk_node(mte_parent(l_mas.node), 3060 mas_parent_type(&l_mas, l_mas.node)); 3061 tmp += end; 3062 if (!in_rcu) { 3063 unsigned char max_p = mt_pivots[mt]; 3064 unsigned char max_s = mt_slots[mt]; 3065 3066 if (tmp < max_p) 3067 memset(pivs + tmp, 0, 3068 sizeof(unsigned long) * (max_p - tmp)); 3069 3070 if (tmp < mt_slots[mt]) 3071 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3072 3073 memcpy(node, newnode, sizeof(struct maple_node)); 3074 ma_set_meta(node, mt, 0, tmp - 1); 3075 mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node), 3076 l_pivs[split]); 3077 3078 /* Remove data from l_pivs. */ 3079 tmp = split + 1; 3080 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); 3081 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3082 ma_set_meta(left, mt, 0, split); 3083 eparent = old_eparent; 3084 3085 goto done; 3086 } 3087 3088 /* RCU requires replacing both l_mas, mas, and parent. */ 3089 mas->node = mt_mk_node(newnode, mt); 3090 ma_set_meta(newnode, mt, 0, tmp); 3091 3092 new_left = mas_pop_node(mas); 3093 new_left->parent = left->parent; 3094 mt = mte_node_type(l_mas.node); 3095 slots = ma_slots(new_left, mt); 3096 pivs = ma_pivots(new_left, mt); 3097 memcpy(slots, l_slots, sizeof(void *) * split); 3098 memcpy(pivs, l_pivs, sizeof(unsigned long) * split); 3099 ma_set_meta(new_left, mt, 0, split); 3100 l_mas.node = mt_mk_node(new_left, mt); 3101 3102 /* replace parent. */ 3103 offset = mte_parent_slot(mas->node); 3104 mt = mas_parent_type(&l_mas, l_mas.node); 3105 parent = mas_pop_node(mas); 3106 slots = ma_slots(parent, mt); 3107 pivs = ma_pivots(parent, mt); 3108 memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node)); 3109 rcu_assign_pointer(slots[offset], mas->node); 3110 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3111 pivs[offset - 1] = l_mas.max; 3112 eparent = mt_mk_node(parent, mt); 3113 done: 3114 gap = mas_leaf_max_gap(mas); 3115 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3116 gap = mas_leaf_max_gap(&l_mas); 3117 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3118 mas_ascend(mas); 3119 3120 if (in_rcu) { 3121 mas_replace_node(mas, old_eparent); 3122 mas_adopt_children(mas, mas->node); 3123 } 3124 3125 mas_update_gap(mas); 3126 } 3127 3128 /* 3129 * mas_split_final_node() - Split the final node in a subtree operation. 3130 * @mast: the maple subtree state 3131 * @mas: The maple state 3132 * @height: The height of the tree in case it's a new root. 
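 *
 * Illustrative sketch of when this runs (see the loop in mas_split()
 * below): once the big node fits into a single node, the split up the
 * tree terminates here:
 *
 *	if (mt_slots[b_node->type] > b_node->b_end) {
 *		mas_split_final_node(&mast, mas, height);
 *		break;
 *	}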
3133 */ 3134 static inline void mas_split_final_node(struct maple_subtree_state *mast, 3135 struct ma_state *mas, int height) 3136 { 3137 struct maple_enode *ancestor; 3138 3139 if (mte_is_root(mas->node)) { 3140 if (mt_is_alloc(mas->tree)) 3141 mast->bn->type = maple_arange_64; 3142 else 3143 mast->bn->type = maple_range_64; 3144 mas->depth = height; 3145 } 3146 /* 3147 * Only a single node is used here, could be root. 3148 * The Big_node data should just fit in a single node. 3149 */ 3150 ancestor = mas_new_ma_node(mas, mast->bn); 3151 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset); 3152 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset); 3153 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3154 3155 mast->l->node = ancestor; 3156 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3157 mas->offset = mast->bn->b_end - 1; 3158 } 3159 3160 /* 3161 * mast_fill_bnode() - Copy data into the big node in the subtree state 3162 * @mast: The maple subtree state 3163 * @mas: the maple state 3164 * @skip: The number of entries to skip for new nodes insertion. 3165 */ 3166 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3167 struct ma_state *mas, 3168 unsigned char skip) 3169 { 3170 bool cp = true; 3171 unsigned char split; 3172 3173 memset(mast->bn, 0, sizeof(struct maple_big_node)); 3174 3175 if (mte_is_root(mas->node)) { 3176 cp = false; 3177 } else { 3178 mas_ascend(mas); 3179 mas->offset = mte_parent_slot(mas->node); 3180 } 3181 3182 if (cp && mast->l->offset) 3183 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3184 3185 split = mast->bn->b_end; 3186 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3187 mast->r->offset = mast->bn->b_end; 3188 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3189 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3190 cp = false; 3191 3192 if (cp) 3193 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3194 mast->bn, mast->bn->b_end); 3195 3196 mast->bn->b_end--; 3197 mast->bn->type = mte_node_type(mas->node); 3198 } 3199 3200 /* 3201 * mast_split_data() - Split the data in the subtree state big node into regular 3202 * nodes. 3203 * @mast: The maple subtree state 3204 * @mas: The maple state 3205 * @split: The location to split the big node 3206 */ 3207 static inline void mast_split_data(struct maple_subtree_state *mast, 3208 struct ma_state *mas, unsigned char split) 3209 { 3210 unsigned char p_slot; 3211 3212 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3213 mte_set_pivot(mast->r->node, 0, mast->r->max); 3214 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3215 mast->l->offset = mte_parent_slot(mas->node); 3216 mast->l->max = mast->bn->pivot[split]; 3217 mast->r->min = mast->l->max + 1; 3218 if (mte_is_leaf(mas->node)) 3219 return; 3220 3221 p_slot = mast->orig_l->offset; 3222 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3223 &p_slot, split); 3224 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3225 &p_slot, split); 3226 } 3227 3228 /* 3229 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3230 * data to the right or left node if there is room. 3231 * @mas: The maple state 3232 * @height: The current height of the maple state 3233 * @mast: The maple subtree state 3234 * @left: Push left or not. 3235 * 3236 * Keeping the height of the tree low means faster lookups. 3237 * 3238 * Return: True if pushed, false otherwise. 
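 *
 * Worked example of the space check in the body (assuming a 64 bit kernel
 * where a range64 node has 16 slots): two nodes offer 2 * 16 - 2 = 30
 * slots, one less for a leaf and one less again when
 * mas->max == ULONG_MAX, so a push at the edge of the tree only proceeds
 * while the combined total (b_end plus the sibling's end offset) stays
 * below 28.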
3239 */ 3240 static inline bool mas_push_data(struct ma_state *mas, int height, 3241 struct maple_subtree_state *mast, bool left) 3242 { 3243 unsigned char slot_total = mast->bn->b_end; 3244 unsigned char end, space, split; 3245 3246 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3247 tmp_mas = *mas; 3248 tmp_mas.depth = mast->l->depth; 3249 3250 if (left && !mas_prev_sibling(&tmp_mas)) 3251 return false; 3252 else if (!left && !mas_next_sibling(&tmp_mas)) 3253 return false; 3254 3255 end = mas_data_end(&tmp_mas); 3256 slot_total += end; 3257 space = 2 * mt_slot_count(mas->node) - 2; 3258 /* -2 instead of -1 to ensure there isn't a triple split */ 3259 if (ma_is_leaf(mast->bn->type)) 3260 space--; 3261 3262 if (mas->max == ULONG_MAX) 3263 space--; 3264 3265 if (slot_total >= space) 3266 return false; 3267 3268 /* Get the data; Fill mast->bn */ 3269 mast->bn->b_end++; 3270 if (left) { 3271 mab_shift_right(mast->bn, end + 1); 3272 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3273 mast->bn->b_end = slot_total + 1; 3274 } else { 3275 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3276 } 3277 3278 /* Configure mast for splitting of mast->bn */ 3279 split = mt_slots[mast->bn->type] - 2; 3280 if (left) { 3281 /* Switch mas to prev node */ 3282 *mas = tmp_mas; 3283 /* Start using mast->l for the left side. */ 3284 tmp_mas.node = mast->l->node; 3285 *mast->l = tmp_mas; 3286 } else { 3287 tmp_mas.node = mast->r->node; 3288 *mast->r = tmp_mas; 3289 split = slot_total - split; 3290 } 3291 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3292 /* Update parent slot for split calculation. */ 3293 if (left) 3294 mast->orig_l->offset += end + 1; 3295 3296 mast_split_data(mast, mas, split); 3297 mast_fill_bnode(mast, mas, 2); 3298 mas_split_final_node(mast, mas, height + 1); 3299 return true; 3300 } 3301 3302 /* 3303 * mas_split() - Split data that is too big for one node into two. 3304 * @mas: The maple state 3305 * @b_node: The maple big node 3306 */ 3307 static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3308 { 3309 struct maple_subtree_state mast; 3310 int height = 0; 3311 unsigned char mid_split, split = 0; 3312 struct maple_enode *old; 3313 3314 /* 3315 * Splitting is handled differently from any other B-tree; the Maple 3316 * Tree splits upwards. Splitting up means that the split operation 3317 * occurs when the walk of the tree hits the leaves and not on the way 3318 * down. The reason for splitting up is that it is impossible to know 3319 * how much space will be needed until the leaf is (or leaves are) 3320 * reached. Since overwriting data is allowed and a range could 3321 * overwrite more than one range or result in changing one entry into 3 3322 * entries, it is impossible to know if a split is required until the 3323 * data is examined. 3324 * 3325 * Splitting is a balancing act between keeping allocations to a minimum 3326 * and avoiding a 'jitter' event where a tree is expanded to make room 3327 * for an entry followed by a contraction when the entry is removed. To 3328 * accomplish the balance, there are empty slots remaining in both left 3329 * and right nodes after a split. 
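 *
 * For example, a full leaf of sixteen entries is split roughly in half
 * (mab_calc_split() starts from b_end / 2), leaving free slots in both
 * new nodes so that an immediately following store does not split again.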
3330 */ 3331 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3332 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3333 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3334 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3335 3336 trace_ma_op(__func__, mas); 3337 mas->depth = mas_mt_height(mas); 3338 3339 mast.l = &l_mas; 3340 mast.r = &r_mas; 3341 mast.orig_l = &prev_l_mas; 3342 mast.orig_r = &prev_r_mas; 3343 mast.bn = b_node; 3344 3345 while (height++ <= mas->depth) { 3346 if (mt_slots[b_node->type] > b_node->b_end) { 3347 mas_split_final_node(&mast, mas, height); 3348 break; 3349 } 3350 3351 l_mas = r_mas = *mas; 3352 l_mas.node = mas_new_ma_node(mas, b_node); 3353 r_mas.node = mas_new_ma_node(mas, b_node); 3354 /* 3355 * Another way that 'jitter' is avoided is to terminate a split up early if the 3356 * left or right node has space to spare. This is referred to as "pushing left" 3357 * or "pushing right" and is similar to the B* tree, except the nodes left or 3358 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3359 * is a significant savings. 3360 */ 3361 /* Try to push left. */ 3362 if (mas_push_data(mas, height, &mast, true)) 3363 break; 3364 /* Try to push right. */ 3365 if (mas_push_data(mas, height, &mast, false)) 3366 break; 3367 3368 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3369 mast_split_data(&mast, mas, split); 3370 /* 3371 * Usually correct, mab_mas_cp in the above call overwrites 3372 * r->max. 3373 */ 3374 mast.r->max = mas->max; 3375 mast_fill_bnode(&mast, mas, 1); 3376 prev_l_mas = *mast.l; 3377 prev_r_mas = *mast.r; 3378 } 3379 3380 /* Set the original node as dead */ 3381 old = mas->node; 3382 mas->node = l_mas.node; 3383 mas_wmb_replace(mas, old); 3384 mtree_range_walk(mas); 3385 return; 3386 } 3387 3388 /* 3389 * mas_commit_b_node() - Commit the big node into the tree. 
3390 * @wr_mas: The maple write state 3391 * @b_node: The maple big node 3392 */ 3393 static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas, 3394 struct maple_big_node *b_node) 3395 { 3396 enum store_type type = wr_mas->mas->store_type; 3397 3398 WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store); 3399 3400 if (type == wr_rebalance) 3401 return mas_rebalance(wr_mas->mas, b_node); 3402 3403 return mas_split(wr_mas->mas, b_node); 3404 } 3405 3406 /* 3407 * mas_root_expand() - Expand a root to a node 3408 * @mas: The maple state 3409 * @entry: The entry to store into the tree 3410 */ 3411 static inline void mas_root_expand(struct ma_state *mas, void *entry) 3412 { 3413 void *contents = mas_root_locked(mas); 3414 enum maple_type type = maple_leaf_64; 3415 struct maple_node *node; 3416 void __rcu **slots; 3417 unsigned long *pivots; 3418 int slot = 0; 3419 3420 node = mas_pop_node(mas); 3421 pivots = ma_pivots(node, type); 3422 slots = ma_slots(node, type); 3423 node->parent = ma_parent_ptr(mas_tree_parent(mas)); 3424 mas->node = mt_mk_node(node, type); 3425 mas->status = ma_active; 3426 3427 if (mas->index) { 3428 if (contents) { 3429 rcu_assign_pointer(slots[slot], contents); 3430 if (likely(mas->index > 1)) 3431 slot++; 3432 } 3433 pivots[slot++] = mas->index - 1; 3434 } 3435 3436 rcu_assign_pointer(slots[slot], entry); 3437 mas->offset = slot; 3438 pivots[slot] = mas->last; 3439 if (mas->last != ULONG_MAX) 3440 pivots[++slot] = ULONG_MAX; 3441 3442 mas->depth = 1; 3443 mas_set_height(mas); 3444 ma_set_meta(node, maple_leaf_64, 0, slot); 3445 /* swap the new root into the tree */ 3446 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3447 return; 3448 } 3449 3450 /* 3451 * mas_store_root() - Storing value into root. 3452 * @mas: The maple state 3453 * @entry: The entry to store. 3454 * 3455 * There is no root node now and we are storing a value into the root - this 3456 * function either assigns the pointer or expands into a node. 3457 */ 3458 static inline void mas_store_root(struct ma_state *mas, void *entry) 3459 { 3460 if (!entry) { 3461 if (!mas->index) 3462 rcu_assign_pointer(mas->tree->ma_root, NULL); 3463 } else if (likely((mas->last != 0) || (mas->index != 0))) 3464 mas_root_expand(mas, entry); 3465 else if (((unsigned long) (entry) & 3) == 2) 3466 mas_root_expand(mas, entry); 3467 else { 3468 rcu_assign_pointer(mas->tree->ma_root, entry); 3469 mas->status = ma_start; 3470 } 3471 } 3472 3473 /* 3474 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3475 * spans the node. 3476 * @wr_mas: The maple write state 3477 * 3478 * Spanning writes are writes that start in one node and end in another OR if 3479 * the write of a %NULL will cause the node to end with a %NULL. 3480 * 3481 * Return: True if this is a spanning write, false otherwise. 3482 */ 3483 static bool mas_is_span_wr(struct ma_wr_state *wr_mas) 3484 { 3485 unsigned long max = wr_mas->r_max; 3486 unsigned long last = wr_mas->mas->last; 3487 enum maple_type type = wr_mas->type; 3488 void *entry = wr_mas->entry; 3489 3490 /* Contained in this pivot, fast path */ 3491 if (last < max) 3492 return false; 3493 3494 if (ma_is_leaf(type)) { 3495 max = wr_mas->mas->max; 3496 if (last < max) 3497 return false; 3498 } 3499 3500 if (last == max) { 3501 /* 3502 * The last entry of leaf node cannot be NULL unless it is the 3503 * rightmost node (writing ULONG_MAX), otherwise it spans slots. 
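 *
 * For example, writing a NULL that ends exactly at this node's maximum
 * (when the node does not extend to ULONG_MAX) is treated as spanning,
 * so the NULL can be merged with whatever follows in the next leaf and
 * no node ends on a NULL entry.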
3504 */
3505 if (entry || last == ULONG_MAX)
3506 return false;
3507 }
3508
3509 trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3510 return true;
3511 }
3512
3513 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3514 {
3515 wr_mas->type = mte_node_type(wr_mas->mas->node);
3516 mas_wr_node_walk(wr_mas);
3517 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3518 }
3519
3520 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3521 {
3522 wr_mas->mas->max = wr_mas->r_max;
3523 wr_mas->mas->min = wr_mas->r_min;
3524 wr_mas->mas->node = wr_mas->content;
3525 wr_mas->mas->offset = 0;
3526 wr_mas->mas->depth++;
3527 }
3528 /*
3529 * mas_wr_walk() - Walk the tree for a write.
3530 * @wr_mas: The maple write state
3531 *
3532 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3533 *
3534 * Return: True if it's contained in a node, false on spanning write.
3535 */
3536 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3537 {
3538 struct ma_state *mas = wr_mas->mas;
3539
3540 while (true) {
3541 mas_wr_walk_descend(wr_mas);
3542 if (unlikely(mas_is_span_wr(wr_mas)))
3543 return false;
3544
3545 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3546 mas->offset);
3547 if (ma_is_leaf(wr_mas->type))
3548 return true;
3549
3550 mas_wr_walk_traverse(wr_mas);
3551 }
3552
3553 return true;
3554 }
3555
3556 static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
3557 {
3558 struct ma_state *mas = wr_mas->mas;
3559
3560 while (true) {
3561 mas_wr_walk_descend(wr_mas);
3562 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3563 mas->offset);
3564 if (ma_is_leaf(wr_mas->type))
3565 return;
3566 mas_wr_walk_traverse(wr_mas);
3567 }
3568 }
3569 /*
3570 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3571 * @l_wr_mas: The left maple write state
3572 * @r_wr_mas: The right maple write state
3573 */
3574 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3575 struct ma_wr_state *r_wr_mas)
3576 {
3577 struct ma_state *r_mas = r_wr_mas->mas;
3578 struct ma_state *l_mas = l_wr_mas->mas;
3579 unsigned char l_slot;
3580
3581 l_slot = l_mas->offset;
3582 if (!l_wr_mas->content)
3583 l_mas->index = l_wr_mas->r_min;
3584
3585 if ((l_mas->index == l_wr_mas->r_min) &&
3586 (l_slot &&
3587 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3588 if (l_slot > 1)
3589 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3590 else
3591 l_mas->index = l_mas->min;
3592
3593 l_mas->offset = l_slot - 1;
3594 }
3595
3596 if (!r_wr_mas->content) {
3597 if (r_mas->last < r_wr_mas->r_max)
3598 r_mas->last = r_wr_mas->r_max;
3599 r_mas->offset++;
3600 } else if ((r_mas->last == r_wr_mas->r_max) &&
3601 (r_mas->last < r_mas->max) &&
3602 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3603 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3604 r_mas->offset + 1, r_wr_mas->type);
3605 r_mas->offset++;
3606 }
3607 }
3608
3609 static inline void *mas_state_walk(struct ma_state *mas)
3610 {
3611 void *entry;
3612
3613 entry = mas_start(mas);
3614 if (mas_is_none(mas))
3615 return NULL;
3616
3617 if (mas_is_ptr(mas))
3618 return entry;
3619
3620 return mtree_range_walk(mas);
3621 }
3622
3623 /*
3624 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3625 * to date.
3626 *
3627 * @mas: The maple state.
3628 *
3629 * Note: Leaves @mas in an undesirable state.
3630 * Return: The entry for @mas->index or %NULL on dead node.
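 *
 * Illustrative caller sketch (the shape is assumed here and trimmed; it
 * follows the retry-on-dead-node pattern of readers such as mtree_load()):
 *
 *	rcu_read_lock();
 *	retry:
 *	entry = mas_start(&mas);
 *	...
 *	entry = mtree_lookup_walk(&mas);
 *	if (!entry && unlikely(mas_is_start(&mas)))
 *		goto retry;
 *	rcu_read_unlock();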
3631 */ 3632 static inline void *mtree_lookup_walk(struct ma_state *mas) 3633 { 3634 unsigned long *pivots; 3635 unsigned char offset; 3636 struct maple_node *node; 3637 struct maple_enode *next; 3638 enum maple_type type; 3639 void __rcu **slots; 3640 unsigned char end; 3641 3642 next = mas->node; 3643 do { 3644 node = mte_to_node(next); 3645 type = mte_node_type(next); 3646 pivots = ma_pivots(node, type); 3647 end = mt_pivots[type]; 3648 offset = 0; 3649 do { 3650 if (pivots[offset] >= mas->index) 3651 break; 3652 } while (++offset < end); 3653 3654 slots = ma_slots(node, type); 3655 next = mt_slot(mas->tree, slots, offset); 3656 if (unlikely(ma_dead_node(node))) 3657 goto dead_node; 3658 } while (!ma_is_leaf(type)); 3659 3660 return (void *)next; 3661 3662 dead_node: 3663 mas_reset(mas); 3664 return NULL; 3665 } 3666 3667 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *); 3668 /* 3669 * mas_new_root() - Create a new root node that only contains the entry passed 3670 * in. 3671 * @mas: The maple state 3672 * @entry: The entry to store. 3673 * 3674 * Only valid when the index == 0 and the last == ULONG_MAX 3675 */ 3676 static inline void mas_new_root(struct ma_state *mas, void *entry) 3677 { 3678 struct maple_enode *root = mas_root_locked(mas); 3679 enum maple_type type = maple_leaf_64; 3680 struct maple_node *node; 3681 void __rcu **slots; 3682 unsigned long *pivots; 3683 3684 WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX); 3685 3686 if (!entry) { 3687 mas->depth = 0; 3688 mas_set_height(mas); 3689 rcu_assign_pointer(mas->tree->ma_root, entry); 3690 mas->status = ma_start; 3691 goto done; 3692 } 3693 3694 node = mas_pop_node(mas); 3695 pivots = ma_pivots(node, type); 3696 slots = ma_slots(node, type); 3697 node->parent = ma_parent_ptr(mas_tree_parent(mas)); 3698 mas->node = mt_mk_node(node, type); 3699 mas->status = ma_active; 3700 rcu_assign_pointer(slots[0], entry); 3701 pivots[0] = mas->last; 3702 mas->depth = 1; 3703 mas_set_height(mas); 3704 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3705 3706 done: 3707 if (xa_is_node(root)) 3708 mte_destroy_walk(root, mas->tree); 3709 3710 return; 3711 } 3712 /* 3713 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3714 * and new nodes where necessary, then place the sub-tree in the actual tree. 3715 * Note that mas is expected to point to the node which caused the store to 3716 * span. 3717 * @wr_mas: The maple write state 3718 */ 3719 static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3720 { 3721 struct maple_subtree_state mast; 3722 struct maple_big_node b_node; 3723 struct ma_state *mas; 3724 unsigned char height; 3725 3726 /* Left and Right side of spanning store */ 3727 MA_STATE(l_mas, NULL, 0, 0); 3728 MA_STATE(r_mas, NULL, 0, 0); 3729 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3730 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3731 3732 /* 3733 * A store operation that spans multiple nodes is called a spanning 3734 * store and is handled early in the store call stack by the function 3735 * mas_is_span_wr(). When a spanning store is identified, the maple 3736 * state is duplicated. The first maple state walks the left tree path 3737 * to ``index``, the duplicate walks the right tree path to ``last``. 3738 * The data in the two nodes are combined into a single node, two nodes, 3739 * or possibly three nodes (see the 3-way split above). 
A ``NULL`` 3740 * written to the last entry of a node is considered a spanning store as 3741 * a rebalance is required for the operation to complete and an overflow 3742 * of data may happen. 3743 */ 3744 mas = wr_mas->mas; 3745 trace_ma_op(__func__, mas); 3746 3747 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3748 return mas_new_root(mas, wr_mas->entry); 3749 /* 3750 * Node rebalancing may occur due to this store, so there may be three new 3751 * entries per level plus a new root. 3752 */ 3753 height = mas_mt_height(mas); 3754 3755 /* 3756 * Set up right side. Need to get to the next offset after the spanning 3757 * store to ensure it's not NULL and to combine both the next node and 3758 * the node with the start together. 3759 */ 3760 r_mas = *mas; 3761 /* Avoid overflow, walk to next slot in the tree. */ 3762 if (r_mas.last + 1) 3763 r_mas.last++; 3764 3765 r_mas.index = r_mas.last; 3766 mas_wr_walk_index(&r_wr_mas); 3767 r_mas.last = r_mas.index = mas->last; 3768 3769 /* Set up left side. */ 3770 l_mas = *mas; 3771 mas_wr_walk_index(&l_wr_mas); 3772 3773 if (!wr_mas->entry) { 3774 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); 3775 mas->offset = l_mas.offset; 3776 mas->index = l_mas.index; 3777 mas->last = l_mas.last = r_mas.last; 3778 } 3779 3780 /* expanding NULLs may make this cover the entire range */ 3781 if (!l_mas.index && r_mas.last == ULONG_MAX) { 3782 mas_set_range(mas, 0, ULONG_MAX); 3783 return mas_new_root(mas, wr_mas->entry); 3784 } 3785 3786 memset(&b_node, 0, sizeof(struct maple_big_node)); 3787 /* Copy l_mas and store the value in b_node. */ 3788 mas_store_b_node(&l_wr_mas, &b_node, l_mas.end); 3789 /* Copy r_mas into b_node if there is anything to copy. */ 3790 if (r_mas.max > r_mas.last) 3791 mas_mab_cp(&r_mas, r_mas.offset, r_mas.end, 3792 &b_node, b_node.b_end + 1); 3793 else 3794 b_node.b_end++; 3795 3796 /* Stop spanning searches by searching for just index. */ 3797 l_mas.index = l_mas.last = mas->index; 3798 3799 mast.bn = &b_node; 3800 mast.orig_l = &l_mas; 3801 mast.orig_r = &r_mas; 3802 /* Combine l_mas and r_mas and split them up evenly again. */ 3803 return mas_spanning_rebalance(mas, &mast, height + 1); 3804 } 3805 3806 /* 3807 * mas_wr_node_store() - Attempt to store the value in a node 3808 * @wr_mas: The maple write state 3809 * 3810 * Attempts to reuse the node, but may allocate. 3811 */ 3812 static inline void mas_wr_node_store(struct ma_wr_state *wr_mas, 3813 unsigned char new_end) 3814 { 3815 struct ma_state *mas = wr_mas->mas; 3816 void __rcu **dst_slots; 3817 unsigned long *dst_pivots; 3818 unsigned char dst_offset, offset_end = wr_mas->offset_end; 3819 struct maple_node reuse, *newnode; 3820 unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; 3821 bool in_rcu = mt_in_rcu(mas->tree); 3822 3823 if (mas->last == wr_mas->end_piv) 3824 offset_end++; /* don't copy this offset */ 3825 else if (unlikely(wr_mas->r_max == ULONG_MAX)) 3826 mas_bulk_rebalance(mas, mas->end, wr_mas->type); 3827 3828 /* set up node. 
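As in mas_destroy_rebalance(), RCU mode demands a fresh node that is only published via mas_replace_node() once fully written; otherwise the zeroed on-stack node is assembled and then copied over the existing node in place.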
*/ 3829 if (in_rcu) { 3830 newnode = mas_pop_node(mas); 3831 } else { 3832 memset(&reuse, 0, sizeof(struct maple_node)); 3833 newnode = &reuse; 3834 } 3835 3836 newnode->parent = mas_mn(mas)->parent; 3837 dst_pivots = ma_pivots(newnode, wr_mas->type); 3838 dst_slots = ma_slots(newnode, wr_mas->type); 3839 /* Copy from start to insert point */ 3840 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset); 3841 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset); 3842 3843 /* Handle insert of new range starting after old range */ 3844 if (wr_mas->r_min < mas->index) { 3845 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content); 3846 dst_pivots[mas->offset++] = mas->index - 1; 3847 } 3848 3849 /* Store the new entry and range end. */ 3850 if (mas->offset < node_pivots) 3851 dst_pivots[mas->offset] = mas->last; 3852 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry); 3853 3854 /* 3855 * this range wrote to the end of the node or it overwrote the rest of 3856 * the data 3857 */ 3858 if (offset_end > mas->end) 3859 goto done; 3860 3861 dst_offset = mas->offset + 1; 3862 /* Copy to the end of node if necessary. */ 3863 copy_size = mas->end - offset_end + 1; 3864 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, 3865 sizeof(void *) * copy_size); 3866 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, 3867 sizeof(unsigned long) * (copy_size - 1)); 3868 3869 if (new_end < node_pivots) 3870 dst_pivots[new_end] = mas->max; 3871 3872 done: 3873 mas_leaf_set_meta(newnode, maple_leaf_64, new_end); 3874 if (in_rcu) { 3875 struct maple_enode *old_enode = mas->node; 3876 3877 mas->node = mt_mk_node(newnode, wr_mas->type); 3878 mas_replace_node(mas, old_enode); 3879 } else { 3880 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 3881 } 3882 trace_ma_write(__func__, mas, 0, wr_mas->entry); 3883 mas_update_gap(mas); 3884 mas->end = new_end; 3885 return; 3886 } 3887 3888 /* 3889 * mas_wr_slot_store: Attempt to store a value in a slot. 3890 * @wr_mas: the maple write state 3891 */ 3892 static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas) 3893 { 3894 struct ma_state *mas = wr_mas->mas; 3895 unsigned char offset = mas->offset; 3896 void __rcu **slots = wr_mas->slots; 3897 bool gap = false; 3898 3899 gap |= !mt_slot_locked(mas->tree, slots, offset); 3900 gap |= !mt_slot_locked(mas->tree, slots, offset + 1); 3901 3902 if (wr_mas->offset_end - offset == 1) { 3903 if (mas->index == wr_mas->r_min) { 3904 /* Overwriting the range and a part of the next one */ 3905 rcu_assign_pointer(slots[offset], wr_mas->entry); 3906 wr_mas->pivots[offset] = mas->last; 3907 } else { 3908 /* Overwriting a part of the range and the next one */ 3909 rcu_assign_pointer(slots[offset + 1], wr_mas->entry); 3910 wr_mas->pivots[offset] = mas->index - 1; 3911 mas->offset++; /* Keep mas accurate. */ 3912 } 3913 } else { 3914 WARN_ON_ONCE(mt_in_rcu(mas->tree)); 3915 /* 3916 * Expand the range, only partially overwriting the previous and 3917 * next ranges 3918 */ 3919 gap |= !mt_slot_locked(mas->tree, slots, offset + 2); 3920 rcu_assign_pointer(slots[offset + 1], wr_mas->entry); 3921 wr_mas->pivots[offset] = mas->index - 1; 3922 wr_mas->pivots[offset + 1] = mas->last; 3923 mas->offset++; /* Keep mas accurate. */ 3924 } 3925 3926 trace_ma_write(__func__, mas, 0, wr_mas->entry); 3927 /* 3928 * Only update gap when the new entry is empty or there is an empty 3929 * entry in the original two ranges. 
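 * Overwriting two occupied ranges with a non-NULL entry cannot create
 * a larger gap, so the walk up the tree to refresh the gap metadata is
 * skipped in that case.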
3930 */ 3931 if (!wr_mas->entry || gap) 3932 mas_update_gap(mas); 3933 3934 return; 3935 } 3936 3937 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) 3938 { 3939 struct ma_state *mas = wr_mas->mas; 3940 3941 if (!wr_mas->slots[wr_mas->offset_end]) { 3942 /* If this one is null, the next and prev are not */ 3943 mas->last = wr_mas->end_piv; 3944 } else { 3945 /* Check next slot(s) if we are overwriting the end */ 3946 if ((mas->last == wr_mas->end_piv) && 3947 (mas->end != wr_mas->offset_end) && 3948 !wr_mas->slots[wr_mas->offset_end + 1]) { 3949 wr_mas->offset_end++; 3950 if (wr_mas->offset_end == mas->end) 3951 mas->last = mas->max; 3952 else 3953 mas->last = wr_mas->pivots[wr_mas->offset_end]; 3954 wr_mas->end_piv = mas->last; 3955 } 3956 } 3957 3958 if (!wr_mas->content) { 3959 /* If this one is null, the next and prev are not */ 3960 mas->index = wr_mas->r_min; 3961 } else { 3962 /* Check prev slot if we are overwriting the start */ 3963 if (mas->index == wr_mas->r_min && mas->offset && 3964 !wr_mas->slots[mas->offset - 1]) { 3965 mas->offset--; 3966 wr_mas->r_min = mas->index = 3967 mas_safe_min(mas, wr_mas->pivots, mas->offset); 3968 wr_mas->r_max = wr_mas->pivots[mas->offset]; 3969 } 3970 } 3971 } 3972 3973 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) 3974 { 3975 while ((wr_mas->offset_end < wr_mas->mas->end) && 3976 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) 3977 wr_mas->offset_end++; 3978 3979 if (wr_mas->offset_end < wr_mas->mas->end) 3980 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; 3981 else 3982 wr_mas->end_piv = wr_mas->mas->max; 3983 } 3984 3985 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) 3986 { 3987 struct ma_state *mas = wr_mas->mas; 3988 unsigned char new_end = mas->end + 2; 3989 3990 new_end -= wr_mas->offset_end - mas->offset; 3991 if (wr_mas->r_min == mas->index) 3992 new_end--; 3993 3994 if (wr_mas->end_piv == mas->last) 3995 new_end--; 3996 3997 return new_end; 3998 } 3999 4000 /* 4001 * mas_wr_append: Attempt to append 4002 * @wr_mas: the maple write state 4003 * @new_end: The end of the node after the modification 4004 * 4005 * This is currently unsafe in rcu mode since the end of the node may be cached 4006 * by readers while the node contents may be updated which could result in 4007 * inaccurate information. 4008 */ 4009 static inline void mas_wr_append(struct ma_wr_state *wr_mas, 4010 unsigned char new_end) 4011 { 4012 struct ma_state *mas = wr_mas->mas; 4013 void __rcu **slots; 4014 unsigned char end = mas->end; 4015 4016 if (new_end < mt_pivots[wr_mas->type]) { 4017 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4018 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end); 4019 } 4020 4021 slots = wr_mas->slots; 4022 if (new_end == end + 1) { 4023 if (mas->last == wr_mas->r_max) { 4024 /* Append to end of range */ 4025 rcu_assign_pointer(slots[new_end], wr_mas->entry); 4026 wr_mas->pivots[end] = mas->index - 1; 4027 mas->offset = new_end; 4028 } else { 4029 /* Append to start of range */ 4030 rcu_assign_pointer(slots[new_end], wr_mas->content); 4031 wr_mas->pivots[end] = mas->last; 4032 rcu_assign_pointer(slots[end], wr_mas->entry); 4033 } 4034 } else { 4035 /* Append to the range without touching any boundaries. 
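The old range is split in
		 * three: the head keeps the old contents, the middle slot
		 * takes the new entry, and the tail keeps the old contents
		 * again.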
*/
4036 rcu_assign_pointer(slots[new_end], wr_mas->content);
4037 wr_mas->pivots[end + 1] = mas->last;
4038 rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4039 wr_mas->pivots[end] = mas->index - 1;
4040 mas->offset = end + 1;
4041 }
4042
4043 if (!wr_mas->content || !wr_mas->entry)
4044 mas_update_gap(mas);
4045
4046 mas->end = new_end;
4047 trace_ma_write(__func__, mas, new_end, wr_mas->entry);
4048 return;
4049 }
4050
4051 /*
4052 * mas_wr_bnode() - Slow path for a modification.
4053 * @wr_mas: The write maple state
4054 *
4055 * This is where splits and rebalances end up.
4056 */
4057 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4058 {
4059 struct maple_big_node b_node;
4060
4061 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4062 memset(&b_node, 0, sizeof(struct maple_big_node));
4063 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4064 mas_commit_b_node(wr_mas, &b_node);
4065 }
4066
4067 /*
4068 * mas_wr_store_entry() - Internal call to store a value
4069 * @wr_mas: The maple write state
4070 */
4071 static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
4072 {
4073 struct ma_state *mas = wr_mas->mas;
4074 unsigned char new_end = mas_wr_new_end(wr_mas);
4075
4076 switch (mas->store_type) {
4077 case wr_invalid:
4078 MT_BUG_ON(mas->tree, 1);
4079 return;
4080 case wr_new_root:
4081 mas_new_root(mas, wr_mas->entry);
4082 break;
4083 case wr_store_root:
4084 mas_store_root(mas, wr_mas->entry);
4085 break;
4086 case wr_exact_fit:
4087 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4088 if (!!wr_mas->entry ^ !!wr_mas->content)
4089 mas_update_gap(mas);
4090 break;
4091 case wr_append:
4092 mas_wr_append(wr_mas, new_end);
4093 break;
4094 case wr_slot_store:
4095 mas_wr_slot_store(wr_mas);
4096 break;
4097 case wr_node_store:
4098 mas_wr_node_store(wr_mas, new_end);
4099 break;
4100 case wr_spanning_store:
4101 mas_wr_spanning_store(wr_mas);
4102 break;
4103 case wr_split_store:
4104 case wr_rebalance:
4105 mas_wr_bnode(wr_mas);
4106 break;
4107 }
4108
4109 return;
4110 }
4111
4112 static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
4113 {
4114 struct ma_state *mas = wr_mas->mas;
4115
4116 if (!mas_is_active(mas)) {
4117 if (mas_is_start(mas))
4118 goto set_content;
4119
4120 if (unlikely(mas_is_paused(mas)))
4121 goto reset;
4122
4123 if (unlikely(mas_is_none(mas)))
4124 goto reset;
4125
4126 if (unlikely(mas_is_overflow(mas)))
4127 goto reset;
4128
4129 if (unlikely(mas_is_underflow(mas)))
4130 goto reset;
4131 }
4132
4133 /*
4134 * A less strict version of mas_is_span_wr() where we allow spanning
4135 * writes within this node. This is to stop partial walks in
4136 * mas_prealloc() from being reset.
4137 */
4138 if (mas->last > mas->max)
4139 goto reset;
4140
4141 if (wr_mas->entry)
4142 goto set_content;
4143
4144 if (mte_is_leaf(mas->node) && mas->last == mas->max)
4145 goto reset;
4146
4147 goto set_content;
4148
4149 reset:
4150 mas_reset(mas);
4151 set_content:
4152 wr_mas->content = mas_start(mas);
4153 }
4154
4155 /**
4156 * mas_prealloc_calc() - Calculate number of nodes needed for a
4157 * given store operation
4158 * @mas: The maple state
4159 * @entry: The entry to store into the tree
4160 *
4161 * Return: Number of nodes required for preallocation.
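 *
 * A spanning store may dirty three nodes per level plus a new root, a
 * split or rebalance roughly two per level, and the remaining store
 * types at most one node in total.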
*/
4163 static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
4164 {
4165 int ret = mas_mt_height(mas) * 3 + 1;
4166
4167 switch (mas->store_type) {
4168 case wr_invalid:
4169 WARN_ON_ONCE(1);
4170 break;
4171 case wr_new_root:
4172 ret = 1;
4173 break;
4174 case wr_store_root:
4175 if (likely((mas->last != 0) || (mas->index != 0)))
4176 ret = 1;
4177 else if (((unsigned long) (entry) & 3) == 2)
4178 ret = 1;
4179 else
4180 ret = 0;
4181 break;
4182 case wr_spanning_store:
4183 ret = mas_mt_height(mas) * 3 + 1;
4184 break;
4185 case wr_split_store:
4186 ret = mas_mt_height(mas) * 2 + 1;
4187 break;
4188 case wr_rebalance:
4189 ret = mas_mt_height(mas) * 2 - 1;
4190 break;
4191 case wr_node_store:
4192 ret = mt_in_rcu(mas->tree) ? 1 : 0;
4193 break;
4194 case wr_append:
4195 case wr_exact_fit:
4196 case wr_slot_store:
4197 ret = 0;
4198 }
4199
4200 return ret;
4201 }
4202
4203 /*
4204 * mas_wr_store_type() - Determine the store type for a given
4205 * store operation.
4206 * @wr_mas: The maple write state
4207 *
4208 * Return: the type of store needed for the operation
4209 */
4210 static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
4211 {
4212 struct ma_state *mas = wr_mas->mas;
4213 unsigned char new_end;
4214
4215 if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
4216 return wr_store_root;
4217
4218 if (unlikely(!mas_wr_walk(wr_mas)))
4219 return wr_spanning_store;
4220
4221 /* At this point, we are at the leaf node that needs to be altered. */
4222 mas_wr_end_piv(wr_mas);
4223 if (!wr_mas->entry)
4224 mas_wr_extend_null(wr_mas);
4225
4226 if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last))
4227 return wr_exact_fit;
4228
4229 if (unlikely(!mas->index && mas->last == ULONG_MAX))
4230 return wr_new_root;
4231
4232 new_end = mas_wr_new_end(wr_mas);
4233 /* Potential spanning rebalance collapsing a node */
4234 if (new_end < mt_min_slots[wr_mas->type]) {
4235 if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK))
4236 return wr_rebalance;
4237 return wr_node_store;
4238 }
4239
4240 if (new_end >= mt_slots[wr_mas->type])
4241 return wr_split_store;
4242
4243 if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end))
4244 return wr_append;
4245
4246 if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
4247 (wr_mas->offset_end - mas->offset == 1)))
4248 return wr_slot_store;
4249
4250 return wr_node_store;
4251 }
4252
4253 /**
4254 * mas_wr_preallocate() - Preallocate enough nodes for a store operation
4255 * @wr_mas: The maple write state
4256 * @entry: The entry that will be stored
4257 *
4258 */
4259 static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
4260 {
4261 struct ma_state *mas = wr_mas->mas;
4262 int request;
4263
4264 mas_wr_prealloc_setup(wr_mas);
4265 mas->store_type = mas_wr_store_type(wr_mas);
4266 request = mas_prealloc_calc(mas, entry);
4267 if (!request)
4268 return;
4269
4270 mas_node_count(mas, request);
4271 }
4272
4273 /**
4274 * mas_insert() - Internal call to insert a value
4275 * @mas: The maple state
4276 * @entry: The entry to store
4277 *
4278 * Return: %NULL if the entry was inserted, or the contents that already
4279 * exist at the requested index otherwise. The maple state needs to be checked for error conditions.
4280 */
4281 static inline void *mas_insert(struct ma_state *mas, void *entry)
4282 {
4283 MA_WR_STATE(wr_mas, mas, entry);
4284
4285 /*
4286 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4287 * tree.
If the insert fits exactly into an existing gap with a value 4288 * of NULL, then the slot only needs to be written with the new value. 4289 * If the range being inserted is adjacent to another range, then only a 4290 * single pivot needs to be inserted (as well as writing the entry). If 4291 * the new range is within a gap but does not touch any other ranges, 4292 * then two pivots need to be inserted: the start - 1, and the end. As 4293 * usual, the entry must be written. Most operations require a new node 4294 * to be allocated and replace an existing node to ensure RCU safety, 4295 * when in RCU mode. The exception to requiring a newly allocated node 4296 * is when inserting at the end of a node (appending). When done 4297 * carefully, appending can reuse the node in place. 4298 */ 4299 wr_mas.content = mas_start(mas); 4300 if (wr_mas.content) 4301 goto exists; 4302 4303 mas_wr_preallocate(&wr_mas, entry); 4304 if (mas_is_err(mas)) 4305 return NULL; 4306 4307 /* spanning writes always overwrite something */ 4308 if (mas->store_type == wr_spanning_store) 4309 goto exists; 4310 4311 /* At this point, we are at the leaf node that needs to be altered. */ 4312 if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) { 4313 wr_mas.offset_end = mas->offset; 4314 wr_mas.end_piv = wr_mas.r_max; 4315 4316 if (wr_mas.content || (mas->last > wr_mas.r_max)) 4317 goto exists; 4318 } 4319 4320 mas_wr_store_entry(&wr_mas); 4321 return wr_mas.content; 4322 4323 exists: 4324 mas_set_err(mas, -EEXIST); 4325 return wr_mas.content; 4326 4327 } 4328 4329 /** 4330 * mas_alloc_cyclic() - Internal call to find somewhere to store an entry 4331 * @mas: The maple state. 4332 * @startp: Pointer to ID. 4333 * @range_lo: Lower bound of range to search. 4334 * @range_hi: Upper bound of range to search. 4335 * @entry: The entry to store. 4336 * @next: Pointer to next ID to allocate. 4337 * @gfp: The GFP_FLAGS to use for allocations. 4338 * 4339 * Return: 0 if the allocation succeeded without wrapping, 1 if the 4340 * allocation succeeded after wrapping, or -EBUSY if there are no 4341 * free entries. 
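 *
 * An illustrative sketch (assumes an initialised maple state @mas, a
 * populated tree and a valid @ptr; the tree lock must be held):
 *
 *	unsigned long id, next = 0;
 *	int ret;
 *
 *	ret = mas_alloc_cyclic(&mas, &id, ptr, 0, 255, &next, GFP_KERNEL);
 *	if (ret >= 0)
 *		pr_debug("allocated id %lu, next search at %lu\n", id, next);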
4342 */ 4343 int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp, 4344 void *entry, unsigned long range_lo, unsigned long range_hi, 4345 unsigned long *next, gfp_t gfp) 4346 { 4347 unsigned long min = range_lo; 4348 int ret = 0; 4349 4350 range_lo = max(min, *next); 4351 ret = mas_empty_area(mas, range_lo, range_hi, 1); 4352 if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) { 4353 mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED; 4354 ret = 1; 4355 } 4356 if (ret < 0 && range_lo > min) { 4357 mas_reset(mas); 4358 ret = mas_empty_area(mas, min, range_hi, 1); 4359 if (ret == 0) 4360 ret = 1; 4361 } 4362 if (ret < 0) 4363 return ret; 4364 4365 do { 4366 mas_insert(mas, entry); 4367 } while (mas_nomem(mas, gfp)); 4368 if (mas_is_err(mas)) 4369 return xa_err(mas->node); 4370 4371 *startp = mas->index; 4372 *next = *startp + 1; 4373 if (*next == 0) 4374 mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED; 4375 4376 mas_destroy(mas); 4377 return ret; 4378 } 4379 EXPORT_SYMBOL(mas_alloc_cyclic); 4380 4381 static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index) 4382 { 4383 retry: 4384 mas_set(mas, index); 4385 mas_state_walk(mas); 4386 if (mas_is_start(mas)) 4387 goto retry; 4388 } 4389 4390 static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas, 4391 struct maple_node *node, const unsigned long index) 4392 { 4393 if (unlikely(ma_dead_node(node))) { 4394 mas_rewalk(mas, index); 4395 return true; 4396 } 4397 return false; 4398 } 4399 4400 /* 4401 * mas_prev_node() - Find the prev non-null entry at the same level in the 4402 * tree. The prev value will be mas->node[mas->offset] or the status will be 4403 * ma_none. 4404 * @mas: The maple state 4405 * @min: The lower limit to search 4406 * 4407 * The prev node value will be mas->node[mas->offset] or the status will be 4408 * ma_none. 4409 * Return: 1 if the node is dead, 0 otherwise. 4410 */ 4411 static int mas_prev_node(struct ma_state *mas, unsigned long min) 4412 { 4413 enum maple_type mt; 4414 int offset, level; 4415 void __rcu **slots; 4416 struct maple_node *node; 4417 unsigned long *pivots; 4418 unsigned long max; 4419 4420 node = mas_mn(mas); 4421 if (!mas->min) 4422 goto no_entry; 4423 4424 max = mas->min - 1; 4425 if (max < min) 4426 goto no_entry; 4427 4428 level = 0; 4429 do { 4430 if (ma_is_root(node)) 4431 goto no_entry; 4432 4433 /* Walk up. 
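Stop at the first ancestor where
		 * this node is not in slot 0; the previous entry lies
		 * below the slot to its left.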
*/ 4434 if (unlikely(mas_ascend(mas))) 4435 return 1; 4436 offset = mas->offset; 4437 level++; 4438 node = mas_mn(mas); 4439 } while (!offset); 4440 4441 offset--; 4442 mt = mte_node_type(mas->node); 4443 while (level > 1) { 4444 level--; 4445 slots = ma_slots(node, mt); 4446 mas->node = mas_slot(mas, slots, offset); 4447 if (unlikely(ma_dead_node(node))) 4448 return 1; 4449 4450 mt = mte_node_type(mas->node); 4451 node = mas_mn(mas); 4452 pivots = ma_pivots(node, mt); 4453 offset = ma_data_end(node, mt, pivots, max); 4454 if (unlikely(ma_dead_node(node))) 4455 return 1; 4456 } 4457 4458 slots = ma_slots(node, mt); 4459 mas->node = mas_slot(mas, slots, offset); 4460 pivots = ma_pivots(node, mt); 4461 if (unlikely(ma_dead_node(node))) 4462 return 1; 4463 4464 if (likely(offset)) 4465 mas->min = pivots[offset - 1] + 1; 4466 mas->max = max; 4467 mas->offset = mas_data_end(mas); 4468 if (unlikely(mte_dead_node(mas->node))) 4469 return 1; 4470 4471 mas->end = mas->offset; 4472 return 0; 4473 4474 no_entry: 4475 if (unlikely(ma_dead_node(node))) 4476 return 1; 4477 4478 mas->status = ma_underflow; 4479 return 0; 4480 } 4481 4482 /* 4483 * mas_prev_slot() - Get the entry in the previous slot 4484 * 4485 * @mas: The maple state 4486 * @min: The minimum starting range 4487 * @empty: Can be empty 4488 * 4489 * Return: The entry in the previous slot which is possibly NULL 4490 */ 4491 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) 4492 { 4493 void *entry; 4494 void __rcu **slots; 4495 unsigned long pivot; 4496 enum maple_type type; 4497 unsigned long *pivots; 4498 struct maple_node *node; 4499 unsigned long save_point = mas->index; 4500 4501 retry: 4502 node = mas_mn(mas); 4503 type = mte_node_type(mas->node); 4504 pivots = ma_pivots(node, type); 4505 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4506 goto retry; 4507 4508 if (mas->min <= min) { 4509 pivot = mas_safe_min(mas, pivots, mas->offset); 4510 4511 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4512 goto retry; 4513 4514 if (pivot <= min) 4515 goto underflow; 4516 } 4517 4518 again: 4519 if (likely(mas->offset)) { 4520 mas->offset--; 4521 mas->last = mas->index - 1; 4522 mas->index = mas_safe_min(mas, pivots, mas->offset); 4523 } else { 4524 if (mas->index <= min) 4525 goto underflow; 4526 4527 if (mas_prev_node(mas, min)) { 4528 mas_rewalk(mas, save_point); 4529 goto retry; 4530 } 4531 4532 if (WARN_ON_ONCE(mas_is_underflow(mas))) 4533 return NULL; 4534 4535 mas->last = mas->max; 4536 node = mas_mn(mas); 4537 type = mte_node_type(mas->node); 4538 pivots = ma_pivots(node, type); 4539 mas->index = pivots[mas->offset - 1] + 1; 4540 } 4541 4542 slots = ma_slots(node, type); 4543 entry = mas_slot(mas, slots, mas->offset); 4544 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4545 goto retry; 4546 4547 4548 if (likely(entry)) 4549 return entry; 4550 4551 if (!empty) { 4552 if (mas->index <= min) { 4553 mas->status = ma_underflow; 4554 return NULL; 4555 } 4556 4557 goto again; 4558 } 4559 4560 return entry; 4561 4562 underflow: 4563 mas->status = ma_underflow; 4564 return NULL; 4565 } 4566 4567 /* 4568 * mas_next_node() - Get the next node at the same level in the tree. 4569 * @mas: The maple state 4570 * @node: The maple node 4571 * @max: The maximum pivot value to check. 4572 * 4573 * The next value will be mas->node[mas->offset] or the status will have 4574 * overflowed. 4575 * Return: 1 on dead node, 0 otherwise. 
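 * On success, @mas->min, @mas->max and @mas->end are updated to
 * describe the node that was reached.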
4576 */ 4577 static int mas_next_node(struct ma_state *mas, struct maple_node *node, 4578 unsigned long max) 4579 { 4580 unsigned long min; 4581 unsigned long *pivots; 4582 struct maple_enode *enode; 4583 struct maple_node *tmp; 4584 int level = 0; 4585 unsigned char node_end; 4586 enum maple_type mt; 4587 void __rcu **slots; 4588 4589 if (mas->max >= max) 4590 goto overflow; 4591 4592 min = mas->max + 1; 4593 level = 0; 4594 do { 4595 if (ma_is_root(node)) 4596 goto overflow; 4597 4598 /* Walk up. */ 4599 if (unlikely(mas_ascend(mas))) 4600 return 1; 4601 4602 level++; 4603 node = mas_mn(mas); 4604 mt = mte_node_type(mas->node); 4605 pivots = ma_pivots(node, mt); 4606 node_end = ma_data_end(node, mt, pivots, mas->max); 4607 if (unlikely(ma_dead_node(node))) 4608 return 1; 4609 4610 } while (unlikely(mas->offset == node_end)); 4611 4612 slots = ma_slots(node, mt); 4613 mas->offset++; 4614 enode = mas_slot(mas, slots, mas->offset); 4615 if (unlikely(ma_dead_node(node))) 4616 return 1; 4617 4618 if (level > 1) 4619 mas->offset = 0; 4620 4621 while (unlikely(level > 1)) { 4622 level--; 4623 mas->node = enode; 4624 node = mas_mn(mas); 4625 mt = mte_node_type(mas->node); 4626 slots = ma_slots(node, mt); 4627 enode = mas_slot(mas, slots, 0); 4628 if (unlikely(ma_dead_node(node))) 4629 return 1; 4630 } 4631 4632 if (!mas->offset) 4633 pivots = ma_pivots(node, mt); 4634 4635 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); 4636 tmp = mte_to_node(enode); 4637 mt = mte_node_type(enode); 4638 pivots = ma_pivots(tmp, mt); 4639 mas->end = ma_data_end(tmp, mt, pivots, mas->max); 4640 if (unlikely(ma_dead_node(node))) 4641 return 1; 4642 4643 mas->node = enode; 4644 mas->min = min; 4645 return 0; 4646 4647 overflow: 4648 if (unlikely(ma_dead_node(node))) 4649 return 1; 4650 4651 mas->status = ma_overflow; 4652 return 0; 4653 } 4654 4655 /* 4656 * mas_next_slot() - Get the entry in the next slot 4657 * 4658 * @mas: The maple state 4659 * @max: The maximum starting range 4660 * @empty: Can be empty 4661 * 4662 * Return: The entry in the next slot which is possibly NULL 4663 */ 4664 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) 4665 { 4666 void __rcu **slots; 4667 unsigned long *pivots; 4668 unsigned long pivot; 4669 enum maple_type type; 4670 struct maple_node *node; 4671 unsigned long save_point = mas->last; 4672 void *entry; 4673 4674 retry: 4675 node = mas_mn(mas); 4676 type = mte_node_type(mas->node); 4677 pivots = ma_pivots(node, type); 4678 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4679 goto retry; 4680 4681 if (mas->max >= max) { 4682 if (likely(mas->offset < mas->end)) 4683 pivot = pivots[mas->offset]; 4684 else 4685 pivot = mas->max; 4686 4687 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4688 goto retry; 4689 4690 if (pivot >= max) { /* Was at the limit, next will extend beyond */ 4691 mas->status = ma_overflow; 4692 return NULL; 4693 } 4694 } 4695 4696 if (likely(mas->offset < mas->end)) { 4697 mas->index = pivots[mas->offset] + 1; 4698 again: 4699 mas->offset++; 4700 if (likely(mas->offset < mas->end)) 4701 mas->last = pivots[mas->offset]; 4702 else 4703 mas->last = mas->max; 4704 } else { 4705 if (mas->last >= max) { 4706 mas->status = ma_overflow; 4707 return NULL; 4708 } 4709 4710 if (mas_next_node(mas, node, max)) { 4711 mas_rewalk(mas, save_point); 4712 goto retry; 4713 } 4714 4715 if (WARN_ON_ONCE(mas_is_overflow(mas))) 4716 return NULL; 4717 4718 mas->offset = 0; 4719 mas->index = mas->min; 4720 node = mas_mn(mas); 4721 
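/* Moved to a new node: reload the node-local type and pivots. */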
type = mte_node_type(mas->node);
4722 pivots = ma_pivots(node, type);
4723 mas->last = pivots[0];
4724 }
4725
4726 slots = ma_slots(node, type);
4727 entry = mt_slot(mas->tree, slots, mas->offset);
4728 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4729 goto retry;
4730
4731 if (entry)
4732 return entry;
4733
4734
4735 if (!empty) {
4736 if (mas->last >= max) {
4737 mas->status = ma_overflow;
4738 return NULL;
4739 }
4740
4741 mas->index = mas->last + 1;
4742 goto again;
4743 }
4744
4745 return entry;
4746 }
4747
4748 /*
4749 * mas_next_entry() - Internal function to get the next entry.
4750 * @mas: The maple state
4751 * @limit: The maximum range start.
4752 *
4753 * Set the @mas->node to the next entry and the range_start to
4754 * the beginning value for the entry. Does not check beyond @limit.
4755 * Sets @mas->index and @mas->last to the range. Does not update @mas->index and
4756 * @mas->last on overflow.
4757 * Restarts on dead nodes.
4758 *
4759 * Return: the next entry or %NULL.
4760 */
4761 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4762 {
4763 if (mas->last >= limit) {
4764 mas->status = ma_overflow;
4765 return NULL;
4766 }
4767
4768 return mas_next_slot(mas, limit, false);
4769 }
4770
4771 /*
4772 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4773 * highest gap address of a given size in a given node and descend.
4774 * @mas: The maple state
4775 * @size: The needed size.
4776 *
4777 * Return: True if found in a leaf, false otherwise.
4778 *
4779 */
4780 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4781 unsigned long *gap_min, unsigned long *gap_max)
4782 {
4783 enum maple_type type = mte_node_type(mas->node);
4784 struct maple_node *node = mas_mn(mas);
4785 unsigned long *pivots, *gaps;
4786 void __rcu **slots;
4787 unsigned long gap = 0;
4788 unsigned long max, min;
4789 unsigned char offset;
4790
4791 if (unlikely(mas_is_err(mas)))
4792 return true;
4793
4794 if (ma_is_dense(type)) {
4795 /* dense nodes. */
4796 mas->offset = (unsigned char)(mas->index - mas->min);
4797 return true;
4798 }
4799
4800 pivots = ma_pivots(node, type);
4801 slots = ma_slots(node, type);
4802 gaps = ma_gaps(node, type);
4803 offset = mas->offset;
4804 min = mas_safe_min(mas, pivots, offset);
4805 /* Skip out of bounds. */
4806 while (mas->last < min)
4807 min = mas_safe_min(mas, pivots, --offset);
4808
4809 max = mas_safe_pivot(mas, pivots, offset, type);
4810 while (mas->index <= max) {
4811 gap = 0;
4812 if (gaps)
4813 gap = gaps[offset];
4814 else if (!mas_slot(mas, slots, offset))
4815 gap = max - min + 1;
4816
4817 if (gap) {
4818 if ((size <= gap) && (size <= mas->last - min + 1))
4819 break;
4820
4821 if (!gaps) {
4822 /* Skip the next slot, it cannot be a gap. */
4823 if (offset < 2)
4824 goto ascend;
4825
4826 offset -= 2;
4827 max = pivots[offset];
4828 min = mas_safe_min(mas, pivots, offset);
4829 continue;
4830 }
4831 }
4832
4833 if (!offset)
4834 goto ascend;
4835
4836 offset--;
4837 max = min - 1;
4838 min = mas_safe_min(mas, pivots, offset);
4839 }
4840
4841 if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4842 goto no_space;
4843
4844 if (unlikely(ma_is_leaf(type))) {
4845 mas->offset = offset;
4846 *gap_min = min;
4847 *gap_max = min + gap - 1;
4848 return true;
4849 }
4850
4851 /* descend, only happens under lock.
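Narrow the window to the
	 * child that contains the gap and resume the walk from its
	 * highest slot.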
*/ 4852 mas->node = mas_slot(mas, slots, offset); 4853 mas->min = min; 4854 mas->max = max; 4855 mas->offset = mas_data_end(mas); 4856 return false; 4857 4858 ascend: 4859 if (!mte_is_root(mas->node)) 4860 return false; 4861 4862 no_space: 4863 mas_set_err(mas, -EBUSY); 4864 return false; 4865 } 4866 4867 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4868 { 4869 enum maple_type type = mte_node_type(mas->node); 4870 unsigned long pivot, min, gap = 0; 4871 unsigned char offset, data_end; 4872 unsigned long *gaps, *pivots; 4873 void __rcu **slots; 4874 struct maple_node *node; 4875 bool found = false; 4876 4877 if (ma_is_dense(type)) { 4878 mas->offset = (unsigned char)(mas->index - mas->min); 4879 return true; 4880 } 4881 4882 node = mas_mn(mas); 4883 pivots = ma_pivots(node, type); 4884 slots = ma_slots(node, type); 4885 gaps = ma_gaps(node, type); 4886 offset = mas->offset; 4887 min = mas_safe_min(mas, pivots, offset); 4888 data_end = ma_data_end(node, type, pivots, mas->max); 4889 for (; offset <= data_end; offset++) { 4890 pivot = mas_safe_pivot(mas, pivots, offset, type); 4891 4892 /* Not within lower bounds */ 4893 if (mas->index > pivot) 4894 goto next_slot; 4895 4896 if (gaps) 4897 gap = gaps[offset]; 4898 else if (!mas_slot(mas, slots, offset)) 4899 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 4900 else 4901 goto next_slot; 4902 4903 if (gap >= size) { 4904 if (ma_is_leaf(type)) { 4905 found = true; 4906 goto done; 4907 } 4908 if (mas->index <= pivot) { 4909 mas->node = mas_slot(mas, slots, offset); 4910 mas->min = min; 4911 mas->max = pivot; 4912 offset = 0; 4913 break; 4914 } 4915 } 4916 next_slot: 4917 min = pivot + 1; 4918 if (mas->last <= pivot) { 4919 mas_set_err(mas, -EBUSY); 4920 return true; 4921 } 4922 } 4923 4924 if (mte_is_root(mas->node)) 4925 found = true; 4926 done: 4927 mas->offset = offset; 4928 return found; 4929 } 4930 4931 /** 4932 * mas_walk() - Search for @mas->index in the tree. 4933 * @mas: The maple state. 4934 * 4935 * mas->index and mas->last will be set to the range if there is a value. If 4936 * mas->status is ma_none, reset to ma_start 4937 * 4938 * Return: the entry at the location or %NULL. 4939 */ 4940 void *mas_walk(struct ma_state *mas) 4941 { 4942 void *entry; 4943 4944 if (!mas_is_active(mas) || !mas_is_start(mas)) 4945 mas->status = ma_start; 4946 retry: 4947 entry = mas_state_walk(mas); 4948 if (mas_is_start(mas)) { 4949 goto retry; 4950 } else if (mas_is_none(mas)) { 4951 mas->index = 0; 4952 mas->last = ULONG_MAX; 4953 } else if (mas_is_ptr(mas)) { 4954 if (!mas->index) { 4955 mas->last = 0; 4956 return entry; 4957 } 4958 4959 mas->index = 1; 4960 mas->last = ULONG_MAX; 4961 mas->status = ma_none; 4962 return NULL; 4963 } 4964 4965 return entry; 4966 } 4967 EXPORT_SYMBOL_GPL(mas_walk); 4968 4969 static inline bool mas_rewind_node(struct ma_state *mas) 4970 { 4971 unsigned char slot; 4972 4973 do { 4974 if (mte_is_root(mas->node)) { 4975 slot = mas->offset; 4976 if (!slot) 4977 return false; 4978 } else { 4979 mas_ascend(mas); 4980 slot = mas->offset; 4981 } 4982 } while (!slot); 4983 4984 mas->offset = --slot; 4985 return true; 4986 } 4987 4988 /* 4989 * mas_skip_node() - Internal function. Skip over a node. 4990 * @mas: The maple state. 4991 * 4992 * Return: true if there is another node, false otherwise. 
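 * Ascends while the current node has no slots left to the right, then
 * advances one slot.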
4993 */ 4994 static inline bool mas_skip_node(struct ma_state *mas) 4995 { 4996 if (mas_is_err(mas)) 4997 return false; 4998 4999 do { 5000 if (mte_is_root(mas->node)) { 5001 if (mas->offset >= mas_data_end(mas)) { 5002 mas_set_err(mas, -EBUSY); 5003 return false; 5004 } 5005 } else { 5006 mas_ascend(mas); 5007 } 5008 } while (mas->offset >= mas_data_end(mas)); 5009 5010 mas->offset++; 5011 return true; 5012 } 5013 5014 /* 5015 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of 5016 * @size 5017 * @mas: The maple state 5018 * @size: The size of the gap required 5019 * 5020 * Search between @mas->index and @mas->last for a gap of @size. 5021 */ 5022 static inline void mas_awalk(struct ma_state *mas, unsigned long size) 5023 { 5024 struct maple_enode *last = NULL; 5025 5026 /* 5027 * There are 4 options: 5028 * go to child (descend) 5029 * go back to parent (ascend) 5030 * no gap found. (return, slot == MAPLE_NODE_SLOTS) 5031 * found the gap. (return, slot != MAPLE_NODE_SLOTS) 5032 */ 5033 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { 5034 if (last == mas->node) 5035 mas_skip_node(mas); 5036 else 5037 last = mas->node; 5038 } 5039 } 5040 5041 /* 5042 * mas_sparse_area() - Internal function. Return upper or lower limit when 5043 * searching for a gap in an empty tree. 5044 * @mas: The maple state 5045 * @min: the minimum range 5046 * @max: The maximum range 5047 * @size: The size of the gap 5048 * @fwd: Searching forward or back 5049 */ 5050 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min, 5051 unsigned long max, unsigned long size, bool fwd) 5052 { 5053 if (!unlikely(mas_is_none(mas)) && min == 0) { 5054 min++; 5055 /* 5056 * At this time, min is increased, we need to recheck whether 5057 * the size is satisfied. 5058 */ 5059 if (min > max || max - min + 1 < size) 5060 return -EBUSY; 5061 } 5062 /* mas_is_ptr */ 5063 5064 if (fwd) { 5065 mas->index = min; 5066 mas->last = min + size - 1; 5067 } else { 5068 mas->last = max; 5069 mas->index = max - size + 1; 5070 } 5071 return 0; 5072 } 5073 5074 /* 5075 * mas_empty_area() - Get the lowest address within the range that is 5076 * sufficient for the size requested. 
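 * On success, @mas->index and @mas->last are set to cover the gap that
 * was found.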
5077 * @mas: The maple state 5078 * @min: The lowest value of the range 5079 * @max: The highest value of the range 5080 * @size: The size needed 5081 */ 5082 int mas_empty_area(struct ma_state *mas, unsigned long min, 5083 unsigned long max, unsigned long size) 5084 { 5085 unsigned char offset; 5086 unsigned long *pivots; 5087 enum maple_type mt; 5088 struct maple_node *node; 5089 5090 if (min > max) 5091 return -EINVAL; 5092 5093 if (size == 0 || max - min < size - 1) 5094 return -EINVAL; 5095 5096 if (mas_is_start(mas)) 5097 mas_start(mas); 5098 else if (mas->offset >= 2) 5099 mas->offset -= 2; 5100 else if (!mas_skip_node(mas)) 5101 return -EBUSY; 5102 5103 /* Empty set */ 5104 if (mas_is_none(mas) || mas_is_ptr(mas)) 5105 return mas_sparse_area(mas, min, max, size, true); 5106 5107 /* The start of the window can only be within these values */ 5108 mas->index = min; 5109 mas->last = max; 5110 mas_awalk(mas, size); 5111 5112 if (unlikely(mas_is_err(mas))) 5113 return xa_err(mas->node); 5114 5115 offset = mas->offset; 5116 if (unlikely(offset == MAPLE_NODE_SLOTS)) 5117 return -EBUSY; 5118 5119 node = mas_mn(mas); 5120 mt = mte_node_type(mas->node); 5121 pivots = ma_pivots(node, mt); 5122 min = mas_safe_min(mas, pivots, offset); 5123 if (mas->index < min) 5124 mas->index = min; 5125 mas->last = mas->index + size - 1; 5126 mas->end = ma_data_end(node, mt, pivots, mas->max); 5127 return 0; 5128 } 5129 EXPORT_SYMBOL_GPL(mas_empty_area); 5130 5131 /* 5132 * mas_empty_area_rev() - Get the highest address within the range that is 5133 * sufficient for the size requested. 5134 * @mas: The maple state 5135 * @min: The lowest value of the range 5136 * @max: The highest value of the range 5137 * @size: The size needed 5138 */ 5139 int mas_empty_area_rev(struct ma_state *mas, unsigned long min, 5140 unsigned long max, unsigned long size) 5141 { 5142 struct maple_enode *last = mas->node; 5143 5144 if (min > max) 5145 return -EINVAL; 5146 5147 if (size == 0 || max - min < size - 1) 5148 return -EINVAL; 5149 5150 if (mas_is_start(mas)) 5151 mas_start(mas); 5152 else if ((mas->offset < 2) && (!mas_rewind_node(mas))) 5153 return -EBUSY; 5154 5155 if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) 5156 return mas_sparse_area(mas, min, max, size, false); 5157 else if (mas->offset >= 2) 5158 mas->offset -= 2; 5159 else 5160 mas->offset = mas_data_end(mas); 5161 5162 5163 /* The start of the window can only be within these values. */ 5164 mas->index = min; 5165 mas->last = max; 5166 5167 while (!mas_rev_awalk(mas, size, &min, &max)) { 5168 if (last == mas->node) { 5169 if (!mas_rewind_node(mas)) 5170 return -EBUSY; 5171 } else { 5172 last = mas->node; 5173 } 5174 } 5175 5176 if (mas_is_err(mas)) 5177 return xa_err(mas->node); 5178 5179 if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) 5180 return -EBUSY; 5181 5182 /* Trim the upper limit to the max. */ 5183 if (max < mas->last) 5184 mas->last = max; 5185 5186 mas->index = mas->last - size + 1; 5187 mas->end = mas_data_end(mas); 5188 return 0; 5189 } 5190 EXPORT_SYMBOL_GPL(mas_empty_area_rev); 5191 5192 /* 5193 * mte_dead_leaves() - Mark all leaves of a node as dead. 5194 * @enode: the encoded node 5195 * @mt: the maple tree 5196 * @slots: Pointer to the slot array 5197 * 5198 * Must hold the write lock. 5199 * 5200 * Return: The number of leaves marked as dead. 
5201 */ 5202 static inline 5203 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt, 5204 void __rcu **slots) 5205 { 5206 struct maple_node *node; 5207 enum maple_type type; 5208 void *entry; 5209 int offset; 5210 5211 for (offset = 0; offset < mt_slot_count(enode); offset++) { 5212 entry = mt_slot(mt, slots, offset); 5213 type = mte_node_type(entry); 5214 node = mte_to_node(entry); 5215 /* Use both node and type to catch LE & BE metadata */ 5216 if (!node || !type) 5217 break; 5218 5219 mte_set_node_dead(entry); 5220 node->type = type; 5221 rcu_assign_pointer(slots[offset], node); 5222 } 5223 5224 return offset; 5225 } 5226 5227 /** 5228 * mte_dead_walk() - Walk down a dead tree to just before the leaves 5229 * @enode: The maple encoded node 5230 * @offset: The starting offset 5231 * 5232 * Note: This can only be used from the RCU callback context. 5233 */ 5234 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset) 5235 { 5236 struct maple_node *node, *next; 5237 void __rcu **slots = NULL; 5238 5239 next = mte_to_node(*enode); 5240 do { 5241 *enode = ma_enode_ptr(next); 5242 node = mte_to_node(*enode); 5243 slots = ma_slots(node, node->type); 5244 next = rcu_dereference_protected(slots[offset], 5245 lock_is_held(&rcu_callback_map)); 5246 offset = 0; 5247 } while (!ma_is_leaf(next->type)); 5248 5249 return slots; 5250 } 5251 5252 /** 5253 * mt_free_walk() - Walk & free a tree in the RCU callback context 5254 * @head: The RCU head that's within the node. 5255 * 5256 * Note: This can only be used from the RCU callback context. 5257 */ 5258 static void mt_free_walk(struct rcu_head *head) 5259 { 5260 void __rcu **slots; 5261 struct maple_node *node, *start; 5262 struct maple_enode *enode; 5263 unsigned char offset; 5264 enum maple_type type; 5265 5266 node = container_of(head, struct maple_node, rcu); 5267 5268 if (ma_is_leaf(node->type)) 5269 goto free_leaf; 5270 5271 start = node; 5272 enode = mt_mk_node(node, node->type); 5273 slots = mte_dead_walk(&enode, 0); 5274 node = mte_to_node(enode); 5275 do { 5276 mt_free_bulk(node->slot_len, slots); 5277 offset = node->parent_slot + 1; 5278 enode = node->piv_parent; 5279 if (mte_to_node(enode) == node) 5280 goto free_leaf; 5281 5282 type = mte_node_type(enode); 5283 slots = ma_slots(mte_to_node(enode), type); 5284 if ((offset < mt_slots[type]) && 5285 rcu_dereference_protected(slots[offset], 5286 lock_is_held(&rcu_callback_map))) 5287 slots = mte_dead_walk(&enode, offset); 5288 node = mte_to_node(enode); 5289 } while ((node != start) || (node->slot_len < offset)); 5290 5291 slots = ma_slots(node, node->type); 5292 mt_free_bulk(node->slot_len, slots); 5293 5294 free_leaf: 5295 mt_free_rcu(&node->rcu); 5296 } 5297 5298 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode, 5299 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset) 5300 { 5301 struct maple_node *node; 5302 struct maple_enode *next = *enode; 5303 void __rcu **slots = NULL; 5304 enum maple_type type; 5305 unsigned char next_offset = 0; 5306 5307 do { 5308 *enode = next; 5309 node = mte_to_node(*enode); 5310 type = mte_node_type(*enode); 5311 slots = ma_slots(node, type); 5312 next = mt_slot_locked(mt, slots, next_offset); 5313 if ((mte_dead_node(next))) 5314 next = mt_slot_locked(mt, slots, ++next_offset); 5315 5316 mte_set_node_dead(*enode); 5317 node->type = type; 5318 node->piv_parent = prev; 5319 node->parent_slot = offset; 5320 offset = next_offset; 5321 next_offset = 0; 5322 prev = 
*enode;
5323 } while (!mte_is_leaf(next));
5324
5325 return slots;
5326 }
5327
5328 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5329 bool free)
5330 {
5331 void __rcu **slots;
5332 struct maple_node *node = mte_to_node(enode);
5333 struct maple_enode *start;
5334
5335 if (mte_is_leaf(enode)) {
5336 node->type = mte_node_type(enode);
5337 goto free_leaf;
5338 }
5339
5340 start = enode;
5341 slots = mte_destroy_descend(&enode, mt, start, 0);
5342 node = mte_to_node(enode); // Updated in the above call.
5343 do {
5344 enum maple_type type;
5345 unsigned char offset;
5346 struct maple_enode *parent, *tmp;
5347
5348 node->slot_len = mte_dead_leaves(enode, mt, slots);
5349 if (free)
5350 mt_free_bulk(node->slot_len, slots);
5351 offset = node->parent_slot + 1;
5352 enode = node->piv_parent;
5353 if (mte_to_node(enode) == node)
5354 goto free_leaf;
5355
5356 type = mte_node_type(enode);
5357 slots = ma_slots(mte_to_node(enode), type);
5358 if (offset >= mt_slots[type])
5359 goto next;
5360
5361 tmp = mt_slot_locked(mt, slots, offset);
5362 if (mte_node_type(tmp) && mte_to_node(tmp)) {
5363 parent = enode;
5364 enode = tmp;
5365 slots = mte_destroy_descend(&enode, mt, parent, offset);
5366 }
5367 next:
5368 node = mte_to_node(enode);
5369 } while (start != enode);
5370
5371 node = mte_to_node(enode);
5372 node->slot_len = mte_dead_leaves(enode, mt, slots);
5373 if (free)
5374 mt_free_bulk(node->slot_len, slots);
5375
5376 free_leaf:
5377 if (free)
5378 mt_free_rcu(&node->rcu);
5379 else
5380 mt_clear_meta(mt, node, node->type);
5381 }
5382
5383 /*
5384 * mte_destroy_walk() - Free a tree or sub-tree.
5385 * @enode: the encoded maple node (maple_enode) to start
5386 * @mt: the tree to free - needed for node types.
5387 *
5388 * Must hold the write lock.
5389 */
5390 static inline void mte_destroy_walk(struct maple_enode *enode,
5391 struct maple_tree *mt)
5392 {
5393 struct maple_node *node = mte_to_node(enode);
5394
5395 if (mt_in_rcu(mt)) {
5396 mt_destroy_walk(enode, mt, false);
5397 call_rcu(&node->rcu, mt_free_walk);
5398 } else {
5399 mt_destroy_walk(enode, mt, true);
5400 }
5401 }
5402 /* Interface */
5403
5404 /**
5405 * mas_store() - Store an @entry.
5406 * @mas: The maple state.
5407 * @entry: The entry to store.
5408 *
5409 * The @mas->index and @mas->last are used to set the range for the @entry.
5410 *
5411 * Return: the first entry between mas->index and mas->last or %NULL.
5412 */
5413 void *mas_store(struct ma_state *mas, void *entry)
5414 {
5415 int request;
5416 MA_WR_STATE(wr_mas, mas, entry);
5417
5418 trace_ma_write(__func__, mas, 0, entry);
5419 #ifdef CONFIG_DEBUG_MAPLE_TREE
5420 if (MAS_WARN_ON(mas, mas->index > mas->last))
5421 pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
5422 entry);
5423
5424 if (mas->index > mas->last) {
5425 mas_set_err(mas, -EINVAL);
5426 return NULL;
5427 }
5428
5429 #endif
5430
5431 /*
5432 * Storing is the same operation as insert with the added caveat that it
5433 * can overwrite entries. Although this seems simple enough, one may
5434 * want to examine what happens if a single store operation were to
5435 * overwrite multiple entries within a self-balancing B-Tree.
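 *
 * An illustrative sketch (assumes the tree lock is held and 'entry' is
 * a valid pointer); the single call below replaces every range that
 * intersects 36-72:
 *
 *	MA_STATE(mas, &tree, 36, 72);
 *	mas_store(&mas, entry);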
5436 */ 5437 mas_wr_prealloc_setup(&wr_mas); 5438 mas->store_type = mas_wr_store_type(&wr_mas); 5439 if (mas->mas_flags & MA_STATE_PREALLOC) { 5440 mas_wr_store_entry(&wr_mas); 5441 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); 5442 return wr_mas.content; 5443 } 5444 5445 request = mas_prealloc_calc(mas, entry); 5446 if (!request) 5447 goto store; 5448 5449 mas_node_count(mas, request); 5450 if (mas_is_err(mas)) 5451 return NULL; 5452 5453 store: 5454 mas_wr_store_entry(&wr_mas); 5455 mas_destroy(mas); 5456 return wr_mas.content; 5457 } 5458 EXPORT_SYMBOL_GPL(mas_store); 5459 5460 /** 5461 * mas_store_gfp() - Store a value into the tree. 5462 * @mas: The maple state 5463 * @entry: The entry to store 5464 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5465 * 5466 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5467 * be allocated. 5468 */ 5469 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5470 { 5471 unsigned long index = mas->index; 5472 unsigned long last = mas->last; 5473 MA_WR_STATE(wr_mas, mas, entry); 5474 int ret = 0; 5475 5476 retry: 5477 mas_wr_preallocate(&wr_mas, entry); 5478 if (unlikely(mas_nomem(mas, gfp))) { 5479 if (!entry) 5480 __mas_set_range(mas, index, last); 5481 goto retry; 5482 } 5483 5484 if (mas_is_err(mas)) { 5485 ret = xa_err(mas->node); 5486 goto out; 5487 } 5488 5489 mas_wr_store_entry(&wr_mas); 5490 out: 5491 mas_destroy(mas); 5492 return ret; 5493 } 5494 EXPORT_SYMBOL_GPL(mas_store_gfp); 5495 5496 /** 5497 * mas_store_prealloc() - Store a value into the tree using memory 5498 * preallocated in the maple state. 5499 * @mas: The maple state 5500 * @entry: The entry to store. 5501 */ 5502 void mas_store_prealloc(struct ma_state *mas, void *entry) 5503 { 5504 MA_WR_STATE(wr_mas, mas, entry); 5505 5506 if (mas->store_type == wr_store_root) { 5507 mas_wr_prealloc_setup(&wr_mas); 5508 goto store; 5509 } 5510 5511 mas_wr_walk_descend(&wr_mas); 5512 if (mas->store_type != wr_spanning_store) { 5513 /* set wr_mas->content to current slot */ 5514 wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset); 5515 mas_wr_end_piv(&wr_mas); 5516 } 5517 5518 store: 5519 trace_ma_write(__func__, mas, 0, entry); 5520 mas_wr_store_entry(&wr_mas); 5521 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); 5522 mas_destroy(mas); 5523 } 5524 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5525 5526 /** 5527 * mas_preallocate() - Preallocate enough nodes for a store operation 5528 * @mas: The maple state 5529 * @entry: The entry that will be stored 5530 * @gfp: The GFP_FLAGS to use for allocations. 5531 * 5532 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5533 */ 5534 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) 5535 { 5536 MA_WR_STATE(wr_mas, mas, entry); 5537 int ret = 0; 5538 int request; 5539 5540 mas_wr_prealloc_setup(&wr_mas); 5541 mas->store_type = mas_wr_store_type(&wr_mas); 5542 request = mas_prealloc_calc(mas, entry); 5543 if (!request) 5544 return ret; 5545 5546 mas_node_count_gfp(mas, request, gfp); 5547 if (mas_is_err(mas)) { 5548 mas_set_alloc_req(mas, 0); 5549 ret = xa_err(mas->node); 5550 mas_destroy(mas); 5551 mas_reset(mas); 5552 return ret; 5553 } 5554 5555 mas->mas_flags |= MA_STATE_PREALLOC; 5556 return ret; 5557 } 5558 EXPORT_SYMBOL_GPL(mas_preallocate); 5559 5560 /* 5561 * mas_destroy() - destroy a maple state. 5562 * @mas: The maple state 5563 * 5564 * Upon completion, check the left-most node and rebalance against the node to 5565 * the right if necessary. 
Frees any allocated nodes associated with this maple 5566 * state. 5567 */ 5568 void mas_destroy(struct ma_state *mas) 5569 { 5570 struct maple_alloc *node; 5571 unsigned long total; 5572 5573 /* 5574 * When using mas_for_each() to insert an expected number of elements, 5575 * it is possible that the number inserted is less than the expected 5576 * number. To fix an invalid final node, a check is performed here to 5577 * rebalance the previous node with the final node. 5578 */ 5579 if (mas->mas_flags & MA_STATE_REBALANCE) { 5580 unsigned char end; 5581 if (mas_is_err(mas)) 5582 mas_reset(mas); 5583 mas_start(mas); 5584 mtree_range_walk(mas); 5585 end = mas->end + 1; 5586 if (end < mt_min_slot_count(mas->node) - 1) 5587 mas_destroy_rebalance(mas, end); 5588 5589 mas->mas_flags &= ~MA_STATE_REBALANCE; 5590 } 5591 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5592 5593 total = mas_allocated(mas); 5594 while (total) { 5595 node = mas->alloc; 5596 mas->alloc = node->slot[0]; 5597 if (node->node_count > 1) { 5598 size_t count = node->node_count - 1; 5599 5600 mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5601 total -= count; 5602 } 5603 mt_free_one(ma_mnode_ptr(node)); 5604 total--; 5605 } 5606 5607 mas->alloc = NULL; 5608 } 5609 EXPORT_SYMBOL_GPL(mas_destroy); 5610 5611 /* 5612 * mas_expected_entries() - Set the expected number of entries that will be inserted. 5613 * @mas: The maple state 5614 * @nr_entries: The number of expected entries. 5615 * 5616 * This will attempt to pre-allocate enough nodes to store the expected number 5617 * of entries. The allocations will occur using the bulk allocator interface 5618 * for speed. Please call mas_destroy() on the @mas after inserting the entries 5619 * to ensure any unused nodes are freed. 5620 * 5621 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5622 */ 5623 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) 5624 { 5625 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; 5626 struct maple_enode *enode = mas->node; 5627 int nr_nodes; 5628 int ret; 5629 5630 /* 5631 * Sometimes it is necessary to duplicate a tree to a new tree, such as 5632 * forking a process and duplicating the VMAs from one tree to a new 5633 * tree. When such a situation arises, it is known that the new tree is 5634 * not going to be used until the entire tree is populated. For 5635 * performance reasons, it is best to use a bulk load with RCU disabled. 5636 * This allows for optimistic splitting that favours the left and reuse 5637 * of nodes during the operation. 5638 */ 5639 5640 /* Optimize splitting for bulk insert in-order */ 5641 mas->mas_flags |= MA_STATE_BULK; 5642 5643 /* 5644 * Avoid overflow, assume a gap between each entry and a trailing null. 5645 * If this is wrong, it just means allocation can happen during 5646 * insertion of entries. 
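 * Under that assumption, @nr_entries entries occupy roughly
 * nr_entries * 2 + 1 slots, which is where the node count below comes
 * from.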
5647 */ 5648 nr_nodes = max(nr_entries, nr_entries * 2 + 1); 5649 if (!mt_is_alloc(mas->tree)) 5650 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; 5651 5652 /* Leaves; reduce slots to keep space for expansion */ 5653 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); 5654 /* Internal nodes */ 5655 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); 5656 /* Add working room for split (2 nodes) + new parents */ 5657 mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL); 5658 5659 /* Detect if allocations run out */ 5660 mas->mas_flags |= MA_STATE_PREALLOC; 5661 5662 if (!mas_is_err(mas)) 5663 return 0; 5664 5665 ret = xa_err(mas->node); 5666 mas->node = enode; 5667 mas_destroy(mas); 5668 return ret; 5669 5670 } 5671 EXPORT_SYMBOL_GPL(mas_expected_entries); 5672 5673 static bool mas_next_setup(struct ma_state *mas, unsigned long max, 5674 void **entry) 5675 { 5676 bool was_none = mas_is_none(mas); 5677 5678 if (unlikely(mas->last >= max)) { 5679 mas->status = ma_overflow; 5680 return true; 5681 } 5682 5683 switch (mas->status) { 5684 case ma_active: 5685 return false; 5686 case ma_none: 5687 fallthrough; 5688 case ma_pause: 5689 mas->status = ma_start; 5690 fallthrough; 5691 case ma_start: 5692 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ 5693 break; 5694 case ma_overflow: 5695 /* Overflowed before, but the max changed */ 5696 mas->status = ma_active; 5697 break; 5698 case ma_underflow: 5699 /* The user expects the mas to be one before where it is */ 5700 mas->status = ma_active; 5701 *entry = mas_walk(mas); 5702 if (*entry) 5703 return true; 5704 break; 5705 case ma_root: 5706 break; 5707 case ma_error: 5708 return true; 5709 } 5710 5711 if (likely(mas_is_active(mas))) /* Fast path */ 5712 return false; 5713 5714 if (mas_is_ptr(mas)) { 5715 *entry = NULL; 5716 if (was_none && mas->index == 0) { 5717 mas->index = mas->last = 0; 5718 return true; 5719 } 5720 mas->index = 1; 5721 mas->last = ULONG_MAX; 5722 mas->status = ma_none; 5723 return true; 5724 } 5725 5726 if (mas_is_none(mas)) 5727 return true; 5728 5729 return false; 5730 } 5731 5732 /** 5733 * mas_next() - Get the next entry. 5734 * @mas: The maple state 5735 * @max: The maximum index to check. 5736 * 5737 * Returns the next entry after @mas->index. 5738 * Must hold rcu_read_lock or the write lock. 5739 * Can return the zero entry. 5740 * 5741 * Return: The next entry or %NULL 5742 */ 5743 void *mas_next(struct ma_state *mas, unsigned long max) 5744 { 5745 void *entry = NULL; 5746 5747 if (mas_next_setup(mas, max, &entry)) 5748 return entry; 5749 5750 /* Retries on dead nodes handled by mas_next_slot */ 5751 return mas_next_slot(mas, max, false); 5752 } 5753 EXPORT_SYMBOL_GPL(mas_next); 5754 5755 /** 5756 * mas_next_range() - Advance the maple state to the next range 5757 * @mas: The maple state 5758 * @max: The maximum index to check. 5759 * 5760 * Sets @mas->index and @mas->last to the range. 5761 * Must hold rcu_read_lock or the write lock. 5762 * Can return the zero entry. 
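 * Unlike mas_next(), empty ranges are also returned, so a %NULL result
 * does not by itself mean the walk has finished.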
5763 * 5764 * Return: The next entry or %NULL 5765 */ 5766 void *mas_next_range(struct ma_state *mas, unsigned long max) 5767 { 5768 void *entry = NULL; 5769 5770 if (mas_next_setup(mas, max, &entry)) 5771 return entry; 5772 5773 /* Retries on dead nodes handled by mas_next_slot */ 5774 return mas_next_slot(mas, max, true); 5775 } 5776 EXPORT_SYMBOL_GPL(mas_next_range); 5777 5778 /** 5779 * mt_next() - get the next value in the maple tree 5780 * @mt: The maple tree 5781 * @index: The start index 5782 * @max: The maximum index to check 5783 * 5784 * Takes RCU read lock internally to protect the search, which does not 5785 * protect the returned pointer after dropping RCU read lock. 5786 * See also: Documentation/core-api/maple_tree.rst 5787 * 5788 * Return: The entry higher than @index or %NULL if nothing is found. 5789 */ 5790 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) 5791 { 5792 void *entry = NULL; 5793 MA_STATE(mas, mt, index, index); 5794 5795 rcu_read_lock(); 5796 entry = mas_next(&mas, max); 5797 rcu_read_unlock(); 5798 return entry; 5799 } 5800 EXPORT_SYMBOL_GPL(mt_next); 5801 5802 static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry) 5803 { 5804 if (unlikely(mas->index <= min)) { 5805 mas->status = ma_underflow; 5806 return true; 5807 } 5808 5809 switch (mas->status) { 5810 case ma_active: 5811 return false; 5812 case ma_start: 5813 break; 5814 case ma_none: 5815 fallthrough; 5816 case ma_pause: 5817 mas->status = ma_start; 5818 break; 5819 case ma_underflow: 5820 /* underflowed before but the min changed */ 5821 mas->status = ma_active; 5822 break; 5823 case ma_overflow: 5824 /* User expects mas to be one after where it is */ 5825 mas->status = ma_active; 5826 *entry = mas_walk(mas); 5827 if (*entry) 5828 return true; 5829 break; 5830 case ma_root: 5831 break; 5832 case ma_error: 5833 return true; 5834 } 5835 5836 if (mas_is_start(mas)) 5837 mas_walk(mas); 5838 5839 if (unlikely(mas_is_ptr(mas))) { 5840 if (!mas->index) { 5841 mas->status = ma_none; 5842 return true; 5843 } 5844 mas->index = mas->last = 0; 5845 *entry = mas_root(mas); 5846 return true; 5847 } 5848 5849 if (mas_is_none(mas)) { 5850 if (mas->index) { 5851 /* Walked to out-of-range pointer? */ 5852 mas->index = mas->last = 0; 5853 mas->status = ma_root; 5854 *entry = mas_root(mas); 5855 return true; 5856 } 5857 return true; 5858 } 5859 5860 return false; 5861 } 5862 5863 /** 5864 * mas_prev() - Get the previous entry 5865 * @mas: The maple state 5866 * @min: The minimum value to check. 5867 * 5868 * Must hold rcu_read_lock or the write lock. 5869 * Will reset mas to ma_start if the status is ma_none. Will stop on not 5870 * searchable nodes. 5871 * 5872 * Return: the previous value or %NULL. 5873 */ 5874 void *mas_prev(struct ma_state *mas, unsigned long min) 5875 { 5876 void *entry = NULL; 5877 5878 if (mas_prev_setup(mas, min, &entry)) 5879 return entry; 5880 5881 return mas_prev_slot(mas, min, false); 5882 } 5883 EXPORT_SYMBOL_GPL(mas_prev); 5884 5885 /** 5886 * mas_prev_range() - Advance to the previous range 5887 * @mas: The maple state 5888 * @min: The minimum value to check. 5889 * 5890 * Sets @mas->index and @mas->last to the range. 5891 * Must hold rcu_read_lock or the write lock. 5892 * Will reset mas to ma_start if the node is ma_none. Will stop on not 5893 * searchable nodes. 5894 * 5895 * Return: the previous value or %NULL. 
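 *
 * An illustrative sketch (tree and entry declarations are assumed):
 *
 *	MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);
 *
 *	rcu_read_lock();
 *	entry = mas_prev_range(&mas, 0);
 *	rcu_read_unlock();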
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev_range(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);

/**
 * mt_prev() - get the previous value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @min: The minimum index to check
 *
 * Takes RCU read lock internally to protect the search, which does not
 * protect the returned pointer after dropping RCU read lock.
 * See also: Documentation/core-api/maple_tree.rst
 *
 * Return: The entry before @index or %NULL if nothing is found.
 */
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, min);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);

/**
 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
 * @mas: The maple state to pause
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @mas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call mas_pause(), the mt_for_each()
 * iterator may be more appropriate.
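 *
 * Example (an illustrative sketch; assumes a spinlock-protected tree
 * @my_tree and a hypothetical per-entry helper do_something()):
 *
 *	MA_STATE(mas, &my_tree, 0, 0);
 *	void *entry;
 *
 *	mas_lock(&mas);
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			mas_unlock(&mas);
 *			cond_resched();
 *			mas_lock(&mas);
 *		}
 *		do_something(entry);
 *	}
 *	mas_unlock(&mas);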
 */
void mas_pause(struct ma_state *mas)
{
	mas->status = ma_pause;
	mas->node = NULL;
}
EXPORT_SYMBOL_GPL(mas_pause);

/**
 * mas_find_setup() - Internal function to set up mas_find*().
 * @mas: The maple state
 * @max: The maximum index
 * @entry: Pointer to the entry
 *
 * Return: True if entry is the answer, false otherwise.
 */
static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
{
	switch (mas->status) {
	case ma_active:
		if (mas->last < max)
			return false;
		return true;
	case ma_start:
		break;
	case ma_pause:
		if (unlikely(mas->last >= max))
			return true;

		mas->index = ++mas->last;
		mas->status = ma_start;
		break;
	case ma_none:
		if (unlikely(mas->last >= max))
			return true;

		mas->index = mas->last;
		mas->status = ma_start;
		break;
	case ma_underflow:
		/* mas is pointing at the entry before; it was unable to go lower */
		if (unlikely(mas->index >= max)) {
			mas->status = ma_overflow;
			return true;
		}

		mas->status = ma_active;
		*entry = mas_walk(mas);
		if (*entry)
			return true;
		break;
	case ma_overflow:
		if (unlikely(mas->last >= max))
			return true;

		mas->status = ma_active;
		*entry = mas_walk(mas);
		if (*entry)
			return true;
		break;
	case ma_root:
		break;
	case ma_error:
		return true;
	}

	if (mas_is_start(mas)) {
		/* First run or continue */
		if (mas->index > max)
			return true;

		*entry = mas_walk(mas);
		if (*entry)
			return true;
	}

	if (unlikely(mas_is_ptr(mas)))
		goto ptr_out_of_range;

	if (unlikely(mas_is_none(mas)))
		return true;

	if (mas->index == max)
		return true;

	return false;

ptr_out_of_range:
	mas->status = ma_none;
	mas->index = 1;
	mas->last = ULONG_MAX;
	return true;
}

/**
 * mas_find() - On the first call, find the entry at or after mas->index up to
 * %max. Otherwise, find the entry after mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_overflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_find_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	entry = mas_next_slot(mas, max, false);
	/* Ignore overflow */
	mas->status = ma_active;
	return entry;
}
EXPORT_SYMBOL_GPL(mas_find);

/**
 * mas_find_range() - On the first call, find the entry at or after
 * mas->index up to %max. Otherwise, advance to the next slot after mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_overflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_find_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);

/**
 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
 * @mas: The maple state
 * @min: The minimum index
 * @entry: Pointer to the entry
 *
 * Return: True if entry is the answer, false otherwise.
 */
static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
		void **entry)
{
	switch (mas->status) {
	case ma_active:
		goto active;
	case ma_start:
		break;
	case ma_pause:
		if (unlikely(mas->index <= min)) {
			mas->status = ma_underflow;
			return true;
		}
		mas->last = --mas->index;
		mas->status = ma_start;
		break;
	case ma_none:
		if (mas->index <= min)
			goto none;

		mas->last = mas->index;
		mas->status = ma_start;
		break;
	case ma_overflow: /* user expects the mas to be one after where it is */
		if (unlikely(mas->index <= min)) {
			mas->status = ma_underflow;
			return true;
		}

		mas->status = ma_active;
		break;
	case ma_underflow: /* user expects the mas to be one before where it is */
		if (unlikely(mas->index <= min))
			return true;

		mas->status = ma_active;
		break;
	case ma_root:
		break;
	case ma_error:
		return true;
	}

	if (mas_is_start(mas)) {
		/* First run or continue */
		if (mas->index < min)
			return true;

		*entry = mas_walk(mas);
		if (*entry)
			return true;
	}

	if (unlikely(mas_is_ptr(mas)))
		goto none;

	if (unlikely(mas_is_none(mas))) {
		/*
		 * Walked to the location, and there was nothing so the previous
		 * location is 0.
		 */
		mas->last = mas->index = 0;
		mas->status = ma_root;
		*entry = mas_root(mas);
		return true;
	}

active:
	if (mas->index < min)
		return true;

	return false;

none:
	mas->status = ma_none;
	return true;
}

/**
 * mas_find_rev() - On the first call, find the first non-null entry at or
 * below mas->index down to %min. Otherwise find the first non-null entry
 * below mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_underflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_find_rev_setup(mas, min, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_prev_slot */
	return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);

/**
 * mas_find_range_rev() - On the first call, find the first non-null entry at
 * or below mas->index down to %min. Otherwise advance to the previous slot
 * before mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_underflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_find_rev_setup(mas, min, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_prev_slot */
	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);

/**
 * mas_erase() - Find the range in which index resides and erase the entire
 * range.
 * @mas: The maple state
 *
 * Must hold the write lock.
 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
 * erases that range.
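 *
 * Example (an illustrative sketch; erases whatever range covers index 12
 * in a hypothetical tree @my_tree, under the write lock):
 *
 *	MA_STATE(mas, &my_tree, 12, 12);
 *	void *old;
 *
 *	mas_lock(&mas);
 *	old = mas_erase(&mas);
 *	mas_unlock(&mas);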
 *
 * Return: the entry that was erased or %NULL; @mas->index and @mas->last
 * are updated.
 */
void *mas_erase(struct ma_state *mas)
{
	void *entry;
	unsigned long index = mas->index;
	MA_WR_STATE(wr_mas, mas, NULL);

	if (!mas_is_active(mas) || !mas_is_start(mas))
		mas->status = ma_start;

write_retry:
	entry = mas_state_walk(mas);
	if (!entry)
		return NULL;

	/* Must reset to ensure spanning writes of last slot are detected */
	mas_reset(mas);
	mas_wr_preallocate(&wr_mas, NULL);
	if (mas_nomem(mas, GFP_KERNEL)) {
		/* in case the range of entry changed when unlocked */
		mas->index = mas->last = index;
		goto write_retry;
	}

	if (mas_is_err(mas))
		goto out;

	mas_wr_store_entry(&wr_mas);
out:
	mas_destroy(mas);
	return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);

/**
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary. If there are allocations, then free them.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 * Return: true on allocation, false otherwise.
 */
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
	__must_hold(mas->tree->ma_lock)
{
	if (likely(mas->node != MA_ERROR(-ENOMEM)))
		return false;

	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
		mtree_unlock(mas->tree);
		mas_alloc_nodes(mas, gfp);
		mtree_lock(mas->tree);
	} else {
		mas_alloc_nodes(mas, gfp);
	}

	if (!mas_allocated(mas))
		return false;

	mas->status = ma_start;
	return true;
}

void __init maple_tree_init(void)
{
	maple_node_cache = kmem_cache_create("maple_node",
			sizeof(struct maple_node), sizeof(struct maple_node),
			SLAB_PANIC, NULL);
}

/**
 * mtree_load() - Load a value stored in a maple tree
 * @mt: The maple tree
 * @index: The index to load
 *
 * Return: the entry or %NULL
 */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	trace_ma_read(__func__, &mas);
	rcu_read_lock();
retry:
	entry = mas_start(&mas);
	if (unlikely(mas_is_none(&mas)))
		goto unlock;

	if (unlikely(mas_is_ptr(&mas))) {
		if (index)
			entry = NULL;

		goto unlock;
	}

	entry = mtree_lookup_walk(&mas);
	if (!entry && unlikely(mas_is_start(&mas)))
		goto retry;
unlock:
	rcu_read_unlock();
	if (xa_is_zero(entry))
		return NULL;

	return entry;
}
EXPORT_SYMBOL(mtree_load);

/**
 * mtree_store_range() - Store an entry at a given range.
 * @mt: The maple tree
 * @index: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
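 * Example (an illustrative sketch; stores a hypothetical object @item over
 * the range 0x1000-0x1fff of a hypothetical tree @my_tree):
 *
 *	int err;
 *
 *	err = mtree_store_range(&my_tree, 0x1000, 0x1fff, item, GFP_KERNEL);
 *	if (err)
 *		return err;
 *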
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store_range(struct maple_tree *mt, unsigned long index,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(mas, mt, index, last);
	int ret = 0;

	trace_ma_write(__func__, &mas, 0, entry);
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (index > last)
		return -EINVAL;

	mtree_lock(mt);
	ret = mas_store_gfp(&mas, entry, gfp);
	mtree_unlock(mt);

	return ret;
}
EXPORT_SYMBOL(mtree_store_range);

/**
 * mtree_store() - Store an entry at a given index.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
		gfp_t gfp)
{
	return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);

/**
 * mtree_insert_range() - Insert an entry at a given range if there is no value.
 * @mt: The maple tree
 * @first: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(ms, mt, first, last);
	int ret = 0;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (first > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_insert(&ms, entry);
	if (mas_nomem(&ms, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&ms))
		ret = xa_err(ms.node);

	mas_destroy(&ms);
	return ret;
}
EXPORT_SYMBOL(mtree_insert_range);

/**
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
		gfp_t gfp)
{
	return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);

int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, 0, 0);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_empty_area(&mas, min, max, size);
	if (ret)
		goto unlock;

	mas_insert(&mas, entry);
	/*
	 * mas_nomem() may release the lock, causing the allocated area
	 * to be unavailable, so try to allocate a free area again.
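	 * Nothing is reserved by mas_empty_area() itself; the area only
	 * becomes ours once mas_insert() succeeds while the lock is held.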
	 */
	if (mas_nomem(&mas, gfp))
		goto retry;

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);
	else
		*startp = mas.index;

unlock:
	mtree_unlock(mt);
	mas_destroy(&mas);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);

/**
 * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
 * @mt: The maple tree.
 * @startp: Pointer to ID.
 * @entry: The entry to store.
 * @range_lo: Lower bound of range to search.
 * @range_hi: Upper bound of range to search.
 * @next: Pointer to next ID to allocate.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Finds an empty entry in @mt after @next, stores the new index into
 * the @startp pointer, stores the entry at that index, then updates @next.
 *
 * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
 *
 * Context: Any context. Takes and releases the @mt lock. May sleep if
 * the @gfp flags permit.
 *
 * Return: 0 if the allocation succeeded without wrapping, 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
 * free entries.
 */
int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long range_lo, unsigned long range_hi,
		unsigned long *next, gfp_t gfp)
{
	int ret;

	MA_STATE(mas, mt, 0, 0);

	if (!mt_is_alloc(mt))
		return -EINVAL;
	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;
	mtree_lock(mt);
	ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
			       next, gfp);
	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_cyclic);

int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, 0, 0);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_empty_area_rev(&mas, min, max, size);
	if (ret)
		goto unlock;

	mas_insert(&mas, entry);
	/*
	 * mas_nomem() may release the lock, causing the allocated area
	 * to be unavailable, so try to allocate a free area again.
	 */
	if (mas_nomem(&mas, gfp))
		goto retry;

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);
	else
		*startp = mas.index;

unlock:
	mtree_unlock(mt);
	mas_destroy(&mas);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);

/**
 * mtree_erase() - Find an index and erase the entire range.
 * @mt: The maple tree
 * @index: The index to erase
 *
 * Erasing is the same as a walk to an entry then a store of a NULL to that
 * ENTIRE range. In fact, it is implemented as such using the advanced API.
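 *
 * Example (an illustrative sketch; erases whatever range covers index 5 in
 * a hypothetical tree @my_tree and returns the old entry):
 *
 *	void *old = mtree_erase(&my_tree, 5);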
 *
 * Return: The entry stored at the @index or %NULL
 */
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
	void *entry = NULL;

	MA_STATE(mas, mt, index, index);
	trace_ma_op(__func__, &mas);

	mtree_lock(mt);
	entry = mas_erase(&mas);
	mtree_unlock(mt);

	return entry;
}
EXPORT_SYMBOL(mtree_erase);

/*
 * mas_dup_free() - Free an incomplete duplication of a tree.
 * @mas: The maple state of an incomplete tree.
 *
 * The parameter @mas->node passed in indicates that the allocation failed on
 * this node. This function frees all nodes starting from @mas->node in the
 * reverse order of mas_dup_build(). There is no need to hold the source tree
 * lock at this time.
 */
static void mas_dup_free(struct ma_state *mas)
{
	struct maple_node *node;
	enum maple_type type;
	void __rcu **slots;
	unsigned char count, i;

	/* Maybe the first node allocation failed. */
	if (mas_is_none(mas))
		return;

	while (!mte_is_root(mas->node)) {
		mas_ascend(mas);
		if (mas->offset) {
			mas->offset--;
			do {
				mas_descend(mas);
				mas->offset = mas_data_end(mas);
			} while (!mte_is_leaf(mas->node));

			mas_ascend(mas);
		}

		node = mte_to_node(mas->node);
		type = mte_node_type(mas->node);
		slots = ma_slots(node, type);
		count = mas_data_end(mas) + 1;
		for (i = 0; i < count; i++)
			((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
		mt_free_bulk(count, slots);
	}

	node = mte_to_node(mas->node);
	mt_free_one(node);
}

/*
 * mas_copy_node() - Copy a maple node and replace the parent.
 * @mas: The maple state of source tree.
 * @new_mas: The maple state of new tree.
 * @parent: The parent of the new node.
 *
 * Copy @mas->node to @new_mas->node, set @parent to be the parent of
 * @new_mas->node.
 */
static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
		struct maple_pnode *parent)
{
	struct maple_node *node = mte_to_node(mas->node);
	struct maple_node *new_node = mte_to_node(new_mas->node);
	unsigned long val;

	/* Copy the node completely. */
	memcpy(new_node, node, sizeof(struct maple_node));
	/* Update the parent node pointer. */
	val = (unsigned long)node->parent & MAPLE_NODE_MASK;
	new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
}

/*
 * mas_dup_alloc() - Allocate child nodes for a maple node.
 * @mas: The maple state of source tree.
 * @new_mas: The maple state of new tree.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * This function allocates child nodes for @new_mas->node during the duplication
 * process. If memory allocation fails, @mas is set to -ENOMEM.
 */
static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
		gfp_t gfp)
{
	struct maple_node *node = mte_to_node(mas->node);
	struct maple_node *new_node = mte_to_node(new_mas->node);
	enum maple_type type;
	unsigned char request, count, i;
	void __rcu **slots;
	void __rcu **new_slots;
	unsigned long val;

	/* Allocate memory for child nodes. */
	type = mte_node_type(mas->node);
	new_slots = ma_slots(new_node, type);
	request = mas_data_end(mas) + 1;
	count = mt_alloc_bulk(gfp, request, (void **)new_slots);
	if (unlikely(count < request)) {
		memset(new_slots, 0, request * sizeof(void *));
		mas_set_err(mas, -ENOMEM);
		return;
	}

	/*
	 * Restore node type information in slots.
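	 * The low bits of each slot carry the node type (MAPLE_NODE_MASK);
	 * copy them from the source slots onto the newly allocated children.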
	 */
	slots = ma_slots(node, type);
	for (i = 0; i < count; i++) {
		val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
		val &= MAPLE_NODE_MASK;
		((unsigned long *)new_slots)[i] |= val;
	}
}

/*
 * mas_dup_build() - Build a new maple tree from a source tree
 * @mas: The maple state of source tree, needs to be in the ma_start state.
 * @new_mas: The maple state of new tree, needs to be in the ma_start state.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * This function builds a new tree in DFS preorder. If the memory allocation
 * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
 * last node. mas_dup_free() will free the incomplete duplication of a tree.
 *
 * Note that the attributes of the two trees need to be exactly the same, and the
 * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
 */
static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
		gfp_t gfp)
{
	struct maple_node *node;
	struct maple_pnode *parent = NULL;
	struct maple_enode *root;
	enum maple_type type;

	if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
	    unlikely(!mtree_empty(new_mas->tree))) {
		mas_set_err(mas, -EINVAL);
		return;
	}

	root = mas_start(mas);
	if (mas_is_ptr(mas) || mas_is_none(mas))
		goto set_new_tree;

	node = mt_alloc_one(gfp);
	if (!node) {
		new_mas->status = ma_none;
		mas_set_err(mas, -ENOMEM);
		return;
	}

	type = mte_node_type(mas->node);
	root = mt_mk_node(node, type);
	new_mas->node = root;
	new_mas->min = 0;
	new_mas->max = ULONG_MAX;
	root = mte_mk_root(root);
	while (1) {
		mas_copy_node(mas, new_mas, parent);
		if (!mte_is_leaf(mas->node)) {
			/* Only allocate child nodes for non-leaf nodes. */
			mas_dup_alloc(mas, new_mas, gfp);
			if (unlikely(mas_is_err(mas)))
				return;
		} else {
			/*
			 * This is the last leaf node and duplication is
			 * completed.
			 */
			if (mas->max == ULONG_MAX)
				goto done;

			/* This is not the last leaf node and needs to go up. */
			do {
				mas_ascend(mas);
				mas_ascend(new_mas);
			} while (mas->offset == mas_data_end(mas));

			/* Move to the next subtree. */
			mas->offset++;
			new_mas->offset++;
		}

		mas_descend(mas);
		parent = ma_parent_ptr(mte_to_node(new_mas->node));
		mas_descend(new_mas);
		mas->offset = 0;
		new_mas->offset = 0;
	}
done:
	/* Specially handle the parent of the root node. */
	mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
set_new_tree:
	/* Make them the same height */
	new_mas->tree->ma_flags = mas->tree->ma_flags;
	rcu_assign_pointer(new_mas->tree->ma_root, root);
}

/**
 * __mt_dup() - Duplicate an entire maple tree
 * @mt: The source maple tree
 * @new: The new maple tree
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
 * traversal. It uses memcpy() to copy nodes in the source tree and allocates
 * new child nodes for non-leaf nodes. The new node is exactly the same as the
 * source node except for all the addresses stored in it. It will be faster than
 * traversing all elements in the source tree and inserting them one by one into
 * the new tree.
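 *
 * Example (an illustrative sketch; duplicates @old into an empty tree @new
 * with matching attributes; locking of both trees, as required by the note
 * below, is left to the caller):
 *
 *	DEFINE_MTREE(new);
 *	int err;
 *
 *	err = __mt_dup(&old, &new, GFP_KERNEL);
 *	if (err)
 *		return err;
 *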
 * The user needs to ensure that the attributes of the source tree and the new
 * tree are the same, and the new tree needs to be an empty tree, otherwise
 * -EINVAL will be returned.
 * Note that the user needs to manually lock the source tree and the new tree.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
 * the attributes of the two trees are different or the new tree is not an empty
 * tree.
 */
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
{
	int ret = 0;
	MA_STATE(mas, mt, 0, 0);
	MA_STATE(new_mas, new, 0, 0);

	mas_dup_build(&mas, &new_mas, gfp);
	if (unlikely(mas_is_err(&mas))) {
		ret = xa_err(mas.node);
		if (ret == -ENOMEM)
			mas_dup_free(&new_mas);
	}

	return ret;
}
EXPORT_SYMBOL(__mt_dup);

/**
 * mtree_dup() - Duplicate an entire maple tree
 * @mt: The source maple tree
 * @new: The new maple tree
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
 * traversal. It uses memcpy() to copy nodes in the source tree and allocates
 * new child nodes for non-leaf nodes. The new node is exactly the same as the
 * source node except for all the addresses stored in it. It will be faster than
 * traversing all elements in the source tree and inserting them one by one into
 * the new tree. Unlike __mt_dup(), it handles locking of both trees internally.
 *
 * The user needs to ensure that the attributes of the source tree and the new
 * tree are the same, and the new tree needs to be an empty tree, otherwise
 * -EINVAL will be returned.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
 * the attributes of the two trees are different or the new tree is not an empty
 * tree.
 */
int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
{
	int ret = 0;
	MA_STATE(mas, mt, 0, 0);
	MA_STATE(new_mas, new, 0, 0);

	mas_lock(&new_mas);
	mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
	mas_dup_build(&mas, &new_mas, gfp);
	mas_unlock(&mas);
	if (unlikely(mas_is_err(&mas))) {
		ret = xa_err(mas.node);
		if (ret == -ENOMEM)
			mas_dup_free(&new_mas);
	}

	mas_unlock(&new_mas);
	return ret;
}
EXPORT_SYMBOL(mtree_dup);

/**
 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
 * @mt: The maple tree
 *
 * Note: Does not handle locking.
 */
void __mt_destroy(struct maple_tree *mt)
{
	void *root = mt_root_locked(mt);

	rcu_assign_pointer(mt->ma_root, NULL);
	if (xa_is_node(root))
		mte_destroy_walk(root, mt);

	mt->ma_flags = mt_attr(mt);
}
EXPORT_SYMBOL_GPL(__mt_destroy);

/**
 * mtree_destroy() - Destroy a maple tree
 * @mt: The maple tree
 *
 * Frees all resources used by the tree. Handles locking.
 */
void mtree_destroy(struct maple_tree *mt)
{
	mtree_lock(mt);
	__mt_destroy(mt);
	mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);

/**
 * mt_find() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value of the search range
 *
 * Takes RCU read lock internally to protect the search, which does not
 * protect the returned pointer after dropping RCU read lock.
 * See also: Documentation/core-api/maple_tree.rst
 *
 * In case that an entry is found, @index is updated to point to the next
 * possible entry, independent of whether the found entry is occupying a
 * single index or a range of indices.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
	MA_STATE(mas, mt, *index, *index);
	void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
	unsigned long copy = *index;
#endif

	trace_ma_read(__func__, &mas);

	if ((*index) > max)
		return NULL;

	rcu_read_lock();
retry:
	entry = mas_state_walk(&mas);
	if (mas_is_start(&mas))
		goto retry;

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;

	if (entry)
		goto unlock;

	while (mas_is_active(&mas) && (mas.last < max)) {
		entry = mas_next_entry(&mas, max);
		if (likely(entry && !xa_is_zero(entry)))
			break;
	}

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;
unlock:
	rcu_read_unlock();
	if (likely(entry)) {
		*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
		if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
			pr_err("index not increased! %lx <= %lx\n",
				*index, copy);
#endif
	}

	return entry;
}
EXPORT_SYMBOL(mt_find);

/**
 * mt_find_after() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Same as mt_find() except that it checks @index for 0 before
 * searching. If @index == 0, the search is aborted. This covers a wrap
 * around of @index to 0 in an iterator loop.
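 *
 * Example (an illustrative sketch of the iterator pattern; this is
 * essentially what the mt_for_each() helper expands to; @my_tree and
 * do_something() are hypothetical):
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = mt_find(&my_tree, &index, ULONG_MAX); entry;
 *	     entry = mt_find_after(&my_tree, &index, ULONG_MAX))
 *		do_something(entry);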
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		unsigned long max)
{
	if (!(*index))
		return NULL;

	return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);

#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);

#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
	kmem_cache_set_non_kernel(maple_node_cache, val);
}

extern void kmem_cache_set_callback(struct kmem_cache *cachep,
		void (*callback)(void *));
void mt_set_callback(void (*callback)(void *))
{
	kmem_cache_set_callback(maple_node_cache, callback);
}

extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private);
void mt_set_private(void *private)
{
	kmem_cache_set_private(maple_node_cache, private);
}

extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
	return kmem_cache_get_alloc(maple_node_cache);
}

extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
	kmem_cache_zero_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
	return kmem_cache_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
	return kmem_cache_nr_allocated(maple_node_cache);
}

void mt_cache_shrink(void)
{
}
#else
/*
 * mt_cache_shrink() - For testing, don't use this.
 *
 * Certain testcases can trigger an OOM when combined with other memory
 * debugging configuration options. This function is used to reduce the
 * possibility of an out of memory event due to kmem_cache objects remaining
 * around for longer than usual.
 */
void mt_cache_shrink(void)
{
	kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);

#endif /* not defined __KERNEL__ */
/*
 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
 * @mas: The maple state
 * @offset: The offset into the slot array to fetch.
 *
 * Return: The entry stored at @offset.
 */
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
		unsigned char offset)
{
	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
			offset);
}

/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
	struct maple_enode *p, *mn = mas->node;
	unsigned long p_min, p_max;

	mas_next_node(mas, mas_mn(mas), max);
	if (!mas_is_overflow(mas))
		return;

	if (mte_is_root(mn))
		return;

	mas->node = mn;
	mas_ascend(mas);
	do {
		p = mas->node;
		p_min = mas->min;
		p_max = mas->max;
		mas_prev_node(mas, 0);
	} while (!mas_is_underflow(mas));

	mas->node = p;
	mas->max = p_max;
	mas->min = p_min;
}

/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format);
static void mt_dump_range(unsigned long min, unsigned long max,
		unsigned int depth, enum mt_dump_format format)
{
	static const char spaces[] = "                                ";

	switch (format) {
	case mt_dump_hex:
		if (min == max)
			pr_info("%.*s%lx: ", depth * 2, spaces, min);
		else
			pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
		break;
	case mt_dump_dec:
		if (min == max)
			pr_info("%.*s%lu: ", depth * 2, spaces, min);
		else
			pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
	}
}

static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
		unsigned int depth, enum mt_dump_format format)
{
	mt_dump_range(min, max, depth, format);

	if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [" PTR_FMT "]\n", xa_to_value(entry),
			xa_to_value(entry), entry);
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else if (mt_is_reserved(entry))
		pr_cont("UNKNOWN ENTRY (" PTR_FMT ")\n", entry);
	else
		pr_cont(PTR_FMT "\n", entry);
}

static void mt_dump_range64(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
	bool leaf = mte_is_leaf(entry);
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
		switch (format) {
		case mt_dump_hex:
			pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
			break;
		case mt_dump_dec:
			pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
		}
	}
	pr_cont(PTR_FMT "\n", node->slot[i]);
	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_RANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i] && max != mt_node_max(entry))
			break;
		if (last == 0 && i > 0)
			break;
		if (leaf)
			mt_dump_entry(mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);
		else if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);

		if (last == max)
			break;
		if (last > max) {
			switch (format) {
			case mt_dump_hex:
				pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n",
					node, last, max, i);
				break;
			case mt_dump_dec:
				pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			}
		}
		first = last + 1;
	}
}

static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
		switch (format) {
		case mt_dump_hex:
			pr_cont("%lx ", node->gap[i]);
			break;
		case mt_dump_dec:
			pr_cont("%lu ", node->gap[i]);
		}
	}
	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
		switch (format) {
		case mt_dump_hex:
			pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
			break;
		case mt_dump_dec:
			pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
		}
	}
	pr_cont(PTR_FMT "\n", node->slot[i]);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_ARANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i])
			break;
		if (last == 0 && i > 0)
			break;
		if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);

		if (last == max)
			break;
		if (last > max) {
			switch (format) {
			case mt_dump_hex:
				pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n",
					node, last, max, i);
				break;
			case mt_dump_dec:
				pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			}
		}
		first = last + 1;
	}
}

static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_node *node = mte_to_node(entry);
	unsigned int type = mte_node_type(entry);
	unsigned int i;

	mt_dump_range(min, max, depth, format);

	pr_cont("node " PTR_FMT " depth %d type %d parent " PTR_FMT, node,
		depth, type, node ? node->parent : NULL);
	switch (type) {
	case maple_dense:
		pr_cont("\n");
		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
			if (min + i > max)
				pr_cont("OUT OF RANGE: ");
			mt_dump_entry(mt_slot(mt, node->slot, i),
					min + i, min + i, depth, format);
		}
		break;
	case maple_leaf_64:
	case maple_range_64:
		mt_dump_range64(mt, entry, min, max, depth, format);
		break;
	case maple_arange_64:
		mt_dump_arange64(mt, entry, min, max, depth, format);
		break;
	default:
		pr_cont(" UNKNOWN TYPE\n");
	}
}

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));

	pr_info("maple_tree(" PTR_FMT ") flags %X, height %u root " PTR_FMT "\n",
		mt, mt->ma_flags, mt_height(mt), entry);
	if (xa_is_node(entry))
		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
	else if (entry)
		mt_dump_entry(entry, 0, 0, 0, format);
	else
		pr_info("(empty)\n");
}
EXPORT_SYMBOL_GPL(mt_dump);

/*
 * Calculate the maximum gap in a node and check if that's what is reported in
 * the parent (unless root).
 */
static void mas_validate_gaps(struct ma_state *mas)
{
	struct maple_enode *mte = mas->node;
	struct maple_node *p_mn, *node = mte_to_node(mte);
	enum maple_type mt = mte_node_type(mas->node);
	unsigned long gap = 0, max_gap = 0;
	unsigned long p_end, p_start = mas->min;
	unsigned char p_slot, offset;
	unsigned long *gaps = NULL;
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned int i;

	if (ma_is_dense(mt)) {
		for (i = 0; i < mt_slot_count(mte); i++) {
			if (mas_get_slot(mas, i)) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
				continue;
			}
			gap++;
		}
		goto counted;
	}

	gaps = ma_gaps(node, mt);
	for (i = 0; i < mt_slot_count(mte); i++) {
		p_end = mas_safe_pivot(mas, pivots, i, mt);

		if (!gaps) {
			if (!mas_get_slot(mas, i))
				gap = p_end - p_start + 1;
		} else {
			void *entry = mas_get_slot(mas, i);

			gap = gaps[i];
			MT_BUG_ON(mas->tree, !entry);

			if (gap > p_end - p_start + 1) {
				pr_err(PTR_FMT "[%u] %lu >= %lu - %lu + 1 (%lu)\n",
					mas_mn(mas), i, gap, p_end, p_start,
					p_end - p_start + 1);
				MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
			}
		}

		if (gap > max_gap)
			max_gap = gap;

		p_start = p_end + 1;
		if (p_end >= mas->max)
			break;
	}

counted:
	if (mt == maple_arange_64) {
		MT_BUG_ON(mas->tree, !gaps);
		offset = ma_meta_gap(node);
		if (offset > i) {
			pr_err("gap offset " PTR_FMT "[%u] is invalid\n", node, offset);
			MT_BUG_ON(mas->tree, 1);
		}

		if (gaps[offset] != max_gap) {
			pr_err("gap " PTR_FMT "[%u] is not the largest gap %lu\n",
				node, offset, max_gap);
			MT_BUG_ON(mas->tree, 1);
		}

		for (i++; i < mt_slot_count(mte); i++) {
			if (gaps[i] != 0) {
				pr_err("gap " PTR_FMT "[%u] beyond node limit != 0\n",
					node, i);
				MT_BUG_ON(mas->tree, 1);
			}
		}
	}

	if (mte_is_root(mte))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_mn = mte_parent(mte);
	MT_BUG_ON(mas->tree, max_gap > mas->max);
	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
		pr_err("gap " PTR_FMT "[%u] != %lu\n", p_mn, p_slot, max_gap);
		mt_dump(mas->tree, mt_dump_hex);
		MT_BUG_ON(mas->tree, 1);
	}
}

static void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type;
	unsigned char p_slot;
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_type = mas_parent_type(mas, mas->node);
	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */
	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent " PTR_FMT "[%u] does not have " PTR_FMT "\n",
					parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child " PTR_FMT " at parent " PTR_FMT "[%u] p_slot %u\n",
				mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}
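
/*
 * mas_validate_child_slot() - Ensure every child of a non-leaf node points
 * back at this node, in the matching parent slot.
 */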
static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	unsigned char i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);

		if (!child) {
			pr_err("Non-leaf node lacks child at " PTR_FMT "[%u]\n",
				mas_mn(mas), i);
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at " PTR_FMT "[%u]: child " PTR_FMT " has pslot %u\n",
				mas_mn(mas), i, mte_to_node(child),
				mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child " PTR_FMT " has parent " PTR_FMT " not " PTR_FMT "\n",
				mte_to_node(child), mte_parent(child),
				mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}

		if (i < mt_pivots[type] && pivots[i] == mas->max)
			break;
	}
}

/*
 * Validate all pivots are within mas->min and mas->max, check metadata ends
 * where the maximum ends, and ensure there are no slots or pivots set outside
 * of the end of the data.
 */
static void mas_validate_limits(struct ma_state *mas)
{
	int i;
	unsigned long prev_piv = 0;
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	for (i = 0; i < mt_slots[type]; i++) {
		unsigned long piv;

		piv = mas_safe_pivot(mas, pivots, i, type);

		if (!piv && (i != 0)) {
			pr_err("Missing node limit pivot at " PTR_FMT "[%u]\n",
				mas_mn(mas), i);
			MAS_WARN_ON(mas, 1);
		}

		if (prev_piv > piv) {
			pr_err(PTR_FMT "[%u] piv %lu < prev_piv %lu\n",
				mas_mn(mas), i, piv, prev_piv);
			MAS_WARN_ON(mas, piv < prev_piv);
		}

		if (piv < mas->min) {
			pr_err(PTR_FMT "[%u] %lu < %lu\n", mas_mn(mas), i,
				piv, mas->min);
			MAS_WARN_ON(mas, piv < mas->min);
		}
		if (piv > mas->max) {
			pr_err(PTR_FMT "[%u] %lu > %lu\n", mas_mn(mas), i,
				piv, mas->max);
			MAS_WARN_ON(mas, piv > mas->max);
		}
		prev_piv = piv;
		if (piv == mas->max)
			break;
	}

	if (mas_data_end(mas) != i) {
		pr_err("node " PTR_FMT ": data_end %u != the last slot offset %u\n",
			mas_mn(mas), mas_data_end(mas), i);
		MT_BUG_ON(mas->tree, 1);
	}

	for (i += 1; i < mt_slots[type]; i++) {
		void *entry = mas_slot(mas, slots, i);

		if (entry && (i != mt_slots[type] - 1)) {
			pr_err(PTR_FMT "[%u] should not have entry " PTR_FMT "\n",
				mas_mn(mas), i, entry);
			MT_BUG_ON(mas->tree, entry != NULL);
		}

		if (i < mt_pivots[type]) {
			unsigned long piv = pivots[i];

			if (!piv)
				continue;

			pr_err(PTR_FMT "[%u] should not have piv %lu\n",
				mas_mn(mas), i, piv);
			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
		}
	}
}

static void mt_validate_nulls(struct maple_tree *mt)
{
	void *entry, *last = (void *)1;
	unsigned char offset = 0;
	void __rcu **slots;
	MA_STATE(mas, mt, 0, 0);

	mas_start(&mas);
	if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
	do {
		entry = mas_slot(&mas, slots, offset);
		if (!last && !entry) {
			pr_err("Sequential nulls end at " PTR_FMT "[%u]\n",
				mas_mn(&mas), offset);
		}
		MT_BUG_ON(mt, !last && !entry);
		last = entry;
		if (offset == mas_data_end(&mas)) {
			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
			if (mas_is_overflow(&mas))
				return;
			offset = 0;
			slots = ma_slots(mte_to_node(mas.node),
					mte_node_type(mas.node));
		} else {
			offset++;
		}
	} while (!mas_is_overflow(&mas));
}

/*
 * Validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max)
 * 2. The gap is correctly set in the parents
 */
void mt_validate(struct maple_tree *mt)
	__must_hold(mt->ma_lock)
{
	unsigned char end;

	MA_STATE(mas, mt, 0, 0);
	mas_start(&mas);
	if (!mas_is_active(&mas))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	while (!mas_is_overflow(&mas)) {
		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
		end = mas_data_end(&mas);
		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
				(mas.max != ULONG_MAX))) {
			pr_err("Invalid size %u of " PTR_FMT "\n",
				end, mas_mn(&mas));
		}

		mas_validate_parent_slot(&mas);
		mas_validate_limits(&mas);
		mas_validate_child_slot(&mas);
		if (mt_is_alloc(mt))
			mas_validate_gaps(&mas);
		mas_dfs_postorder(&mas, ULONG_MAX);
	}
	mt_validate_nulls(mt);
}
EXPORT_SYMBOL_GPL(mt_validate);

void mas_dump(const struct ma_state *mas)
{
	pr_err("MAS: tree=" PTR_FMT " enode=" PTR_FMT " ",
		mas->tree, mas->node);
	switch (mas->status) {
	case ma_active:
		pr_err("(ma_active)");
		break;
	case ma_none:
		pr_err("(ma_none)");
		break;
	case ma_root:
		pr_err("(ma_root)");
		break;
	case ma_start:
		pr_err("(ma_start) ");
		break;
	case ma_pause:
		pr_err("(ma_pause) ");
		break;
	case ma_overflow:
		pr_err("(ma_overflow) ");
		break;
	case ma_underflow:
		pr_err("(ma_underflow) ");
		break;
	case ma_error:
		pr_err("(ma_error) ");
		break;
	}

	pr_err("Store Type: ");
	switch (mas->store_type) {
	case wr_invalid:
		pr_err("invalid store type\n");
		break;
	case wr_new_root:
		pr_err("new_root\n");
		break;
	case wr_store_root:
		pr_err("store_root\n");
		break;
	case wr_exact_fit:
		pr_err("exact_fit\n");
		break;
	case wr_split_store:
		pr_err("split_store\n");
		break;
	case wr_slot_store:
		pr_err("slot_store\n");
		break;
	case wr_append:
		pr_err("append\n");
		break;
	case wr_node_store:
		pr_err("node_store\n");
		break;
	case wr_spanning_store:
		pr_err("spanning_store\n");
		break;
	case wr_rebalance:
		pr_err("rebalance\n");
		break;
	}

	pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
		mas->index, mas->last);
	pr_err(" min=%lx max=%lx alloc=" PTR_FMT ", depth=%u, flags=%x\n",
		mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
	if (mas->index > mas->last)
		pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);

void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
	pr_err("WR_MAS: node=" PTR_FMT " r_min=%lx r_max=%lx\n",
		wr_mas->node, wr_mas->r_min, wr_mas->r_max);
	pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
		wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
		wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);

#endif /* CONFIG_DEBUG_MAPLE_TREE */